index
int64 0
100k
| blob_id
stringlengths 40
40
| code
stringlengths 7
7.27M
| steps
listlengths 1
1.25k
| error
bool 2
classes |
---|---|---|---|---|
1,000 |
153a33b85cf8b3ef9c742f05b460e94e0c684682
|
#Author: AKHILESH
#This program illustrates the advanced concepts of inheritance
#Python looks up for method in following order: Instance attributes, class attributes and the
#from the base class
#mro: Method Resolution order
class Data(object):
def __init__(self, data):
self.data = data
def getData(self):
print('Data:',self.data)
class Time(Data): #Inhertiting from Data class
def getTime(self):
print('Time:',self.data)
if __name__ == '__main__':
data = Data(10)
time = Time(20) #inherited Class -> Value passed to __init__of Data (Base class)
time.getTime()
data.getData()
time.getData()
print(Time.mro())
|
[
"#Author: AKHILESH\n#This program illustrates the advanced concepts of inheritance\n#Python looks up for method in following order: Instance attributes, class attributes and the\n#from the base class\n#mro: Method Resolution order\n\nclass Data(object):\n def __init__(self, data):\n self.data = data\n\n def getData(self):\n print('Data:',self.data)\n\nclass Time(Data): #Inhertiting from Data class\n def getTime(self):\n print('Time:',self.data)\n\nif __name__ == '__main__':\n data = Data(10)\n time = Time(20) #inherited Class -> Value passed to __init__of Data (Base class)\n\n time.getTime()\n data.getData()\n time.getData()\n\n print(Time.mro())\n",
"class Data(object):\n\n def __init__(self, data):\n self.data = data\n\n def getData(self):\n print('Data:', self.data)\n\n\nclass Time(Data):\n\n def getTime(self):\n print('Time:', self.data)\n\n\nif __name__ == '__main__':\n data = Data(10)\n time = Time(20)\n time.getTime()\n data.getData()\n time.getData()\n print(Time.mro())\n",
"class Data(object):\n\n def __init__(self, data):\n self.data = data\n\n def getData(self):\n print('Data:', self.data)\n\n\nclass Time(Data):\n\n def getTime(self):\n print('Time:', self.data)\n\n\n<code token>\n",
"class Data(object):\n\n def __init__(self, data):\n self.data = data\n <function token>\n\n\nclass Time(Data):\n\n def getTime(self):\n print('Time:', self.data)\n\n\n<code token>\n",
"class Data(object):\n <function token>\n <function token>\n\n\nclass Time(Data):\n\n def getTime(self):\n print('Time:', self.data)\n\n\n<code token>\n",
"<class token>\n\n\nclass Time(Data):\n\n def getTime(self):\n print('Time:', self.data)\n\n\n<code token>\n",
"<class token>\n\n\nclass Time(Data):\n <function token>\n\n\n<code token>\n",
"<class token>\n<class token>\n<code token>\n"
] | false |
1,001 |
e207063eb3eb1929e0e24b62e6b77a8924a80489
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 20:24:53 2020
@author: filip
"""
import re
texto = "Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia."
texto1 = ['Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido', ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia']
def separa_sentencas(texto):
'''A funcao recebe um texto e devolve uma lista das sentencas dentro do texto'''
sentencas = re.split(r'[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
'''A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca'''
sentenca = re.split(r'[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
'''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''
return frase.split()
def n_palavras_unicas(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez'''
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas'''
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases (sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras (frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio (list_palavras): # Traço linguístico 1
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i +=1
tam = soma_palavras/len(palavras)
return tam
def type_token(list_palavras): # Traço linguístico 2
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras)/ len(palavras)
return TT
def hapax_legomana (list_palavras): # Traço linguístico 3
palavras = lista_palavras(texto)
HL = n_palavras_unicas(palavras)/ len(palavras)
return HL
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i +=1
return soma
def tam_medio_sentenca(lista_sent): # Traço linguístico 4
TMS = soma_caracteres_sentenca(lista_sent)/ len(separa_sentencas(lista_sent))
return TMS
def frases (sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca (texto): # Traço linguístico 5
CS = len(frases(texto))/ len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i +=1
return soma_fr
def tam_medio_frase(lista_frases): # Traço linguístico 6
TMF = soma_caracteres_frases(lista_frases)/ len (frases(lista_frases))
return TMF
def le_textos():
'''A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento'''
i = 1
textos = []
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
while texto:
textos.append(texto)
i += 1
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
return textos
def compara_assinatura(as_a, as_b):
'''IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.'''
i = 0
soma = 0
for i in range(6):
soma += abs (as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
lista.append(tam_medio(texto))
lista.append(type_token(texto))
lista.append(hapax_legomana (texto))
lista.append(tam_medio_sentenca(texto))
lista.append(complexidade_sentenca (texto))
lista.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
    """Return the number (1..n) of the text most likely infected by COH-PIAH.

    textos: list of texts to evaluate.
    ass_cp: the COH-PIAH reference signature (six traits).
    The chosen text is the one whose signature has the smallest mean
    absolute difference from `ass_cp`.
    Bug fix: the original looked up the minimum in an undefined name
    `lista` (NameError); the lookup now uses `lista_sab`. The dead
    `menor = 0` initialisation was removed.
    """
    lista_sab = []
    for texto in textos:
        as_texto = calcula_assinatura(texto)
        lista_sab.append(compara_assinatura(ass_cp, as_texto))
    menor = min(lista_sab)
    return lista_sab.index(menor) + 1
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 18 20:24:53 2020\r\n\r\n@author: filip\r\n\"\"\"\r\n\r\nimport re\r\n\r\ntexto = \"Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.\"\r\ntexto1 = ['Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido', ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia']\r\n\r\n\r\ndef separa_sentencas(texto):\r\n '''A funcao recebe um texto e devolve uma lista das sentencas dentro do texto'''\r\n sentencas = re.split(r'[.!?]+', texto)\r\n if sentencas[-1] == '':\r\n del sentencas[-1]\r\n return sentencas\r\n\r\ndef separa_frases(sentenca):\r\n '''A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca'''\r\n sentenca = re.split(r'[,:;]+', sentenca)\r\n return sentenca\r\n \r\ndef separa_palavras(frase):\r\n '''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''\r\n return frase.split()\r\n\r\ndef n_palavras_unicas(lista_palavras):\r\n '''Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez'''\r\n freq = dict()\r\n unicas = 0\r\n for palavra in lista_palavras:\r\n p = palavra.lower()\r\n if p in freq:\r\n if freq[p] == 1:\r\n unicas -= 1\r\n freq[p] += 1\r\n else:\r\n freq[p] = 1\r\n unicas += 1\r\n\r\n return unicas\r\n\r\ndef 
n_palavras_diferentes(lista_palavras):\r\n '''Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas'''\r\n freq = dict()\r\n for palavra in lista_palavras:\r\n p = palavra.lower()\r\n if p in freq:\r\n freq[p] += 1\r\n else:\r\n freq[p] = 1\r\n\r\n return len(freq)\r\n\r\n\r\ndef lista_frases (sentenca):\r\n list_frases = []\r\n list_sent = separa_sentencas(sentenca)\r\n for sent in list_sent:\r\n novas_frases = separa_frases(sent)\r\n list_frases.extend(novas_frases)\r\n return list_frases\r\n \r\ndef lista_palavras (frases):\r\n list_palavras = []\r\n list_fr = lista_frases(frases)\r\n for frase in list_fr:\r\n novas_palavras = separa_palavras(frase)\r\n list_palavras.extend(novas_palavras)\r\n return list_palavras\r\n \r\n\r\n\r\n\r\ndef tam_medio (list_palavras): # Traço linguístico 1\r\n palavras = lista_palavras(texto)\r\n i = 0\r\n soma_palavras = 0\r\n while i < len(palavras):\r\n x = palavras[i]\r\n soma_palavras = soma_palavras + len(x)\r\n i +=1\r\n tam = soma_palavras/len(palavras)\r\n return tam\r\n\r\ndef type_token(list_palavras): # Traço linguístico 2\r\n palavras = lista_palavras(texto)\r\n TT = n_palavras_diferentes(palavras)/ len(palavras)\r\n return TT\r\n\r\ndef hapax_legomana (list_palavras): # Traço linguístico 3\r\n palavras = lista_palavras(texto)\r\n HL = n_palavras_unicas(palavras)/ len(palavras)\r\n return HL\r\n\r\n\r\ndef soma_caracteres_sentenca(lista_sent):\r\n lista_sent = separa_sentencas(texto)\r\n i = 0\r\n soma = 0\r\n while i < len(lista_sent):\r\n x = lista_sent[i]\r\n len(x)\r\n soma = soma + len(x)\r\n i +=1\r\n return soma\r\n\r\ndef tam_medio_sentenca(lista_sent): # Traço linguístico 4\r\n TMS = soma_caracteres_sentenca(lista_sent)/ len(separa_sentencas(lista_sent))\r\n return TMS\r\n\r\ndef frases (sentenca):\r\n list_frases = []\r\n list_sent = separa_sentencas(texto)\r\n for sent in list_sent:\r\n novas_frases = separa_frases(sent)\r\n list_frases.extend(novas_frases)\r\n return 
list_frases\r\n \r\n\r\ndef complexidade_sentenca (texto): # Traço linguístico 5\r\n CS = len(frases(texto))/ len(separa_sentencas(texto))\r\n return CS\r\n\r\n\r\ndef soma_caracteres_frases(lista_frases):\r\n lista_fr = frases(lista_frases)\r\n i = 0\r\n soma_fr = 0\r\n while i < len(lista_fr):\r\n x = lista_fr[i]\r\n len(x)\r\n soma_fr = soma_fr + len(x)\r\n i +=1\r\n return soma_fr\r\n\r\ndef tam_medio_frase(lista_frases): # Traço linguístico 6\r\n TMF = soma_caracteres_frases(lista_frases)/ len (frases(lista_frases))\r\n\r\n return TMF\r\n\r\ndef le_textos():\r\n '''A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento'''\r\n i = 1\r\n textos = []\r\n texto = input(\"Digite o texto \" + str(i) +\" (aperte enter para sair):\")\r\n while texto:\r\n textos.append(texto)\r\n i += 1\r\n texto = input(\"Digite o texto \" + str(i) +\" (aperte enter para sair):\")\r\n\r\n return textos\r\n\r\n\r\ndef compara_assinatura(as_a, as_b):\r\n '''IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.'''\r\n i = 0\r\n soma = 0\r\n for i in range(6):\r\n soma += abs (as_a[i] - as_b[i])\r\n Sab = soma / 6\r\n return Sab\r\n\r\ndef calcula_assinatura(texto):\r\n as_b = []\r\n lista.append(tam_medio(texto))\r\n lista.append(type_token(texto))\r\n lista.append(hapax_legomana (texto))\r\n lista.append(tam_medio_sentenca(texto))\r\n lista.append(complexidade_sentenca (texto))\r\n lista.append(tam_medio_frase(texto))\r\n return as_b\r\n\r\ndef avalia_textos(textos, ass_cp):\r\n '''IMPLEMENTAR. 
Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.'''\r\n lista_sab = []\r\n menor = 0\r\n for texto in textos:\r\n as_texto = calcula_assinatura(texto)\r\n comparar = compara_assinatura(ass_cp, as_texto)\r\n lista_sab.append(comparar)\r\n menor = min(lista_sab)\r\n return (lista.index(menor) + 1)\r\n\r\n\r\n\r\n\r\n",
"<docstring token>\nimport re\ntexto = (\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.'\n )\ntexto1 = [\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido'\n ,\n ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia'\n ]\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n 
freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n 
TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\ntexto = (\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.'\n )\ntexto1 = [\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido'\n ,\n ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia'\n ]\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes 
utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef 
tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = 
lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return 
HL\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return 
HL\n\n\n<function token>\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n 
list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n 
return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n 
return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = 
soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = 
soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\n<function token>\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\n<function token>\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\n<function token>\n<function token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
1,002 |
5d7080f2778133d1938853512ca038edcf7c0dc4
|
from Products.CMFPlone.utils import getFSVersionTuple
from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
from plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import z2
from zope.interface import alsoProvides
import plone.api
# Major-version flag so fixtures can branch on Plone 5 behaviour.
PLONE5 = 1 if getFSVersionTuple()[0] >= 5 else 0
def set_browserlayer(request):
    """Mark ``request`` with the ticketshop browser layer.

    Importing the GenericSetup profile alone does not put the browser
    layer on test requests, so tests apply it explicitly via this helper.
    """
    alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
    """Sandbox layer providing a plain ticketshop installation."""

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # Register the package's ZCML with the test configuration context.
        import bda.plone.ticketshop
        self.loadZCML(
            package=bda.plone.ticketshop, context=configurationContext)
        # Old-style product using an initialize() function: install explicitly.
        z2.installProduct(app, 'Products.DateRecurringIndex')

    def setUpPloneSite(self, portal):
        # Apply the package's default GenericSetup profile.
        self.applyProfile(portal, 'bda.plone.ticketshop:default')

    def tearDownZope(self, app):
        # Mirror setUpZope: remove the old-style product again.
        z2.uninstallProduct(app, 'Products.DateRecurringIndex')
# Shared layer instance and the integration-testing wrapper built on it.
Ticketshop_FIXTURE = TicketshopLayer()
Ticketshop_INTEGRATION_TESTING = IntegrationTesting(
    bases=(Ticketshop_FIXTURE,),
    name="Ticketshop:Integration",
)
class TicketshopATLayer(PloneSandboxLayer):
    """Sandbox layer installing the Archetypes flavour of the ticketshop.

    Deliberately does NOT base itself on the plain ticketshop fixture:
    test layers use different ZODB connections and c.z.datagriedfield
    fails with a ZODB object reference error when fixtures are shared.
    """

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # Register ZCML for ATContentTypes and the ticketshop package.
        import Products.ATContentTypes
        self.loadZCML(package=Products.ATContentTypes,
                      context=configurationContext)

        import bda.plone.ticketshop
        self.loadZCML(package=bda.plone.ticketshop,
                      context=configurationContext)

        # Install products that use an old-style initialize() function
        z2.installProduct(app, 'Products.DateRecurringIndex')
        z2.installProduct(app, 'bda.plone.ticketshop.at')

    def tearDownZope(self, app):
        # Consistency fix: mirror setUpZope (as TicketshopLayer does) so the
        # old-style products do not leak into other test layers.
        z2.uninstallProduct(app, 'bda.plone.ticketshop.at')
        z2.uninstallProduct(app, 'Products.DateRecurringIndex')

    def setUpPloneSite(self, portal):
        if PLONE5:
            # Plone 5 no longer ships ATContentTypes installed by default.
            self.applyProfile(portal, 'Products.ATContentTypes:default')
        self.applyProfile(portal, 'bda.plone.ticketshop.at:default')

        portal.portal_workflow.setDefaultChain("one_state_workflow")
        setRoles(portal, TEST_USER_ID, ['Manager'])

        # Create test users
        cru = plone.api.user.create
        cru(email="[email protected]", username="customer1", password="customer1")
        cru(email="[email protected]", username="customer2", password="customer2")
        cru(email="[email protected]", username="vendor1", password="vendor1")
        cru(email="[email protected]", username="vendor2", password="vendor2")

        # Create test content: two buyable events with two tickets each.
        crc = plone.api.content.create

        crc(container=portal, type='Buyable Event', id='folder_1')
        crc(container=portal['folder_1'], type='Ticket', id='item_11',
            title="item_11")
        crc(container=portal['folder_1'], type='Ticket', id='item_12',
            title="item_12")

        crc(container=portal, type='Buyable Event', id='folder_2')
        crc(container=portal['folder_2'], type='Ticket', id='item_21',
            title="item_21")
        crc(container=portal['folder_2'], type='Ticket', id='item_22',
            title="item_22")
# AT layer instance plus the integration and robot-framework wrappers.
TicketshopAT_FIXTURE = TicketshopATLayer()
TicketshopAT_INTEGRATION_TESTING = IntegrationTesting(
    bases=(TicketshopAT_FIXTURE,),
    name="TicketshopAT:Integration",
)
TicketshopAT_ROBOT_TESTING = FunctionalTesting(
    bases=(MOCK_MAILHOST_FIXTURE, TicketshopAT_FIXTURE, z2.ZSERVER_FIXTURE),
    name="TicketshopAT:Robot",
)
|
[
"from Products.CMFPlone.utils import getFSVersionTuple\nfrom bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer\nfrom plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\nfrom plone.testing import z2\nfrom zope.interface import alsoProvides\nimport plone.api\n\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n # Uninstall old-style Products\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(\n bases=(Ticketshop_FIXTURE,),\n name=\"Ticketshop:Integration\")\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n # don't use shop fixture here. 
looks like, test layers use differen ZODB\n # connections and c.z.datagriedfield fails with a ZODB object reference\n # error.\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes,\n context=configurationContext)\n\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n\n portal.portal_workflow.setDefaultChain(\"one_state_workflow\")\n setRoles(portal, TEST_USER_ID, ['Manager'])\n\n # Create test users\n cru = plone.api.user.create\n cru(email=\"[email protected]\", username=\"customer1\", password=\"customer1\")\n cru(email=\"[email protected]\", username=\"customer2\", password=\"customer2\")\n cru(email=\"[email protected]\", username=\"vendor1\", password=\"vendor1\")\n cru(email=\"[email protected]\", username=\"vendor2\", password=\"vendor2\")\n\n # Create test content\n crc = plone.api.content.create\n\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title=\"item_11\")\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title=\"item_12\")\n\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title=\"item_21\")\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title=\"item_22\")\n\n\nTicketshopAT_FIXTURE = TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(\n bases=(TicketshopAT_FIXTURE,),\n 
name=\"TicketshopAT:Integration\")\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(\n bases=(\n MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE,\n z2.ZSERVER_FIXTURE\n ),\n name=\"TicketshopAT:Robot\")\n",
"from Products.CMFPlone.utils import getFSVersionTuple\nfrom bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer\nfrom plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\nfrom plone.testing import z2\nfrom zope.interface import alsoProvides\nimport plone.api\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(bases=(\n Ticketshop_FIXTURE,), name='Ticketshop:Integration')\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def 
setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\nTicketshopAT_FIXTURE = TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(bases=(\n TicketshopAT_FIXTURE,), name='TicketshopAT:Integration')\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(bases=(MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE, z2.ZSERVER_FIXTURE), name='TicketshopAT:Robot')\n",
"<import token>\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(bases=(\n Ticketshop_FIXTURE,), name='Ticketshop:Integration')\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n 
cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\nTicketshopAT_FIXTURE = TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(bases=(\n TicketshopAT_FIXTURE,), name='TicketshopAT:Integration')\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(bases=(MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE, z2.ZSERVER_FIXTURE), name='TicketshopAT:Robot')\n",
"<import token>\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', 
id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], 
type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], 
type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <assignment token>\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], 
type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <assignment token>\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n <function token>\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], 
type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <assignment token>\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n <function token>\n <function token>\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n<class token>\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n<class token>\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n <assignment token>\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n<class token>\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n <assignment token>\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n <function token>\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n<class token>\n<assignment token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n <assignment token>\n <function token>\n <function token>\n\n\n<assignment token>\n",
"<import token>\n<code token>\n<function token>\n<class token>\n<assignment token>\n<class token>\n<assignment token>\n"
] | false |
1,003 |
646f6a0afc3dc129250c26270dda4355b8cea080
|
#!/usr/local/bin/python3.3
'''
http://projecteuler.net/problem=127()
abc-hits
Problem 127
The radical of n, rad(n), is the product of distinct prime factors of n. For example, 504 = 23 × 32 × 7, so rad(504) = 2 × 3 × 7 = 42.
We shall define the triplet of positive integers (a, b, c) to be an abc-hit if:
GCD(a, b) = GCD(a, c) = GCD(b, c) = 1
a < b
a + b = c
rad(abc) < c
For example, (5, 27, 32) is an abc-hit, because:
GCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1
5 < 27
5 + 27 = 32
rad(4320) = 30 < 32
It turns out that abc-hits are quite rare and there are only thirty-one abc-hits for c < 1000, with ∑c = 12523.
Find ∑c for c < 120000.
'''
'''
Notes on problem 127():
Very slow
'''
from PE_factors import genFactors
from PE_basic import product
def problem127():
GOAL = 120000
rad = {} # rad[6] = {2,3}, radn[8] = {2}
for primes in genFactors(GOAL):
rad[product(primes)] = (set(primes), product(set(primes)))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
if __name__ == "__main__":
print(problem127() == 18407904)
|
[
"#!/usr/local/bin/python3.3\n\n'''\nhttp://projecteuler.net/problem=127()\nabc-hits\nProblem 127\nThe radical of n, rad(n), is the product of distinct prime factors of n. For example, 504 = 23 × 32 × 7, so rad(504) = 2 × 3 × 7 = 42.\n\nWe shall define the triplet of positive integers (a, b, c) to be an abc-hit if:\n\nGCD(a, b) = GCD(a, c) = GCD(b, c) = 1\na < b\na + b = c\nrad(abc) < c\nFor example, (5, 27, 32) is an abc-hit, because:\n\nGCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1\n5 < 27\n5 + 27 = 32\nrad(4320) = 30 < 32\nIt turns out that abc-hits are quite rare and there are only thirty-one abc-hits for c < 1000, with ∑c = 12523.\n\nFind ∑c for c < 120000.\n'''\n\n'''\nNotes on problem 127():\nVery slow\n'''\n\nfrom PE_factors import genFactors\nfrom PE_basic import product\n\ndef problem127():\n GOAL = 120000\n\n rad = {} # rad[6] = {2,3}, radn[8] = {2}\n for primes in genFactors(GOAL):\n rad[product(primes)] = (set(primes), product(set(primes)))\n\n def relprime(s, t):\n return s & t == set()\n\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == \"__main__\":\n print(problem127() == 18407904)\n",
"<docstring token>\nfrom PE_factors import genFactors\nfrom PE_basic import product\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == '__main__':\n print(problem127() == 18407904)\n",
"<docstring token>\n<import token>\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == '__main__':\n print(problem127() == 18407904)\n",
"<docstring token>\n<import token>\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<code token>\n"
] | false |
1,004 |
3079fdbe6319454ad166d06bda5670554a5746ee
|
# len(): tamanho da string
# count(): conta quantas vezes um caractere aparece
# lower(), upper()
# replace(): substitui as letras por outra
# split(): quebra uma string a partir dos espacos em branco
a = len('Karen')
print(a)
b = 'Rainha Elizabeth'.count('a')
print(b)
c = 'karen nayara'.replace('a','@')
print(c)
d = 'karen meeseeks gomes'.split()
print(d)
|
[
"# len(): tamanho da string\n# count(): conta quantas vezes um caractere aparece\n# lower(), upper()\n# replace(): substitui as letras por outra\n# split(): quebra uma string a partir dos espacos em branco\n\na = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a','@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)",
"a = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a', '@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)\n",
"<assignment token>\nprint(a)\n<assignment token>\nprint(b)\n<assignment token>\nprint(c)\n<assignment token>\nprint(d)\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,005 |
2cdcd6976a1ec99b927adcedc48c36bbda1b4e18
|
""" Generate test pads for padder. """
# usage: python gen.py > pads.txt
import random
pad = ""
count = 0
# The pad chars MUST match the character set used by padder.
# See the 'characters' variable in 'main.hpp' for more
# information.
chars = "abcdefghijklmnopqrstuvwxyz0123456789-"
print "#", "Pad"
while count < 12:
for x in xrange(0, 98):
pad += random.choice(chars)
count = count+1
print count, pad
pad = ""
|
[
"\"\"\" Generate test pads for padder. \"\"\"\n\n# usage: python gen.py > pads.txt\n\nimport random\n\npad = \"\"\ncount = 0\n\n# The pad chars MUST match the character set used by padder.\n# See the 'characters' variable in 'main.hpp' for more\n# information.\nchars = \"abcdefghijklmnopqrstuvwxyz0123456789-\"\n\nprint \"#\", \"Pad\"\nwhile count < 12:\n for x in xrange(0, 98):\n pad += random.choice(chars)\n\n count = count+1\n print count, pad\n pad = \"\"\n\n"
] | true |
1,006 |
d68bd9c90a106a9eac767607ad77bdd84d0f18d2
|
#-*- coding = utf-8-*-
#@Time : 2020/6/26 11:02
#@Author :Ella
#@File :app.py
#@Software : PyCharm
import time
import datetime
from flask import Flask,render_template,request #render_template渲染模板
app = Flask(__name__) #初始化的对象
#路由解析,通过用户访问的路径,匹配想要的函数
@app.route('/')
def hello_world():
return '你好'
#通过访问路径,获取用户的字符串参数
@app.route('/test1/<name>')
def test1(name):
return '你好,%s'%name
#通过访问路径,获取用户的整形参数 此外,还有float类型
@app.route('/test2/<int:id>')
def test2(id):
return '你好,%d'%id
#返回给用户渲染后的网页文件
# @app.route('/index1')
# def index1():
# return render_template("index.html")
#向页面传递变量
@app.route('/index1')
def index2():
time = datetime.date.today() #普通变量
name = ['小新','小英','小红'] #列表类型
task = {"任务":"打扫卫生","时间":"3小时"} #字典类型
return render_template("index.html",var = time,list = name,task = task)
#表单提交
@app.route('/test/register')
def register():
return render_template("test/register.html")
#接受表单提交的路由,需要指定methods为post
@app.route('/result',methods = ['POST','GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template("test/result.html",result = result)
if __name__ == '__main__':
app.run(debug=True)
|
[
"#-*- coding = utf-8-*-\n#@Time : 2020/6/26 11:02\n#@Author :Ella\n#@File :app.py\n#@Software : PyCharm\n\nimport time\nimport datetime\n\nfrom flask import Flask,render_template,request #render_template渲染模板\napp = Flask(__name__) #初始化的对象\n\n#路由解析,通过用户访问的路径,匹配想要的函数\[email protected]('/')\ndef hello_world():\n return '你好'\n\n#通过访问路径,获取用户的字符串参数\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s'%name\n\n#通过访问路径,获取用户的整形参数 此外,还有float类型\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d'%id\n\n#返回给用户渲染后的网页文件\n# @app.route('/index1')\n# def index1():\n# return render_template(\"index.html\")\n\n#向页面传递变量\[email protected]('/index1')\ndef index2():\n time = datetime.date.today() #普通变量\n name = ['小新','小英','小红'] #列表类型\n task = {\"任务\":\"打扫卫生\",\"时间\":\"3小时\"} #字典类型\n return render_template(\"index.html\",var = time,list = name,task = task)\n\n#表单提交\[email protected]('/test/register')\ndef register():\n return render_template(\"test/register.html\")\n\n#接受表单提交的路由,需要指定methods为post\[email protected]('/result',methods = ['POST','GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template(\"test/result.html\",result = result)\n\nif __name__ == '__main__':\n app.run(debug=True)",
"import time\nimport datetime\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\[email protected]('/test2/<int:id>')\ndef test2(id):\n return '你好,%d' % id\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello_world():\n return '你好'\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\n<function token>\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\n<function token>\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\[email protected]('/test/register')\ndef register():\n return render_template('test/register.html')\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\n<function token>\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\n<function token>\n\n\[email protected]('/result', methods=['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n result = request.form\n return render_template('test/result.html', result=result)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\[email protected]('/test1/<name>')\ndef test1(name):\n return '你好,%s' % name\n\n\n<function token>\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\[email protected]('/index1')\ndef index2():\n time = datetime.date.today()\n name = ['小新', '小英', '小红']\n task = {'任务': '打扫卫生', '时间': '3小时'}\n return render_template('index.html', var=time, list=name, task=task)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,007 |
6da828a797efac7c37723db96a2682e960c317b5
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name = "keputils",
version = "0.2.1",
description = "Basic module for interaction with KOI and Kepler-stellar tables.",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "[email protected]",
url = "https://github.com/timothydmorton/keputils",
packages = ['keputils'],
scripts = ['scripts/koiquery'],
#entry_points = {'console_scripts' : ['koiquery = koiquery:main']},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.13','simpledist'],
zip_safe=False
)
|
[
"from setuptools import setup\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nsetup(name = \"keputils\",\n version = \"0.2.1\",\n description = \"Basic module for interaction with KOI and Kepler-stellar tables.\",\n long_description = readme(),\n author = \"Timothy D. Morton\",\n author_email = \"[email protected]\",\n url = \"https://github.com/timothydmorton/keputils\",\n packages = ['keputils'],\n scripts = ['scripts/koiquery'],\n #entry_points = {'console_scripts' : ['koiquery = koiquery:main']},\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'\n ],\n install_requires=['pandas>=0.13','simpledist'],\n zip_safe=False\n) \n",
"from setuptools import setup\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name='keputils', version='0.2.1', description=\n 'Basic module for interaction with KOI and Kepler-stellar tables.',\n long_description=readme(), author='Timothy D. Morton', author_email=\n '[email protected]', url=\n 'https://github.com/timothydmorton/keputils', packages=['keputils'],\n scripts=['scripts/koiquery'], classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[\n 'pandas>=0.13', 'simpledist'], zip_safe=False)\n",
"<import token>\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name='keputils', version='0.2.1', description=\n 'Basic module for interaction with KOI and Kepler-stellar tables.',\n long_description=readme(), author='Timothy D. Morton', author_email=\n '[email protected]', url=\n 'https://github.com/timothydmorton/keputils', packages=['keputils'],\n scripts=['scripts/koiquery'], classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[\n 'pandas>=0.13', 'simpledist'], zip_safe=False)\n",
"<import token>\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,008 |
9c6bb885c05ee13a283b09861a5aa7c5e62677cb
|
#!/usr/bin/python
def check(n):
if n == 0 :
print "neither Positive nor Negative"
if n < 0 :
print "Negative"
if n > 0 :
print "Positive"
print "10 is ", check(10)
print "-5 is ", check(-5)
print "0 is ", check(0)
|
[
"#!/usr/bin/python\ndef check(n):\n if n == 0 :\n print \"neither Positive nor Negative\"\n if n < 0 :\n print \"Negative\"\n if n > 0 :\n print \"Positive\"\n\n\n\nprint \"10 is \", check(10)\nprint \"-5 is \", check(-5)\nprint \"0 is \", check(0)"
] | true |
1,009 |
93e8e9fc4f0503dfc3243bef5ab8261a4cdfc296
|
#!/usr/bin/env python
# encoding: UTF-8
'''
Script to select current version for a given soft (python, ruby or java).
'''
import os
import re
import sys
import glob
import getopt
# fix input in Python 2 and 3
try:
input = raw_input # pylint: disable=redefined-builtin,invalid-name
except NameError:
pass
class Version(object): # pylint: disable=useless-object-inheritance
'''
Software management class
'''
HELP = '''version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose.'''
SELECTED = ' *'
def __init__(self, soft):
'''
Constructor that takes software name
'''
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root+'/[0-9]*'))
self.versions = [v[len(self.soft_root)+1:] for v in self.soft_paths]
path = os.path.realpath("%s/current" % self.soft_root)
self.current_version = path[path.rindex(os.path.sep)+1:]
def set_version(self, index):
'''
Set software version by index
'''
sudo = 'sudo ' if self.sudo else ''
old_dir = "current"
if index == -1:
print("Selecting system version")
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" %
(self.soft, self.versions[index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
os.system("cd %s && %sln -s %s %s" % (self.soft_root, sudo, directory, old_dir))
def ask_version(self):
'''
Prompt user for software version in the list of installed versions
'''
# print version list
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print("0: System"+selected)
for version in self.soft_paths:
number = version[len(self.soft_root)+1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index)+': '+str(number)+selected)
index += 1
# ask for the version
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print("\nUser abort!")
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and \
int(choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print("Keeping current")
sys.exit(0)
else:
print("Bad version, please choose a number between 0 and %s" %
str(maximum))
# return index in version table
return index
@staticmethod
def run():
'''
Read software name on command line and run version selection
'''
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" % option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print("Error parsing command line: You must pass software")
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
if __name__ == '__main__':
Version.run()
|
[
"#!/usr/bin/env python\n# encoding: UTF-8\n\n'''\nScript to select current version for a given soft (python, ruby or java).\n'''\n\nimport os\nimport re\nimport sys\nimport glob\nimport getopt\n\n\n# fix input in Python 2 and 3\ntry:\n input = raw_input # pylint: disable=redefined-builtin,invalid-name\nexcept NameError:\n pass\n\n\nclass Version(object): # pylint: disable=useless-object-inheritance\n '''\n Software management class\n '''\n\n HELP = '''version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.'''\n SELECTED = ' *'\n\n def __init__(self, soft):\n '''\n Constructor that takes software name\n '''\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root+'/[0-9]*'))\n self.versions = [v[len(self.soft_root)+1:] for v in self.soft_paths]\n path = os.path.realpath(\"%s/current\" % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep)+1:]\n\n def set_version(self, index):\n '''\n Set software version by index\n '''\n sudo = 'sudo ' if self.sudo else ''\n old_dir = \"current\"\n if index == -1:\n print(\"Selecting system version\")\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system(\"cd %s && %srm %s\" % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" %\n (self.soft, self.versions[index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system(\"cd %s && %srm %s\" % (self.soft_root, sudo, old_dir))\n os.system(\"cd %s && %sln -s %s %s\" % (self.soft_root, sudo, directory, old_dir))\n\n def ask_version(self):\n '''\n Prompt user for software version in the list of installed versions\n '''\n # print version list\n print('Please choose a 
version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print(\"0: System\"+selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root)+1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index)+': '+str(number)+selected)\n index += 1\n # ask for the version\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print(\"\\nUser abort!\")\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and \\\n int(choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print(\"Keeping current\")\n sys.exit(0)\n else:\n print(\"Bad version, please choose a number between 0 and %s\" %\n str(maximum))\n # return index in version table\n return index\n\n @staticmethod\n def run():\n '''\n Read software name on command line and run version selection\n '''\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" % option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print(\"Error parsing command line: You must pass software\")\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\nif __name__ == '__main__':\n Version.run()\n",
"<docstring token>\nimport os\nimport re\nimport sys\nimport glob\nimport getopt\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\nclass Version(object):\n \"\"\"\n Software management class\n \"\"\"\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == 
self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\nif __name__ == '__main__':\n Version.run()\n",
"<docstring token>\n<import token>\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\nclass Version(object):\n \"\"\"\n Software management class\n \"\"\"\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = 
self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\nif __name__ == '__main__':\n Version.run()\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n \"\"\"\n Software management class\n \"\"\"\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n 
print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + 
str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = 
input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as 
exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n <function token>\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass Version(object):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<code token>\n"
] | false |
1,010 |
55c2bf914a77c573d1b6835f54c82921d9fa6ad6
|
from ED63RDScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'C2219 ._SN',
MapName = 'Ruan',
Location = 'C2219.x',
MapIndex = 84,
MapDefaultBGM = "ed60015",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT21/C2219 ._SN',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Vogt', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01000 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01000P._CP', # 00
)
DeclNpc(
X = -2870,
Z = 0,
Y = 202000,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
ScpFunction(
"Function_0_D2", # 00, 0
"Function_1_D3", # 01, 1
"Function_2_DD", # 02, 2
"Function_3_25A", # 03, 3
"Function_4_AEC", # 04, 4
"Function_5_B4D", # 05, 5
)
def Function_0_D2(): pass
label("Function_0_D2")
Return()
# Function_0_D2 end
def Function_1_D3(): pass
label("Function_1_D3")
OP_B0(0x0, 0x78)
OP_1C(0x0, 0x0, 0x5)
Return()
# Function_1_D3 end
def Function_2_DD(): pass
label("Function_2_DD")
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_102")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_244")
label("loc_102")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_11B")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_244")
label("loc_11B")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_134")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_244")
label("loc_134")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_14D")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_244")
label("loc_14D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_166")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_244")
label("loc_166")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_17F")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_244")
label("loc_17F")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_198")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_244")
label("loc_198")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1B1")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_244")
label("loc_1B1")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1CA")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_244")
label("loc_1CA")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1E3")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_244")
label("loc_1E3")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1FC")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_244")
label("loc_1FC")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_215")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_244")
label("loc_215")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_22E")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_244")
label("loc_22E")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_244")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_244")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_259")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_244")
label("loc_259")
Return()
# Function_2_DD end
def Function_3_25A(): pass
label("Function_3_25A")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), "loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_2B2")
ChrTalk( #0
0xFE,
(
"I reckon my happiness is right here in this\x01",
"lighthouse.\x02",
)
)
CloseMessageWindow()
Jump("loc_34C")
label("loc_2B2")
ChrTalk( #1
0xFE,
(
"There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
"are looking for.\x02",
)
)
CloseMessageWindow()
ChrTalk( #2
0xFE,
"I reckon that's my happiness...\x02",
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_34C")
Jump("loc_6C1")
label("loc_34F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), "loc_477")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_3DF")
ChrTalk( #3
0xFE,
(
"There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
"and scream for help if you need it!\x02",
)
)
CloseMessageWindow()
Jump("loc_474")
label("loc_3DF")
ChrTalk( #4
0xFE,
"You lookin' for some help, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #5
0xFE,
"What do you need?\x02",
)
CloseMessageWindow()
ChrTalk( #6
0x14E,
(
"#1714FN-No. I'll be fine, honestly...\x02\x03",
"#1713FThank you for offering, sir.\x02",
)
)
CloseMessageWindow()
OP_A2(0x0)
label("loc_474")
Jump("loc_6C1")
label("loc_477")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk( #7
0xFE,
(
"I swear, this is EXACTLY what's wrong\x01",
"with youngins these days...\x02",
)
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #8
0xFE,
"Wh-What are you doing here, young lady?\x02",
)
CloseMessageWindow()
ChrTalk( #9
0x14E,
(
"#1712FU-Umm... Excuse me, sir...\x02\x03",
"You haven't seen a young girl other\x01",
"than me in here recently have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #10
0xFE,
"A young girl? 'Fraid not.\x02",
)
CloseMessageWindow()
ChrTalk( #11
0x14E,
(
"#1713FI-I see...\x02\x03",
"Sorry for troubling you...\x02",
)
)
CloseMessageWindow()
def lambda_639():
label("loc_639")
TurnDirection(0xFE, 0x14E, 0)
OP_48()
Jump("loc_639")
QueueWorkItem2(0x10, 3, lambda_639)
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #12
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #13
0xFE,
"They sure are a pain.\x02",
)
CloseMessageWindow()
OP_A2(0x2F44)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_6C1")
Jump("loc_AE8")
label("loc_6C4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), "loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_721")
ChrTalk( #14
0xFE,
"A happiness stone, you say?\x02",
)
CloseMessageWindow()
ChrTalk( #15
0xFE,
"You think somethin' like that exists?\x02",
)
CloseMessageWindow()
Jump("loc_ADE")
label("loc_721")
EventBegin(0x1)
OP_8C(0xFE, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(0x14E, -1250, 0, 202480, 270)
SetChrPos(0x14F, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk( #16
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0xFE, 90, 500)
Sleep(500)
ChrTalk( #17
0xFE,
"Wh-What might you two be doing here?\x02",
)
CloseMessageWindow()
ChrTalk( #18
0x14E,
"#1718FHello!\x02",
)
CloseMessageWindow()
OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)
Sleep(500)
OP_63(0x14E)
ChrTalk( #19
0x14E,
(
"#1714FActually, lighthouses are pretty high up,\x01",
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
"have you?\x02",
)
)
CloseMessageWindow()
ChrTalk( #20
0xFE,
"A-A happiness stone?!\x02",
)
CloseMessageWindow()
ChrTalk( #21
0x14F,
"#1730FThey're really shiny and pretty!\x02",
)
CloseMessageWindow()
ChrTalk( #22
0xFE,
(
"N-No, I don't recall ever seein' any\x01",
"such thing in all my years...\x02",
)
)
CloseMessageWindow()
ChrTalk( #23
0x14E,
(
"#1716FOh... That's too bad...\x02\x03",
"#1710FWell, thank you, anyway.\x02",
)
)
CloseMessageWindow()
TurnDirection(0x14E, 0x14F, 400)
Sleep(400)
ChrTalk( #24
0x14E,
"#1718FLet's keep looking, Polly! \x02",
)
CloseMessageWindow()
OP_43(0x14E, 0x3, 0x0, 0x4)
Sleep(2000)
ChrTalk( #25
0x14F,
"#1731FI hope your back feels better, mister!\x02",
)
CloseMessageWindow()
OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
def lambda_A1A():
label("loc_A1A")
TurnDirection(0xFE, 0x14F, 0)
OP_48()
Jump("loc_A1A")
QueueWorkItem2(0x10, 3, lambda_A1A)
OP_43(0x14F, 0x3, 0x0, 0x4)
Sleep(3000)
OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
OP_63(0x10)
ChrTalk( #26
0xFE,
"I swear, kids these days...\x02",
)
CloseMessageWindow()
ChrTalk( #27
0xFE,
"...They're sharp little devils, aren't they?\x02",
)
CloseMessageWindow()
Sleep(500)
ChrTalk( #28
0xFE,
"A happiness stone, hmm...?\x02",
)
CloseMessageWindow()
OP_A2(0x2F43)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(0x10, 0x3)
NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
IdleLoop()
label("loc_ADE")
Jump("loc_AE8")
label("loc_AE1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), "loc_AE8")
label("loc_AE8")
TalkEnd(0xFE)
Return()
# Function_3_25A end
def Function_4_AEC(): pass
label("Function_4_AEC")
def lambda_AF2():
OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_AF2)
WaitChrThread(0xFE, 0x1)
def lambda_B12():
OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B12)
WaitChrThread(0xFE, 0x1)
def lambda_B32():
OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xFE, 1, lambda_B32)
WaitChrThread(0xFE, 0x1)
Return()
# Function_4_AEC end
def Function_5_B4D(): pass
label("Function_5_B4D")
TalkBegin(0xFF)
TalkEnd(0xFF)
Return()
# Function_5_B4D end
SaveToFile()
Try(main)
|
[
"from ED63RDScenarioHelper import *\n\ndef main():\n SetCodePage(\"ms932\")\n\n CreateScenaFile(\n FileName = 'C2219 ._SN',\n MapName = 'Ruan',\n Location = 'C2219.x',\n MapIndex = 84,\n MapDefaultBGM = \"ed60015\",\n Flags = 0,\n EntryFunctionIndex = 0xFFFF,\n Reserved = 0,\n IncludedScenario = [\n 'ED6_DT21/C2219 ._SN',\n '',\n '',\n '',\n '',\n '',\n '',\n ''\n ],\n )\n\n BuildStringList(\n '@FileName', # 8\n 'Vogt', # 9\n )\n\n DeclEntryPoint(\n Unknown_00 = 0,\n Unknown_04 = 0,\n Unknown_08 = 6000,\n Unknown_0C = 4,\n Unknown_0E = 0,\n Unknown_10 = 0,\n Unknown_14 = 9500,\n Unknown_18 = -10000,\n Unknown_1C = 0,\n Unknown_20 = 0,\n Unknown_24 = 0,\n Unknown_28 = 2800,\n Unknown_2C = 262,\n Unknown_30 = 45,\n Unknown_32 = 0,\n Unknown_34 = 360,\n Unknown_36 = 0,\n Unknown_38 = 0,\n Unknown_3A = 0,\n InitScenaIndex = 0,\n InitFunctionIndex = 0,\n EntryScenaIndex = 0,\n EntryFunctionIndex = 1,\n )\n\n\n AddCharChip(\n 'ED6_DT07/CH01000 ._CH', # 00\n )\n\n AddCharChipPat(\n 'ED6_DT07/CH01000P._CP', # 00\n )\n\n DeclNpc(\n X = -2870,\n Z = 0,\n Y = 202000,\n Direction = 270,\n Unknown2 = 0,\n Unknown3 = 0,\n ChipIndex = 0x0,\n NpcIndex = 0x101,\n InitFunctionIndex = 0,\n InitScenaIndex = 2,\n TalkFunctionIndex = 0,\n TalkScenaIndex = 3,\n )\n\n\n ScpFunction(\n \"Function_0_D2\", # 00, 0\n \"Function_1_D3\", # 01, 1\n \"Function_2_DD\", # 02, 2\n \"Function_3_25A\", # 03, 3\n \"Function_4_AEC\", # 04, 4\n \"Function_5_B4D\", # 05, 5\n )\n\n\n def Function_0_D2(): pass\n\n label(\"Function_0_D2\")\n\n Return()\n\n # Function_0_D2 end\n\n def Function_1_D3(): pass\n\n label(\"Function_1_D3\")\n\n OP_B0(0x0, 0x78)\n OP_1C(0x0, 0x0, 0x5)\n Return()\n\n # Function_1_D3 end\n\n def Function_2_DD(): pass\n\n label(\"Function_2_DD\")\n\n RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), 
\"loc_102\")\n OP_99(0xFE, 0x0, 0x7, 0x672)\n Jump(\"loc_244\")\n\n label(\"loc_102\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_11B\")\n OP_99(0xFE, 0x1, 0x7, 0x640)\n Jump(\"loc_244\")\n\n label(\"loc_11B\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_134\")\n OP_99(0xFE, 0x2, 0x7, 0x60E)\n Jump(\"loc_244\")\n\n label(\"loc_134\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_14D\")\n OP_99(0xFE, 0x3, 0x7, 0x5DC)\n Jump(\"loc_244\")\n\n label(\"loc_14D\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_166\")\n OP_99(0xFE, 0x4, 0x7, 0x5AA)\n Jump(\"loc_244\")\n\n label(\"loc_166\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_17F\")\n OP_99(0xFE, 0x5, 0x7, 0x578)\n Jump(\"loc_244\")\n\n label(\"loc_17F\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_198\")\n OP_99(0xFE, 0x6, 0x7, 0x546)\n Jump(\"loc_244\")\n\n label(\"loc_198\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1B1\")\n OP_99(0xFE, 0x0, 0x7, 0x677)\n Jump(\"loc_244\")\n\n label(\"loc_1B1\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1CA\")\n OP_99(0xFE, 0x1, 0x7, 0x645)\n Jump(\"loc_244\")\n\n label(\"loc_1CA\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1E3\")\n OP_99(0xFE, 0x2, 0x7, 0x613)\n Jump(\"loc_244\")\n\n label(\"loc_1E3\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1FC\")\n OP_99(0xFE, 0x3, 0x7, 0x5E1)\n 
Jump(\"loc_244\")\n\n label(\"loc_1FC\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_215\")\n OP_99(0xFE, 0x4, 0x7, 0x5AF)\n Jump(\"loc_244\")\n\n label(\"loc_215\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_22E\")\n OP_99(0xFE, 0x5, 0x7, 0x57D)\n Jump(\"loc_244\")\n\n label(\"loc_22E\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_244\")\n OP_99(0xFE, 0x6, 0x7, 0x54B)\n\n label(\"loc_244\")\n\n Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), \"loc_259\")\n OP_99(0xFE, 0x0, 0x7, 0x5DC)\n Jump(\"loc_244\")\n\n label(\"loc_259\")\n\n Return()\n\n # Function_2_DD end\n\n def Function_3_25A(): pass\n\n label(\"Function_3_25A\")\n\n TalkBegin(0xFE)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), \"loc_6C4\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), \"loc_34F\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), \"loc_2B2\")\n\n ChrTalk( #0\n 0xFE,\n (\n \"I reckon my happiness is right here in this\\x01\",\n \"lighthouse.\\x02\",\n )\n )\n\n CloseMessageWindow()\n Jump(\"loc_34C\")\n\n label(\"loc_2B2\")\n\n\n ChrTalk( #1\n 0xFE,\n (\n \"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n \"are looking for.\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #2\n 0xFE,\n \"I reckon that's my happiness...\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x0)\n\n label(\"loc_34C\")\n\n Jump(\"loc_6C1\")\n\n label(\"loc_34F\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), \"loc_477\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), \"loc_3DF\")\n\n ChrTalk( #3\n 0xFE,\n (\n \"There's no shame in relying on 
others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n \"and scream for help if you need it!\\x02\",\n )\n )\n\n CloseMessageWindow()\n Jump(\"loc_474\")\n\n label(\"loc_3DF\")\n\n\n ChrTalk( #4\n 0xFE,\n \"You lookin' for some help, young lady?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #5\n 0xFE,\n \"What do you need?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #6\n 0x14E,\n (\n \"#1714FN-No. I'll be fine, honestly...\\x02\\x03\",\n\n \"#1713FThank you for offering, sir.\\x02\",\n )\n )\n\n CloseMessageWindow()\n OP_A2(0x0)\n\n label(\"loc_474\")\n\n Jump(\"loc_6C1\")\n\n label(\"loc_477\")\n\n EventBegin(0x1)\n OP_8C(0xFE, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(0x14E, -1280, 0, 202300, 270)\n Sleep(1000)\n\n ChrTalk( #7\n 0xFE,\n (\n \"I swear, this is EXACTLY what's wrong\\x01\",\n \"with youngins these days...\\x02\",\n )\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n OP_8C(0xFE, 90, 500)\n Sleep(500)\n\n ChrTalk( #8\n 0xFE,\n \"Wh-What are you doing here, young lady?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #9\n 0x14E,\n (\n \"#1712FU-Umm... Excuse me, sir...\\x02\\x03\",\n\n \"You haven't seen a young girl other\\x01\",\n \"than me in here recently have you?\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #10\n 0xFE,\n \"A young girl? 
'Fraid not.\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #11\n 0x14E,\n (\n \"#1713FI-I see...\\x02\\x03\",\n\n \"Sorry for troubling you...\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n def lambda_639():\n\n label(\"loc_639\")\n\n TurnDirection(0xFE, 0x14E, 0)\n OP_48()\n Jump(\"loc_639\")\n\n QueueWorkItem2(0x10, 3, lambda_639)\n OP_43(0x14E, 0x3, 0x0, 0x4)\n Sleep(3000)\n OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)\n Sleep(3000)\n OP_63(0x10)\n\n ChrTalk( #12\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #13\n 0xFE,\n \"They sure are a pain.\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x2F44)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(0x10, 0x3)\n NewScene(\"ED6_DT21/C2219 ._SN\", 107, 0, 0)\n IdleLoop()\n\n label(\"loc_6C1\")\n\n Jump(\"loc_AE8\")\n\n label(\"loc_6C4\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), \"loc_AE1\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), \"loc_721\")\n\n ChrTalk( #14\n 0xFE,\n \"A happiness stone, you say?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #15\n 0xFE,\n \"You think somethin' like that exists?\\x02\",\n )\n\n CloseMessageWindow()\n Jump(\"loc_ADE\")\n\n label(\"loc_721\")\n\n EventBegin(0x1)\n OP_8C(0xFE, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(0x14E, -1250, 0, 202480, 270)\n SetChrPos(0x14F, -1060, 0, 201620, 270)\n Sleep(1000)\n\n ChrTalk( #16\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n OP_8C(0xFE, 90, 500)\n Sleep(500)\n\n ChrTalk( #17\n 0xFE,\n \"Wh-What might you two be doing here?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #18\n 0x14E,\n \"#1718FHello!\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)\n 
Sleep(500)\n OP_63(0x14E)\n\n ChrTalk( #19\n 0x14E,\n (\n \"#1714FActually, lighthouses are pretty high up,\\x01\",\n \"aren't they?\\x02\\x03\",\n\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n \"have you?\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #20\n 0xFE,\n \"A-A happiness stone?!\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #21\n 0x14F,\n \"#1730FThey're really shiny and pretty!\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #22\n 0xFE,\n (\n \"N-No, I don't recall ever seein' any\\x01\",\n \"such thing in all my years...\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #23\n 0x14E,\n (\n \"#1716FOh... That's too bad...\\x02\\x03\",\n\n \"#1710FWell, thank you, anyway.\\x02\",\n )\n )\n\n CloseMessageWindow()\n TurnDirection(0x14E, 0x14F, 400)\n Sleep(400)\n\n ChrTalk( #24\n 0x14E,\n \"#1718FLet's keep looking, Polly! \\x02\",\n )\n\n CloseMessageWindow()\n OP_43(0x14E, 0x3, 0x0, 0x4)\n Sleep(2000)\n\n ChrTalk( #25\n 0x14F,\n \"#1731FI hope your back feels better, mister!\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n\n def lambda_A1A():\n\n label(\"loc_A1A\")\n\n TurnDirection(0xFE, 0x14F, 0)\n OP_48()\n Jump(\"loc_A1A\")\n\n QueueWorkItem2(0x10, 3, lambda_A1A)\n OP_43(0x14F, 0x3, 0x0, 0x4)\n Sleep(3000)\n OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)\n Sleep(3000)\n OP_63(0x10)\n\n ChrTalk( #26\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #27\n 0xFE,\n \"...They're sharp little devils, aren't they?\\x02\",\n )\n\n CloseMessageWindow()\n Sleep(500)\n\n ChrTalk( #28\n 0xFE,\n \"A happiness stone, hmm...?\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x2F43)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(0x10, 0x3)\n NewScene(\"ED6_DT21/C2219 ._SN\", 107, 0, 0)\n IdleLoop()\n\n label(\"loc_ADE\")\n\n Jump(\"loc_AE8\")\n\n label(\"loc_AE1\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, 
MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), \"loc_AE8\")\n\n label(\"loc_AE8\")\n\n TalkEnd(0xFE)\n Return()\n\n # Function_3_25A end\n\n def Function_4_AEC(): pass\n\n label(\"Function_4_AEC\")\n\n\n def lambda_AF2():\n OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_AF2)\n WaitChrThread(0xFE, 0x1)\n\n def lambda_B12():\n OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_B12)\n WaitChrThread(0xFE, 0x1)\n\n def lambda_B32():\n OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_B32)\n WaitChrThread(0xFE, 0x1)\n Return()\n\n # Function_4_AEC end\n\n def Function_5_B4D(): pass\n\n label(\"Function_5_B4D\")\n\n TalkBegin(0xFF)\n TalkEnd(0xFF)\n Return()\n\n # Function_5_B4D end\n\n SaveToFile()\n\nTry(main)\n",
"from ED63RDScenarioHelper import *\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n 
Jump('loc_244')\n label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n 
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\nTry(main)\n",
"<import token>\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n Jump('loc_244')\n 
label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n Jc((scpexpr(EXPR_PUSH_LONG, 1), 
scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\nTry(main)\n",
"<import token>\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n Jump('loc_244')\n 
label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n Jc((scpexpr(EXPR_PUSH_LONG, 1), 
scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,011 |
ecbca04a58c19469e63ee2310e2b2f6b86c41199
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 15:05:51 2019
@author: Brian Heckman and Kyle Oprisko
"""
import csv
"""this file opens a csv file created in the csv creator class. The main purpose of this class is to
normalize the data in the csv file, so that it can be read by the neural network.
"""
class CSV_Normalize:
    """Min-max normalize a stock's CSV data for the neural network.

    Reads '<stock>.csv' (columns used: row[2]=Open, row[3]=High,
    row[5]=Close, row[7]=sentiment), scales each series to [0, 1] with
    normalized_x = (x - min) / (max - min), and splits the result into
    70% training / 30% testing lists.
    """

    def __init__(self):
        # All state is per-instance.  (The previous revision stored these
        # as mutable class attributes, which were silently shared between
        # every CSV_Normalize instance.)
        self.stock = ""
        self._reset_state()

    def _reset_state(self):
        """(Re)initialize every list and min/max bound; the stock name is kept."""
        # Raw input columns read from the CSV.
        self.close_prices = []
        self.high_prices = []
        self.prev_prices = []
        self.sentiments = []
        # Min/max bounds used by the normalization calculation.
        self.min_close = 1000
        self.max_close = 0
        self.min_high = 1000
        self.max_high = 0
        self.min_prev = 1000
        self.max_prev = 0
        self.max_sent = 0.0
        self.min_sent = 0.0
        self.min_open = 1000
        self.max_open = 0
        # Normalized series.
        self.normalized_close = []
        self.normalized_high = []
        self.normalized_prev = []
        self.normalized_sent = []
        self.normalized_open = []
        # Output column and train/test splits.
        self.open_prices = []
        self.inputs = []
        self.training_inputs = []
        self.testing_inputs = []
        self.training_outputs = []
        self.testing_outputs = []

    @staticmethod
    def _scaled(values, lo, hi):
        """Min-max scale *values* to [0, 1]; a zero range maps everything to 0."""
        span = hi - lo
        if span == 0:
            return [0.0 for _ in values]
        return [(v - lo) / span for v in values]

    def set_stock(self, stock):
        """Set the ticker symbol; data is read from '<stock>.csv'."""
        self.stock = stock

    def set_input(self):
        """Read the four input columns from the CSV and record their bounds."""
        with open(self.stock + '.csv') as csvfile:
            for row in csv.reader(csvfile, delimiter=','):
                self.close_prices.append(row[5])
                self.high_prices.append(row[3])
                self.prev_prices.append(row[2])
                self.sentiments.append(row[7])
        # Drop the header row and the last row because the data is trailing,
        # then convert to floats.
        self.close_prices = [float(v) for v in self.close_prices[1:-1]]
        self.high_prices = [float(v) for v in self.high_prices[1:-1]]
        self.prev_prices = [float(v) for v in self.prev_prices[1:-1]]
        self.sentiments = [float(v) for v in self.sentiments[1:-1]]
        # Record the true data bounds.  (Fixes two bugs in the previous
        # revision: max_sent was compared against itself and never updated,
        # and the hard-coded 1000/0 seed values produced wrong bounds for
        # stocks trading above 1000.)
        if self.close_prices:
            self.min_close = min(self.close_prices)
            self.max_close = max(self.close_prices)
        if self.high_prices:
            self.min_high = min(self.high_prices)
            self.max_high = max(self.high_prices)
        if self.prev_prices:
            self.min_prev = min(self.prev_prices)
            self.max_prev = max(self.prev_prices)
        if self.sentiments:
            self.min_sent = min(self.sentiments)
            self.max_sent = max(self.sentiments)

    def set_normalized_input(self):
        """Populate the normalized input series (loads the CSV if needed)."""
        if self.max_prev == 0:
            # set_input() has not run yet for this instance.
            self.set_input()
        self.normalized_close = self._scaled(
            self.close_prices, self.min_close, self.max_close)
        self.normalized_high = self._scaled(
            self.high_prices, self.min_high, self.max_high)
        self.normalized_prev = self._scaled(
            self.prev_prices, self.min_prev, self.max_prev)
        self.normalized_sent = self._scaled(
            self.sentiments, self.min_sent, self.max_sent)

    def get_input(self):
        """Return the raw inputs as (close, high, prev, sentiment) tuples."""
        return list(zip(self.close_prices, self.high_prices,
                        self.prev_prices, self.sentiments))

    def get_nomralized_input(self):
        """Return the normalized inputs zipped into tuples.

        NOTE(review): the sentiment column here is the *raw* series, not
        normalized_sent — preserved for backward compatibility, but it
        looks unintentional; confirm against callers.  (The misspelled
        method name is also kept so existing callers keep working.)
        """
        return list(zip(self.normalized_close, self.normalized_high,
                        self.normalized_prev, self.sentiments))

    def set_output(self):
        """Read the output column (next day's open price) and record its bounds."""
        with open(self.stock + '.csv') as csvfile:
            for row in csv.reader(csvfile, delimiter=','):
                self.open_prices.append(row[2])
        # Drop the header and the first data point so each output row is the
        # open price one day after its corresponding input row.
        self.open_prices = [float(v) for v in self.open_prices[2:]]
        if self.open_prices:
            self.min_open = min(self.open_prices)
            self.max_open = max(self.open_prices)

    def set_normalized_output(self):
        """Load the output column and min-max normalize it."""
        self.set_output()
        self.normalized_open = self._scaled(
            self.open_prices, self.min_open, self.max_open)

    def get_output(self):
        """Return the raw open prices."""
        return self.open_prices

    def get_normalized_output(self):
        """Return the normalized open prices."""
        return self.normalized_open

    def inverse(self, normalized):
        """Map a normalized prediction back to an actual open price."""
        return normalized * (self.max_open - self.min_open) + self.min_open

    def get_training_input(self):
        """Build the feature vectors and return the training portion."""
        self.set_training_input()
        return self.training_inputs

    def set_training_input(self):
        """Build per-row feature vectors; keep the first 70% for training."""
        # Rebuilding (rather than appending) keeps repeated calls from
        # duplicating rows, as the previous revision did.
        self.inputs = [list(features) for features in
                       zip(self.normalized_close, self.normalized_high,
                           self.normalized_prev, self.normalized_sent)]
        train_end = int(.7 * len(self.inputs))
        self.training_inputs = self.inputs[:train_end]

    def get_testing_input(self):
        """Return the testing portion of the feature vectors."""
        self.set_testing_input()
        return self.testing_inputs

    def get_training_output(self):
        """Return the training portion of the normalized outputs."""
        self.set_training_output()
        return self.training_outputs

    def set_testing_input(self):
        """Keep the last 30% of the feature vectors for testing."""
        train_end = int(.7 * len(self.inputs))
        self.testing_inputs = self.inputs[train_end:]

    def set_training_output(self):
        """Keep the first 70% of the normalized outputs for training."""
        train_end = int(.7 * len(self.normalized_open))
        self.training_outputs = self.normalized_open[:train_end]

    def get_testing_output(self):
        """Return the testing portion of the normalized outputs."""
        self.set_testing_output()
        return self.testing_outputs

    def set_testing_output(self):
        """Keep the last 30% of the normalized outputs for testing."""
        train_end = int(.7 * len(self.normalized_open))
        self.testing_outputs = self.normalized_open[train_end:]

    def clear_lists(self):
        """Reset all data so another stock can be processed (stock name kept)."""
        self._reset_state()
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 8 15:05:51 2019\r\n\r\n@author: Brian Heckman and Kyle Oprisko\r\n\"\"\"\r\nimport csv\r\n\r\n\"\"\"this file opens a csv file created in the csv creator class. The main purpose of this class is to \r\nnormalize the data in the csv file, so that it can be read by the neural network. \r\n\"\"\"\r\n\r\nclass CSV_Normalize:\r\n stock = \"\"\r\n\r\n # Initialize the lists for the 4 parameters\r\n \r\n close_prices = []\r\n high_prices = []\r\n prev_prices = []\r\n sentiments = []\r\n \r\n # Initialize max and min values for normalization calc\r\n \r\n max_sent = 0.0\r\n min_sent = 0.0\r\n min_close = 1000\r\n max_close = 0 \r\n min_high = 1000\r\n max_high = 0\r\n min_prev = 1000\r\n max_prev = 0\r\n\r\n # Initialize lists for normalized values of parameters\r\n \r\n normalized_close = []\r\n normalized_high = []\r\n normalized_prev = []\r\n normalized_sent = []\r\n \r\n # Initialize output parameters\r\n \r\n open_prices = []\r\n\r\n # Initialize max and min for normalization calc\r\n \r\n min_open= 1000\r\n max_open = 0\r\n\r\n # Initialize the normalized output list\r\n \r\n normalized_open = []\r\n\r\n # Create arrays to separate into training and testing lists\r\n \r\n inputs = []\r\n training_inputs = []\r\n testing_inputs = []\r\n\r\n \r\n training_outputs = []\r\n testing_outputs = []\r\n\r\n # Set name of stock\r\n \r\n def set_stock(self,stock):\r\n self.stock = stock\r\n \r\n # Set input values\r\n \r\n def set_input(self):\r\n \r\n # Open CSV and read each row and append to specific list\r\n \r\n with open(self.stock + '.csv') as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter = ',')\r\n for row in readCSV:\r\n self.close_prices.append(row[5])\r\n self.high_prices.append(row[3])\r\n self.prev_prices.append(row[2])\r\n self.sentiments.append(row[7])\r\n\r\n # Remove the headers and the last row because the data is trailing\r\n \r\n self.close_prices = self.close_prices[1:-1]\r\n self.high_prices = 
self.high_prices[1:-1]\r\n self.prev_prices = self.prev_prices[1:-1]\r\n self.sentiments = self.sentiments[1:-1]\r\n\r\n # Turn data values into floats\r\n \r\n for m in range(len(self.close_prices)):\r\n if self.close_prices[m] != \"Close\":\r\n self.close_prices[m] = float(self.close_prices[m])\r\n for n in range(len(self.high_prices)):\r\n if self.high_prices[n] != \"High\":\r\n self.high_prices[n] = float(self.high_prices[n])\r\n for pp in range(len(self.prev_prices)):\r\n if self.prev_prices[pp] != \"Open\":\r\n self.prev_prices[pp] = float(self.prev_prices[pp])\r\n\r\n\r\n #Set Min and Max values for normalization\r\n\r\n for p in range(len(self.close_prices)):\r\n if self.close_prices[m] != \"Close\":\r\n if (self.close_prices[p] > self.max_close):\r\n self.max_close = self.close_prices[p]\r\n if (self.close_prices[p] < self.min_close):\r\n self.min_close = self.close_prices[p]\r\n for q in range(len(self.high_prices)):\r\n if (self.high_prices[q] > self.max_high):\r\n self.max_high = self.high_prices[q]\r\n if (self.high_prices[q] < self.min_high):\r\n self.min_high = self.high_prices[q] \r\n\r\n for s in range(len(self.prev_prices)):\r\n if (self.prev_prices[s] > self.max_prev):\r\n self.max_prev = self.prev_prices[s]\r\n if (self.prev_prices[s] < self.min_prev):\r\n self.min_prev = self.prev_prices[s]\r\n \r\n for s in range(len(self.sentiments)):\r\n self.sentiments[s] = float(self.sentiments[s])\r\n if (self.max_sent > self.max_sent):\r\n self.max_sent = self.sentiments[s]\r\n if (self.sentiments[s] < self.min_sent):\r\n self.min_sent = self.sentiments[s]\r\n\r\n # Perform normalization calculation and set normalized inputs \r\n def set_normalized_input(self):\r\n # Call set_input function in case it was not called already\r\n if (self.max_prev == 0):\r\n self.set_input()\r\n \r\n # Perform normalization calculation under the normalized_x = (x - min)/(max - min) model\r\n \r\n for i1 in range(len(self.close_prices)):\r\n 
self.normalized_close.append((self.close_prices[i1] - self.min_close)/(self.max_close - self.min_close))\r\n\r\n for i2 in range(len(self.high_prices)):\r\n self.normalized_high.append((self.high_prices[i2] - self.min_high)/(self.max_high - self.min_high))\r\n\r\n\r\n for i4 in range(len(self.prev_prices)):\r\n self.normalized_prev.append((self.prev_prices[i4] - self.min_prev)/(self.max_prev - self.min_prev))\r\n \r\n \r\n \r\n for i5 in range(len(self.sentiments)):\r\n diff = self.max_sent - self.min_sent\r\n if diff == 0:\r\n self.normalized_sent.append(0)\r\n else:\r\n self.normalized_sent.append((self.sentiments[i5] - self.min_sent)/(self.max_sent - self.min_sent))\r\n \r\n # Organize the input into a zipped list\r\n def get_input(self):\r\n return (list(zip(self.close_prices,self.high_prices,self.prev_prices,self.sentiments)))\r\n # Organize the normalized input into a zipped list\r\n def get_nomralized_input(self):\r\n return (list(zip(self.normalized_close,self.normalized_high,self.normalized_prev,self.sentiments)))\r\n\r\n # Set the output data\r\n def set_output(self):\r\n \r\n # Open and read the output file and append the list\r\n \r\n with open(self.stock + '.csv') as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter = ',')\r\n for row in readCSV:\r\n self.open_prices.append(row[2])\r\n \r\n # Remove the first two rows (header and first data point)\r\n self.open_prices = self.open_prices[2:]\r\n\r\n #\r\n for m in range(len(self.open_prices)):\r\n self.open_prices[m] = float(self.open_prices[m])\r\n\r\n for i in range(len(self.open_prices)):\r\n if (self.open_prices[i] > self.max_open):\r\n self.max_open = self.open_prices[i]\r\n if (self.open_prices[i] < self.min_open):\r\n self.min_open = self.open_prices[i]\r\n\r\n\r\n #uses min max function\r\n def set_normalized_output(self):\r\n self.set_output()\r\n for i1 in range(len(self.open_prices)):\r\n self.normalized_open.append((self.open_prices[i1] - self.min_open)/(self.max_open - 
self.min_open))\r\n #returns open_prices\r\n def get_output(self):\r\n return (self.open_prices)\r\n #gets the normalized output\r\n def get_normalized_output(self):\r\n return (self.normalized_open)\r\n #inverse function to get predicted values into actual values\r\n def inverse(self,normalized):\r\n return ((normalized * (self.max_open - self.min_open)) + self.min_open)\r\n #retuns what the user input\r\n def get_training_input(self):\r\n self.set_training_input()\r\n return self.training_inputs\r\n \r\n #sets puts all of the data into a list as a tuple\r\n def set_training_input(self):\r\n for i in range(len(self.normalized_close)): \r\n temp_list = [self.normalized_close[i],self.normalized_high[i],self.normalized_prev[i],self.normalized_sent[i]]\r\n self.inputs.append(temp_list)\r\n train_end = int(.7*len(self.inputs))\r\n self.training_inputs = self.inputs[0:train_end]\r\n\r\n def get_testing_input(self):\r\n self.set_testing_input()\r\n return self.testing_inputs\r\n\r\n def get_training_output(self):\r\n self.set_training_output()\r\n return self.training_outputs\r\n \r\n def set_testing_input(self):\r\n train_end = int(.7*len(self.inputs))\r\n self.testing_inputs = self.inputs[train_end:]\r\n \r\n def set_training_output(self):\r\n train_end = int(.7*len(self.normalized_open))\r\n self.training_outputs = self.normalized_open[0:train_end]\r\n \r\n def get_testing_output(self):\r\n self.set_testing_output()\r\n return self.testing_outputs\r\n def set_testing_output(self):\r\n train_end = int(.7*len(self.normalized_open))\r\n self.testing_outputs = self.normalized_open[train_end:]\r\n \r\n def clear_lists(self):\r\n #everything is reinitialized \r\n self.close_prices.clear()\r\n self.high_prices.clear()\r\n self.prev_prices.clear()\r\n self.normalized_close.clear()\r\n self.normalized_high.clear()\r\n self.normalized_prev.clear()\r\n self.open_prices.clear()\r\n self.normalized_open.clear()\r\n self.inputs.clear()\r\n self.training_inputs.clear()\r\n 
self.testing_inputs.clear()\r\n self.training_outputs.clear()\r\n self.testing_outputs.clear()\r\n self.sentiments.clear()\r\n self.normalized_sent = []\r\n self.max_sent = 0.0\r\n self.min_sent = 0.0\r\n self.min_close = 1000\r\n self.max_close = 0 \r\n self.min_high = 1000\r\n self.max_high = 0\r\n self.min_prev = 1000\r\n self.max_prev = 0\r\n self.min_open= 1000\r\n self.max_open = 0",
"<docstring token>\nimport csv\n<docstring token>\n\n\nclass CSV_Normalize:\n stock = ''\n close_prices = []\n high_prices = []\n prev_prices = []\n sentiments = []\n max_sent = 0.0\n min_sent = 0.0\n min_close = 1000\n max_close = 0\n min_high = 1000\n max_high = 0\n min_prev = 1000\n max_prev = 0\n normalized_close = []\n normalized_high = []\n normalized_prev = []\n normalized_sent = []\n open_prices = []\n min_open = 1000\n max_open = 0\n normalized_open = []\n inputs = []\n training_inputs = []\n testing_inputs = []\n training_outputs = []\n testing_outputs = []\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in range(len(self.prev_prices)):\n if self.prev_prices[s] > 
self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in 
range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n\n def get_output(self):\n return self.open_prices\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n\n def get_testing_input(self):\n self.set_testing_input()\n return self.testing_inputs\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n\n def set_testing_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.testing_outputs = self.normalized_open[train_end:]\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n 
self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n stock = ''\n close_prices = []\n high_prices = []\n prev_prices = []\n sentiments = []\n max_sent = 0.0\n min_sent = 0.0\n min_close = 1000\n max_close = 0\n min_high = 1000\n max_high = 0\n min_prev = 1000\n max_prev = 0\n normalized_close = []\n normalized_high = []\n normalized_prev = []\n normalized_sent = []\n open_prices = []\n min_open = 1000\n max_open = 0\n normalized_open = []\n inputs = []\n training_inputs = []\n testing_inputs = []\n training_outputs = []\n testing_outputs = []\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in range(len(self.prev_prices)):\n if self.prev_prices[s] > 
self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in 
range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n\n def get_output(self):\n return self.open_prices\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n\n def get_testing_input(self):\n self.set_testing_input()\n return self.testing_inputs\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n\n def set_testing_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.testing_outputs = self.normalized_open[train_end:]\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n 
self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n\n def get_output(self):\n return self.open_prices\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n\n def get_testing_input(self):\n self.set_testing_input()\n return self.testing_inputs\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n\n def set_testing_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.testing_outputs = self.normalized_open[train_end:]\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n 
self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n\n def get_output(self):\n return self.open_prices\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n\n def get_testing_input(self):\n self.set_testing_input()\n return self.testing_inputs\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n 
self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n\n def get_testing_input(self):\n self.set_testing_input()\n return self.testing_inputs\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev 
= 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n\n def get_testing_output(self):\n self.set_testing_output()\n return self.testing_outputs\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n\n def set_training_input(self):\n for i in range(len(self.normalized_close)):\n temp_list = [self.normalized_close[i], self.normalized_high[i],\n self.normalized_prev[i], self.normalized_sent[i]]\n self.inputs.append(temp_list)\n train_end = int(0.7 * len(self.inputs))\n self.training_inputs = self.inputs[0:train_end]\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n\n def get_nomralized_input(self):\n return list(zip(self.normalized_close, self.normalized_high, self.\n normalized_prev, self.sentiments))\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def 
set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n\n def set_input(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.close_prices.append(row[5])\n self.high_prices.append(row[3])\n self.prev_prices.append(row[2])\n self.sentiments.append(row[7])\n self.close_prices = self.close_prices[1:-1]\n self.high_prices = self.high_prices[1:-1]\n self.prev_prices = self.prev_prices[1:-1]\n self.sentiments = self.sentiments[1:-1]\n for m in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n self.close_prices[m] = float(self.close_prices[m])\n for n in range(len(self.high_prices)):\n if self.high_prices[n] != 'High':\n self.high_prices[n] = float(self.high_prices[n])\n for pp in range(len(self.prev_prices)):\n if self.prev_prices[pp] != 'Open':\n self.prev_prices[pp] = float(self.prev_prices[pp])\n for p in range(len(self.close_prices)):\n if self.close_prices[m] != 'Close':\n if self.close_prices[p] > self.max_close:\n self.max_close = self.close_prices[p]\n if self.close_prices[p] < self.min_close:\n self.min_close = self.close_prices[p]\n for q in range(len(self.high_prices)):\n if self.high_prices[q] > self.max_high:\n self.max_high = self.high_prices[q]\n if self.high_prices[q] < self.min_high:\n self.min_high = self.high_prices[q]\n for s in 
range(len(self.prev_prices)):\n if self.prev_prices[s] > self.max_prev:\n self.max_prev = self.prev_prices[s]\n if self.prev_prices[s] < self.min_prev:\n self.min_prev = self.prev_prices[s]\n for s in range(len(self.sentiments)):\n self.sentiments[s] = float(self.sentiments[s])\n if self.max_sent > self.max_sent:\n self.max_sent = self.sentiments[s]\n if self.sentiments[s] < self.min_sent:\n self.min_sent = self.sentiments[s]\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n 
self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def set_stock(self, stock):\n self.stock = stock\n <function token>\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > 
self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def set_normalized_input(self):\n if self.max_prev == 0:\n self.set_input()\n for i1 in range(len(self.close_prices)):\n self.normalized_close.append((self.close_prices[i1] - self.\n min_close) / (self.max_close - self.min_close))\n for i2 in range(len(self.high_prices)):\n self.normalized_high.append((self.high_prices[i2] - self.\n min_high) / (self.max_high - self.min_high))\n for i4 in range(len(self.prev_prices)):\n self.normalized_prev.append((self.prev_prices[i4] - self.\n min_prev) / (self.max_prev - self.min_prev))\n for i5 in range(len(self.sentiments)):\n diff = self.max_sent - self.min_sent\n if diff == 0:\n self.normalized_sent.append(0)\n else:\n self.normalized_sent.append((self.sentiments[i5] - self.\n min_sent) / (self.max_sent - self.min_sent))\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = 
self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n\n def get_normalized_output(self):\n return self.normalized_open\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = 
int(0.7 * len(self.inputs))\n self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n\n def set_testing_input(self):\n train_end = int(0.7 * len(self.inputs))\n 
self.testing_inputs = self.inputs[train_end:]\n\n def set_training_output(self):\n train_end = int(0.7 * len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n\n def set_training_output(self):\n train_end = int(0.7 * 
len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n\n def clear_lists(self):\n self.close_prices.clear()\n self.high_prices.clear()\n self.prev_prices.clear()\n self.normalized_close.clear()\n self.normalized_high.clear()\n self.normalized_prev.clear()\n self.open_prices.clear()\n self.normalized_open.clear()\n self.inputs.clear()\n self.training_inputs.clear()\n self.testing_inputs.clear()\n self.training_outputs.clear()\n self.testing_outputs.clear()\n self.sentiments.clear()\n self.normalized_sent = []\n self.max_sent = 0.0\n self.min_sent = 0.0\n self.min_close = 1000\n self.max_close = 0\n self.min_high = 1000\n self.max_high = 0\n self.min_prev = 1000\n self.max_prev = 0\n self.min_open = 1000\n self.max_open = 0\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n\n def set_training_output(self):\n train_end = int(0.7 * 
len(self.normalized_open))\n self.training_outputs = self.normalized_open[0:train_end]\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n\n def inverse(self, normalized):\n return normalized * (self.max_open - self.min_open) + self.min_open\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n <function token>\n <function token>\n <function token>\n <function 
token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n <function token>\n\n def get_training_input(self):\n self.set_training_input()\n return self.training_inputs\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_input(self):\n return list(zip(self.close_prices, self.high_prices, self.\n prev_prices, self.sentiments))\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_training_output(self):\n self.set_training_output()\n return self.training_outputs\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n\n def set_normalized_output(self):\n self.set_output()\n for i1 in range(len(self.open_prices)):\n self.normalized_open.append((self.open_prices[i1] - self.\n min_open) / (self.max_open - self.min_open))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def set_output(self):\n with open(self.stock + '.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n self.open_prices.append(row[2])\n self.open_prices = self.open_prices[2:]\n for m in range(len(self.open_prices)):\n self.open_prices[m] = float(self.open_prices[m])\n for i in range(len(self.open_prices)):\n if self.open_prices[i] > self.max_open:\n self.max_open = self.open_prices[i]\n if self.open_prices[i] < self.min_open:\n self.min_open = self.open_prices[i]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\nclass CSV_Normalize:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<class token>\n"
] | false |
1,012 |
9a7994a1e51c9cf7fe7d8b50ab26fa3d789fc8e5
|
#
# tests/middleware/test_static.py
#
import pytest
import growler
from pathlib import Path
from unittest import mock
from sys import version_info
from growler.middleware.static import Static
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static("/does/not/exist")
def test_static_construct_requires_directory(tmpdir):
name = "foo"
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
|
[
"#\n# tests/middleware/test_static.py\n#\n\nimport pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static(\"/does/not/exist\")\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = \"foo\"\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n static(req, res)\n\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n\n req.path = '/foo/../bar'\n static(req, res)\n\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n req.headers = {'IF-NONE-MATCH': etag}\n\n static(req, res)\n\n assert res.status_code == 304\n\n assert not res.set_type.called\n 
assert not res.send_file.called\n",
"import pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\n<function token>\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\n<function token>\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\n<function token>\n",
"<import token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
1,013 |
a319ebb05e9034f19aef39bd46830c8a607ed121
|
animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']
The animal at 1.
The third (3rd) animal.
The first (1st) animal.
The animal at 3.
The fifth (5th) animal.
The animal at 2.
The sixth (6th) animal.
The animal at 4.
|
[
"animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']\nThe animal at 1.\nThe third (3rd) animal.\nThe first (1st) animal.\nThe animal at 3.\nThe fifth (5th) animal.\nThe animal at 2.\nThe sixth (6th) animal.\nThe animal at 4.\n\n"
] | true |
1,014 |
cc6cef70381bb08247720ec32b7e8fe79ed7123d
|
#!/usr/bin/python
import sys
OPEN_BRACES = ['{', '(', '[']
CLOSE_BRACES = ['}', ')', ']']
def match_paranthesis (s, pos):
stack = []
for i,c in enumerate(s):
if not c in OPEN_BRACES and not c in CLOSE_BRACES:
continue
if c in OPEN_BRACES:
stack.append((i, c))
else:
idx = CLOSE_BRACES.index(c)
oi, oc = stack[len(stack)-1]
if oc == OPEN_BRACES[idx]:
if oi == pos:
print "FOUND MATCHING CLOSE %u:%u %c:%c" % (oi, i, s[oi], s[i])
return
stack.pop()
if len(stack):
print "BRACES NOT MATCHING"
if __name__ == "__main__":
match_paranthesis("Sometimes (when I nest them (my parentheticals ) too much (like this (and this))) they get confusing.", 10)
|
[
"#!/usr/bin/python\n\nimport sys\n\nOPEN_BRACES = ['{', '(', '[']\nCLOSE_BRACES = ['}', ')', ']']\n\ndef match_paranthesis (s, pos):\n stack = []\n\n for i,c in enumerate(s):\n if not c in OPEN_BRACES and not c in CLOSE_BRACES:\n continue\n\n if c in OPEN_BRACES:\n stack.append((i, c))\n else:\n idx = CLOSE_BRACES.index(c)\n oi, oc = stack[len(stack)-1]\n if oc == OPEN_BRACES[idx]:\n if oi == pos:\n print \"FOUND MATCHING CLOSE %u:%u %c:%c\" % (oi, i, s[oi], s[i])\n return\n stack.pop()\n\n if len(stack):\n print \"BRACES NOT MATCHING\"\n\nif __name__ == \"__main__\":\n match_paranthesis(\"Sometimes (when I nest them (my parentheticals ) too much (like this (and this))) they get confusing.\", 10)\n"
] | true |
1,015 |
ddaba7a8b53072da36224dd4618696ebf0e9a4e4
|
from __future__ import print_function
import os
import shutil
import pymake
import flopy
# set up paths
dstpth = os.path.join('temp')
if not os.path.exists(dstpth):
os.makedirs(dstpth)
mp6pth = os.path.join(dstpth, 'Modpath_7_1_000')
expth = os.path.join(mp6pth, 'examples')
exe_name = 'mp7'
srcpth = os.path.join(mp6pth, 'source')
target = os.path.join(dstpth, exe_name)
def compile_code():
# Remove the existing modpath6 directory if it exists
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
# Download the MODFLOW-2005 distribution
url = "https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip"
pymake.download_and_unzip(url, pth=dstpth)
# modify source files that prevent compiling with gfortran
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
# allow line lengths greater than 132 columns
fflags = 'ffree-line-length-512'
# make modpath 7
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True,
expedite=False, dryrun=False, double=False, debug=False,
fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if
f.endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
# rename a few files for linux
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1),
os.path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2),
os.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
# run the model
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
# clean up
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
# compile MODPATH 7
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
if __name__ == "__main__":
compile_code()
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
run_modpath7(fn)
clean_up()
|
[
"from __future__ import print_function\nimport os\nimport shutil\nimport pymake\nimport flopy\n\n# set up paths\ndstpth = os.path.join('temp')\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\nmp6pth = os.path.join(dstpth, 'Modpath_7_1_000')\nexpth = os.path.join(mp6pth, 'examples')\n\nexe_name = 'mp7'\nsrcpth = os.path.join(mp6pth, 'source')\ntarget = os.path.join(dstpth, exe_name)\n\n\ndef compile_code():\n # Remove the existing modpath6 directory if it exists\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n\n # Download the MODFLOW-2005 distribution\n url = \"https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip\"\n pymake.download_and_unzip(url, pth=dstpth)\n\n # modify source files that prevent compiling with gfortran\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n # allow line lengths greater than 132 columns\n fflags 
= 'ffree-line-length-512'\n\n # make modpath 7\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True,\n expedite=False, dryrun=False, double=False, debug=False,\n fflags=fflags)\n\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if\n os.path.isdir(os.path.join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if\n f.endswith('.mpsim')]\n return simfiles\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if\n os.path.isdir(os.path.join(expth, name))]\n # rename a few files for linux\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1),\n os.path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2),\n os.path.basename(fname1)))\n os.rename(fname2, fname1)\n\ndef run_modpath7(fn):\n # run the model\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n # clean up\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n # compile MODPATH 7\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef 
test_clean_up():\n yield clean_up\n\n\nif __name__ == \"__main__\":\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"from __future__ import print_function\nimport os\nimport shutil\nimport pymake\nimport flopy\ndstpth = os.path.join('temp')\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\nmp6pth = os.path.join(dstpth, 'Modpath_7_1_000')\nexpth = os.path.join(mp6pth, 'examples')\nexe_name = 'mp7'\nsrcpth = os.path.join(mp6pth, 'source')\ntarget = os.path.join(dstpth, exe_name)\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef 
get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == '__main__':\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"<import token>\ndstpth = os.path.join('temp')\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\nmp6pth = os.path.join(dstpth, 'Modpath_7_1_000')\nexpth = os.path.join(mp6pth, 'examples')\nexe_name = 'mp7'\nsrcpth = os.path.join(mp6pth, 'source')\ntarget = os.path.join(dstpth, exe_name)\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if 
os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == '__main__':\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"<import token>\n<assignment token>\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\n<assignment token>\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n 
endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == '__main__':\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef 
replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\n<function token>\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\n<function token>\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\n<function token>\n<function token>\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,016 |
da3be0d3b815e11d292a7c7e8f5ce32b35580f98
|
# Let's look at the lowercase letters.
import string
alphabet = " " + string.ascii_lowercase
|
[
"# Let's look at the lowercase letters.\nimport string\nalphabet = \" \" + string.ascii_lowercase\n",
"import string\nalphabet = ' ' + string.ascii_lowercase\n",
"<import token>\nalphabet = ' ' + string.ascii_lowercase\n",
"<import token>\n<assignment token>\n"
] | false |
1,017 |
299432b095f16c3cb4949319705800d06f534cf9
|
from __future__ import with_statement # this is to work with python2.5
from pyps import workspace, module
def invoke_function(fu, ws):
return fu._get_code(activate = module.print_code_out_regions)
if __name__=="__main__":
workspace.delete('paws_out_regions')
with workspace('paws_out_regions.c',name='paws_out_regions',deleteOnClose=True) as ws:
for fu in ws.fun:
print invoke_function(fu, ws)
|
[
"from __future__ import with_statement # this is to work with python2.5\nfrom pyps import workspace, module\n\ndef invoke_function(fu, ws):\n return fu._get_code(activate = module.print_code_out_regions)\n\nif __name__==\"__main__\":\n\tworkspace.delete('paws_out_regions')\n\twith workspace('paws_out_regions.c',name='paws_out_regions',deleteOnClose=True) as ws:\n \tfor fu in ws.fun:\n \tprint invoke_function(fu, ws)\n\n"
] | true |
1,018 |
c1bb7b579e6b251ddce41384aef1243e411c5d0e
|
# coding: utf-8
# ## Estimating Travel Time
#
#
# The objective of this document is proposing a prediction model for estimating the travel time of two
# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time.
# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.
# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.
# ## Preparation
# Import required libraries
# In[136]:
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from datetime import datetime
from datetime import timedelta
from datetime import time
import statsmodels.api as sm
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
get_ipython().magic('matplotlib inline')
# ## Reading data
# In[169]:
df_train = pd.read_csv('train.csv',index_col= 'row_id')
df_test = pd.read_csv('test.csv',index_col= 'row_id')
df_train.head()
# ## Feature engineering
#
# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. Given the velocity and the distance, it is easy to compute the duration of the travel.
#
# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims.
#
# According to this observation the following features are computted by using the raw data and added to the dataframe.
#
# * Distance between starting and ending computted by vincenty formula
# * The time of the day of travel (in sec far from the midnight)
# * The day of the week (Monday, Tuesday, etc). For this categorical data, six dummy variables are added to datafram
# * The month of the travel to capture seasnolity effect.
# * The sequare of distance
# * The velocity is used as the predication variable.
#
# In[156]:
def distance(row):
source = (row['start_lat'], row['start_lng'])
dest = ( row['end_lat'], row['end_lng'])
return vincenty(source,dest).miles
Manhattan = (40.7831, -73.9712)
def pickup_to_MH(row):
'''find the distance between pick up point and Manhattan center'''
source = (row['start_lat'], row['start_lng'])
return vincenty(source,Manhattan).miles
def dropoff_to_MH(row):
'''find the distance between dropoff point and Manhattan center'''
dest = ( row['end_lat'], row['end_lng'])
return vincenty(dest,Manhattan).miles
def day_of_week(ep):
return datetime.fromtimestamp(ep).strftime("%A")
def month(ep):
    """Calendar month (1-12) of POSIX timestamp ``ep`` in local time."""
    local = datetime.fromtimestamp(ep)
    return local.month
def time_of_day(ep):
    """Seconds between the timestamp's local wall-clock time and the nearest midnight.

    Uses the ``.seconds`` component of the timedelta from a fixed midnight
    anchor, which reduces the offset modulo whole days (0..86399) before
    folding it to the closer midnight.
    """
    anchor = datetime(2015, 1, 1, 0, 0, 0)
    since_midnight = (datetime.fromtimestamp(ep) - anchor).seconds
    return min(since_midnight, 86400 - since_midnight)
def year(ep):
    """Calendar year of POSIX timestamp ``ep`` in local time."""
    local = datetime.fromtimestamp(ep)
    return local.year
def add_features(df_train_s):
    """Append the engineered features to ``df_train_s`` and return it.

    Adds the weekday (as a label plus dummy columns), the month, the
    time-of-day offset, the trip distance and its square, and the distances
    from the pick-up/drop-off points to central Manhattan.  Note that most
    columns are also added to the passed frame in place.
    """
    # Day of the week, kept both as a label and as dummy indicator columns.
    weekday = df_train_s['start_timestamp'].map(day_of_week)
    df_train_s['day'] = weekday
    dummies = pd.get_dummies(weekday, prefix='day', drop_first=True)
    df_train_s = pd.concat([df_train_s, dummies], axis=1)
    # Calendar / clock features of the departure time.
    df_train_s['month'] = df_train_s['start_timestamp'].map(month)
    df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)
    # Trip length and its square.
    df_train_s['distance'] = df_train_s.apply(distance, axis=1)
    df_train_s['distance2'] = df_train_s['distance'] ** 2
    # Proximity of both endpoints to central Manhattan.
    df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)
    df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)
    return df_train_s
# Add the engineered features to both the training and test data sets.
# NOTE(review): the prose says 10% of the training data is used, but the
# code samples frac=0.01, i.e. 1% -- confirm which was intended.
# In[24]:
np.random.seed(42)
# Reproducible random subsample of the training data.
df_train_s = df_train.sample(frac=0.01, replace=False)
df_train_s = add_features(df_train_s)
# Target variable: average trip velocity in mph (distance is in miles,
# duration in seconds -> divide by 3600 to convert to hours).
df_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))
# In[25]:
df_train_s.head()
# In[170]:
# Add the same engineered features to the test set.
df_test = add_features(df_test)
# ## Removing outliers
# Given speed limits and the usual New York traffic, trips should not
# exceed 90 mph, so faster rows are dropped as outliers.  Rows below
# 0.5 mph are dropped too: many samples have zero distance between the
# start and end points, likely due to GPS problems.
# In[41]:
df_train_s = df_train_s[df_train_s['velocity']<90]
df_train_s = df_train_s[df_train_s['velocity']>.5]
# ## Data visualization
#
# Scatter plots of the pick-up and drop-off coordinates of the sampled
# trips (all around New York).
# NOTE(review): the first plot passes (start_lat, start_lng) as (x, y)
# while the second passes (end_lng, end_lat); the identical axis labels
# can therefore match only one of the two -- confirm the intended order.
# In[30]:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
ax = df_train_s.plot.scatter( 'start_lat','start_lng',
                   ax = axes[0],
                   title='Start point of travel')
ax.set(xlabel="latitude", ylabel='longitude')
ax = df_train_s.plot.scatter('end_lng','end_lat',
                   ax = axes[1],
                   title='Destination of the travel')
ax.set(xlabel="latitude", ylabel='longitude')
plt.show()
# Summary statistics for the distance, duration and velocity of each trip,
# plus the velocity density; a log-normal or Gamma distribution looks like
# a reasonable candidate for it.
# In[42]:
df_train_s[['distance', 'duration','velocity']].describe()
# In[43]:
# `normed=` was deprecated in Matplotlib 2.1 and removed in 3.1;
# `density=True` is the supported way to plot a normalized histogram.
df_train_s['velocity'].hist(bins=1000,density=True)
# ### Correlation matrix
# In[44]:
corr = df_train_s.corr()
# Mask for the upper triangle so the heatmap shows each pair only once.
# `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `bool` is the supported dtype here.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# set up the matplotlib figure
f, ax = plt.subplots(figsize=(18, 18))
# generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
            square=True,
            linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
plt.show()
# In[53]:
# Velocity vs. trip distance.
df_train_s.plot.scatter( 'distance','velocity')
# In[48]:
# ### Seasonality effect on velocity: mean velocity per month,
# with one-standard-deviation error bars.
gr= df_train_s[['velocity','month']].groupby(by='month')
gr.mean().plot.bar(yerr=gr.std())
# ## Data preprocessing
#
# Split the data into train/test sets at a 4:1 ratio to compare models.
# This held-out set is separate from the provided test set.
# In[105]:
# Feature columns: everything except the target and raw-duration columns.
# The original code built `cl` directly from a `set`, whose iteration order
# is arbitrary -- but `dist_train`/`dist_test` below assume that column 1
# of the design matrix is the trip distance.  Pin 'distance' first and sort
# the remaining columns so the layout is deterministic.
cl = ['distance'] + sorted(set(df_train_s.keys()) - {'velocity', 'duration', 'day', 'distance'})
X = np.array(df_train_s[cl])
X1 = np.insert(X, 0, 1, axis=1)  # prepend an intercept column of ones
y = np.array(df_train_s['velocity'])
X_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)
# Column 0 is the intercept, so column 1 is the first feature: distance.
dist_train = X_train[:,1]
dist_test = X_test[:,1]
# In[106]:
list(enumerate(cl))
dist_train.mean()
# ## Linear model
# Fit an ordinary least-squares linear model (scikit-learn) and plot the
# fitted coefficient for each feature column.
# In[204]:
model_sk = LinearRegression()
model_sk.fit(X_train, y_train)
plt.figure(figsize=(12, 8))
# One bar per coefficient; the -0.4 offset centers each bar on its tick.
plt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)
plt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')
plt.xlim([-1, model_sk.coef_.shape[0]])
plt.title("Linear model coefficients")
plt.show()
# The statsmodels summary below gives a fuller picture.  Except for X12
# (the Sunday dummy) all variables are significant: the p-values are ~zero
# and the null hypothesis is rejected.
# In[205]:
linear_model = sm.OLS(y_train, X_train)
linear_results = linear_model.fit()
print(linear_results.summary())
# ## Generalized linear model
# A GLM with a Gamma family, motivated by the velocity distribution
# observed above.
# In[206]:
gamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
# ## Deep Neural Network (DNN)
#
# A Keras DNN with three hidden layers, dropout between consecutive layers,
# and ReLU/softmax activations.
# In[195]:
DNN_model = Sequential()
# `init=` is the Keras 1 keyword; this script otherwise uses the Keras 2
# API (`epochs=` in fit below), where the argument is `kernel_initializer=`.
DNN_model.add(Dense(100, input_dim=X_train.shape[1], kernel_initializer='uniform', activation='relu'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(50, kernel_initializer='uniform', activation='softmax'))
DNN_model.add(Dropout(0.5))
DNN_model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
DNN_model.add(Dropout(0.5))
# Single-unit output layer; ReLU keeps the predicted velocity non-negative.
DNN_model.add(Dense(1, kernel_initializer='uniform', activation='relu'))
DNN_model.summary()
# ### Fitting the DNN
# In[196]:
# Per-column means of the full design matrix; inputs are divided by these
# means before training (note: mean scaling, not full standardization).
mn = X1.mean(axis=0)
#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')
DNN_model.compile(loss='mean_absolute_error',optimizer='adam')
history = DNN_model.fit(X_train/mn,y_train,
          validation_data=(X_test/mn, y_test),
          epochs =100,
          batch_size=100,
          verbose=2)
# In[197]:
# Training vs. validation loss per epoch.
plt.figure(figsize=(10, 8))
plt.title("Dense model training", fontsize=12)
plt.plot(history.history["loss"], label="Train")
plt.plot(history.history["val_loss"], label="Test")
plt.grid("on")
plt.xlabel("Epoch", fontsize=12)
plt.ylabel("loss", fontsize=12)
plt.legend(loc="upper right")
# ## Evaluation
#
# Compare the proposed models by the mean absolute error of predicted vs.
# actual durations, and by the mean absolute percentage error.  Note the
# comparison is on durations (as asked in the question), not on velocity.
# In[207]:
preds_test, preds_train = {}, {}
# Linear model
preds_test['linear'] = linear_results.predict(X_test)
preds_train['linear'] = linear_results.predict(X_train)
# GLM (Gamma model)
preds_test['GLM'] = gamma_results.predict(X_test)
preds_train['GLM'] = gamma_results.predict(X_train)
# Deep learning -- inputs must be mean-scaled exactly as during training.
preds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))
preds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))
# The following functions are used for evaluation.
# In[84]:
def mean_absolute_error(dist, y_true, y_pred):
    """Mean absolute duration error, in seconds, implied by velocity predictions.

    Args:
        dist (ndarray): distances between pick-up and drop-off (miles).
        y_true (ndarray): observed velocities.
        y_pred (ndarray): predicted velocities.

    Returns:
        float: mean |actual duration - predicted duration| in seconds,
        with non-finite entries (e.g. from zero velocities) dropped.
    """
    duration_gap = np.abs(dist / y_true - dist / y_pred)
    finite_gaps = duration_gap[np.isfinite(duration_gap)]
    return np.mean(finite_gaps) * 3600
def mean_absolute_percentage_error(dist, y_true, y_pred):
    """Mean absolute percentage error of the velocity predictions.

    Args:
        dist (ndarray): distances between pick-up and drop-off (unused;
            kept so the signature matches ``mean_absolute_error``).
        y_true (ndarray): observed velocities.
        y_pred (ndarray): predicted velocities.

    Returns:
        float: mean |y_true / y_pred - 1| as a percentage, with
        non-finite entries dropped.
    """
    relative_gap = np.abs(y_true / y_pred - 1)
    finite_gaps = relative_gap[np.isfinite(relative_gap)]
    return np.mean(finite_gaps) * 100
def evalute(dist, y_true, prediction):
    """Score every model's velocity predictions.

    Args:
        dist (ndarray): distances between pick-up and drop-off.
        y_true (ndarray): observed velocities.
        prediction (dict): model name -> predicted velocities.

    Returns:
        tuple: (MAE, MAPE) dicts keyed by model name, holding the mean
        absolute duration error (seconds) and the mean absolute
        percentage error respectively.
    """
    MAE = {name: mean_absolute_error(dist, y_true, p)
           for name, p in prediction.items()}
    MAPE = {name: mean_absolute_percentage_error(dist, y_true, p)
            for name, p in prediction.items()}
    return MAE, MAPE
# In[209]:
# Score every model on both the held-out split and the training split.
MAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)
MAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)
# One row per metric/split, one column per model.
pd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train],
             index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()
# In[201]:
# Mean trip distance of the training split, for context on the MAE scale.
dist_train.mean()
# ## Generate predictions for the test set
#
# Comparing the three models (linear, GLM, DNN), the GLM is used to
# generate the predictions for the provided test set.
# In[212]:
XX = np.array(df_test[cl])
XX = np.insert(XX, 0, 1, axis=1)  # intercept column, matching the training design matrix
dist_x = XX[:,1]  # column 1 of the design matrix is the trip distance
#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600
# Predicted duration (seconds) = distance / predicted velocity, hours -> sec.
GLM_TD = dist_x/gamma_results.predict(XX)*3600
df_ans= pd.DataFrame(GLM_TD, columns =['duration'])
df_ans.index.name = 'row_id'
df_ans.to_csv('answer.csv')
# NOTE: the original script ended with `df_ans = pd.DataFrame(TD, ...)`,
# but `TD` is never defined (a leftover from the commented-out DNN path),
# so that line would raise NameError after the answer file was written;
# it has been removed.
# ## Extension and Further Ideas
# Here we only use the vincenty distance, but by connecting to the Google Maps API and finding the real road distance between the start and end points, the predictor could definitely be improved. Also, only a small sample (frac=0.01, i.e. 1%) of the data points was used because of the limitations of running the DNN; by using a GPU or running in the cloud we could use all the samples.
#
#
#
#
|
[
"\n# coding: utf-8\n\n# ## Estimating Travel Time\n# \n# \n# The objective of this document is proposing a prediction model for estimating the travel time of two\n# specified locations at a given departure time. The main idea here is predicting the velocity of the trip. Given the distance between starting and ending point of the trip, it is possible to easily compute the Travel Time. \n# According to the given data, different features including the time of the day, day of the week, month, travel distance, and distance to the center of the city (New York) are used.\n# Different prediction models (Linear, GLM and Deep Neural Network) are compared, and the GLM is used for genrating the final results.\n\n# ## Preparation\n# Import required libraries\n\n# In[136]:\n\nimport numpy as np\nimport pandas as pd\nfrom geopy.distance import vincenty\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import time\n\nimport statsmodels.api as sm\n \n\n\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cross_validation import KFold\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers.normalization import BatchNormalization\n\n\nget_ipython().magic('matplotlib inline')\n\n\n# ## Reading data\n\n# In[169]:\n\ndf_train = pd.read_csv('train.csv',index_col= 'row_id')\ndf_test = pd.read_csv('test.csv',index_col= 'row_id')\ndf_train.head()\n\n\n# ## Feature engineering\n# \n# It is clear that the travel time of trip depends on the starting and ending point. In other words, the most uncertain component in the prediction of travel time is the velocity of the trip. 
Given the velocity and the distance, it is easy to compute the duration of the travel. \n# \n# Also, I observed all travels in both train and test dataset are happening around New York City. Therefore, the main component in determining the velocity of is the city traffic. We know that traffic is a time-dependent phenomenon which depends on the time of the day, the day of the week, and month of the year. In addition, the traffic is usually heavier in Manhattan (downtown of the city) in comparing to the other point of the city. Therefore, if the starting or ending point of the travel is close to the Manhattan we expect higher traffic comparing to the other neighborhoods. In visualization section, I provide enough evidence from the data set to support the aforementioned claims. \n# \n# According to this observation the following features are computted by using the raw data and added to the dataframe.\n# \n# * Distance between starting and ending computted by vincenty formula\n# * The time of the day of travel (in sec far from the midnight) \n# * The day of the week (Monday, Tuesday, etc). 
For this categorical data, six dummy variables are added to datafram\n# * The month of the travel to capture seasnolity effect.\n# * The sequare of distance\n# * The velocity is used as the predication variable.\n# \n\n# In[156]:\n\ndef distance(row):\n source = (row['start_lat'], row['start_lng'])\n dest = ( row['end_lat'], row['end_lng'])\n return vincenty(source,dest).miles\n\n\nManhattan = (40.7831, -73.9712)\ndef pickup_to_MH(row):\n '''find the distance between pick up point and Manhattan center'''\n source = (row['start_lat'], row['start_lng'])\n return vincenty(source,Manhattan).miles\n\ndef dropoff_to_MH(row):\n '''find the distance between dropoff point and Manhattan center'''\n dest = ( row['end_lat'], row['end_lng'])\n return vincenty(dest,Manhattan).miles\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime(\"%A\")\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep)- ref).seconds\n return min(sec, 86400- sec)\n \ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\ndef add_features(df_train_s):\n \n # Add day of the week and the dummy variable\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n \n DD = pd.get_dummies( DD,prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD],axis =1 )\n\n # Month, time of the dat, df_train_s\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n \n # distance between start and end of the trip\n df_train_s['distance'] = df_train_s.apply(lambda x :distance(x), axis=1 )\n df_train_s['distance2'] = df_train_s['distance']**2\n\n # distance between start, end, and center of Manhatan \n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1 )\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1 )\n return df_train_s\n\n\n# Now, we can easily 
add all of the above features to both traing and test data set. Due to time limtation and calculation power I only used 10% of the traing data.\n\n# In[24]:\n\nnp.random.seed(42)\ndf_train_s = df_train.sample(frac=0.01, replace=False)\ndf_train_s = add_features(df_train_s)\ndf_train_s['velocity'] = np.array(df_train_s['distance']/(df_train_s['duration']/3600))\n\n\n# In[25]:\n\ndf_train_s.head()\n\n\n# In[170]:\n\n# adding the feature to test set.\ndf_test = add_features(df_test)\n\n\n# ## Removing Outlires\n# The following functions are used to compute these features. Considering the speed limit and the fact the usual trafic in New York, it is reseanable to assume that always the speed show not exceed 90 mph. Therefore, I remove the points with more than this number as the outlires. Also, I removed the data with less than .5 mph. Specificlly, there exists many samples with zero distance between starting and ending point which might happen becouse GPS problem.\n\n# In[41]:\n\ndf_train_s = df_train_s[df_train_s['velocity']<90]\ndf_train_s = df_train_s[df_train_s['velocity']>.5]\n\n\n# ## Data Visulazation\n# \n# First we look at the starting and ending point of the trips which happens in New York.\n# \n# \n# \n\n# In[30]:\n\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))\n\nax = df_train_s.plot.scatter( 'start_lat','start_lng',\n ax = axes[0],\n title='Start point of travel')\nax.set(xlabel=\"latitude\", ylabel='longitude')\nax = df_train_s.plot.scatter('end_lng','end_lat',\n ax = axes[1],\n title='Destination of the travel')\nax.set(xlabel=\"latitude\", ylabel='longitude')\nplt.show()\n\n\n# Here are some statitcs about the volacity, distance of each trip and its duration. Also, we looked at the density function of the volacity. 
A log-normal or Gamma distribution are approprate candiatdes for this distribution.\n\n# In[42]:\n\ndf_train_s[['distance', 'duration','velocity']].describe()\n\n\n# In[43]:\n\ndf_train_s['velocity'].hist(bins=1000,normed=True)\n\n\n# ### Corrolation matrix\n\n# In[44]:\n\ncorr = df_train_s.corr()\n\n# generate a mask for the lower triangle\nmask = np.zeros_like(corr, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\n\n# set up the matplotlib figure\nf, ax = plt.subplots(figsize=(18, 18))\n\n# generate a custom diverging colormap\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n# draw the heatmap with the mask and correct aspect ratio\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,\n square=True, \n linewidths=.5, cbar_kws={\"shrink\": .5}, ax=ax)\n\nplt.show()\n\n\n# In[53]:\n\ndf_train_s.plot.scatter( 'distance','velocity')\n\n\n# In[48]:\n\n### Seanility and time Effect on Velocity\ngr= df_train_s[['velocity','month']].groupby(by='month')\ngr.mean().plot.bar(yerr=gr.std())\n\n\n# ## Data preprocessing\n# \n# Let's split our data to train and test set in fraction of $\\frac{4}{1}$ to facilate comparing the results. 
\n# This test set is differenet from the given test set.\n\n# In[105]:\n\ncl = list(set(df_train_s.keys())-{'velocity','duration','day'})\nX = np.array(df_train_s[cl])\nX1 = np.insert(X, 0, 1, axis=1)\ny = np.array(df_train_s['velocity'])\n\n\nX_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2, random_state=42)\n\ndist_train = X_train[:,1]\ndist_test = X_test[:,1]\n\n\n# In[106]:\n\nlist(enumerate(cl))\ndist_train.mean()\n\n\n# ## Linear Model \n\n# In[204]:\n\nmodel_sk = LinearRegression()\nmodel_sk.fit(X_train, y_train)\n\nplt.figure(figsize=(12, 8))\nplt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)\nplt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')\nplt.xlim([-1, model_sk.coef_.shape[0]])\nplt.title(\"Linear model coefficients\")\nplt.show()\n\n\n# The folling chart also provide better understading. Excepet X12 (dummy for sunday) all the other variables are significant; the p-value is zero and null-hypothesis is rejected.\n\n# In[205]:\n\nlinear_model = sm.OLS(y_train, X_train)\nlinear_results = linear_model.fit()\nprint(linear_results.summary())\n\n\n# ## Generalized Linear Model\n# I tried GLM with gamma fammaly. \n\n# In[206]:\n\ngamma_model = sm.GLM( y_train, X_train,family=sm.families.Gamma())\ngamma_results = gamma_model.fit()\nprint(gamma_results.summary())\n\n\n# ## Deep Neural Network (DNN)\n# \n# Here, I am useing a DNN as a prediction model. I am using the Keras package to train the network. Network includes 3 layers. Also, between each two layer a dropout layer is add. RELU and softmax are used as the activation functions. Here, I define the model. \n# \n# I normilized the data the input data to imporve the performance. 
\n\n# In[195]:\n\nDNN_model = Sequential()\nDNN_model.add(Dense(100,input_dim=X_train.shape[1],init='uniform',activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(50,init='uniform',activation='softmax'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(100,init='uniform',activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(1,init='uniform',activation='relu'))\n\nDNN_model.summary()\n\n\n# ### Fitting the DNN\n\n# In[196]:\n\nmn = X1.mean(axis=0)\n#model.compile(loss='mean_absolute_error',optimizer='adam',metrics='[accuracy]')\nDNN_model.compile(loss='mean_absolute_error',optimizer='adam')\nhistory = DNN_model.fit(X_train/mn,y_train, \n validation_data=(X_test/mn, y_test),\n epochs =100,\n batch_size=100,\n verbose=2)\n\n\n\n# In[197]:\n\nplt.figure(figsize=(10, 8))\nplt.title(\"Dense model training\", fontsize=12)\nplt.plot(history.history[\"loss\"], label=\"Train\")\nplt.plot(history.history[\"val_loss\"], label=\"Test\")\nplt.grid(\"on\")\nplt.xlabel(\"Epoch\", fontsize=12)\nplt.ylabel(\"loss\", fontsize=12)\nplt.legend(loc=\"upper right\")\n\n\n# ## Evalution\n# \n# In this part, I compare the propsed models and choose the best one. I compare the results based on mean absolute\n# error of predicted versus actual durations, and also mean absolute percentage error which is the percantge of the error. Note that here we compare based on duration as asked in the question and not the velocity. 
\n# \n\n# In[207]:\n\npreds_test, preds_train = {}, {}\n\n#Linear Model\npreds_test['linear'] = linear_results.predict(X_test)\npreds_train['linear'] = linear_results.predict(X_train)\n\n#GLM (Gamma Model)\n\npreds_test['GLM'] = gamma_results.predict(X_test)\npreds_train['GLM'] = gamma_results.predict(X_train)\n\n#Deep Learning\npreds_test['DL'] = np.squeeze(DNN_model.predict(X_test/mn))\npreds_train['DL'] = np.squeeze(DNN_model.predict(X_train/mn))\n\n\n\n\n# The functions are used for evalution\n\n# In[84]:\n\ndef mean_absolute_error(dist,y_true, y_pred ): \n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist/y_true - dist/y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) *3600\n\n\ndef mean_absolute_percentage_error(dist,y_true, y_pred ): \n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(y_true/y_pred - 1)\n err = err[np.isfinite(err)]\n return np.mean(err)*100\n\n\ndef evalute(dist,y_true,prediction):\n MAE, MAPE= {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist,y_true, y_pred )\n MAPE[kys] = mean_absolute_percentage_error(dist,y_true, y_pred )\n \n \n return MAE, MAPE\n\n\n# In[209]:\n\nMAE_train, MAPE_train = evalute(dist_train,y_train, preds_train)\nMAE_test, MAPE_test = evalute(dist_test,y_test, preds_test)\n\n\npd.DataFrame([MAE_test,MAE_train, MAPE_test, MAPE_train], \n index= ['MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train'] ).transpose()\n\n\n# In[201]:\n\ndist_train.mean()\n\n\n# ## Generate Prediction for Test Set\n# \n# By comparing the three models (linear, GLM, DNN), I choose GLM for generating the predication for the given test set. 
\n\n# In[212]:\n\nXX = np.array(df_test[cl])\nXX = np.insert(XX, 0, 1, axis=1)\n\ndist_x = XX[:,1]\n#DNN_TD = dist_x/np.squeeze(DNN_model.predict(XX/mn))*3600\nGLM_TD = dist_x/gamma_results.predict(XX)*3600\ndf_ans= pd.DataFrame(GLM_TD, columns =['duration'])\n\n\ndf_ans.index.name = 'row_id'\ndf_ans.to_csv('answer.csv')\ndf_ans= pd.DataFrame(TD, columns =['duration'])\n\n\n# ## Extention and Further Idea\n# Here, we only use the vincenty, but by conteccting to google API and fidning the real distance between start and end point the preditor defenitlly can be improved. Also, here I only used 10% of data points becouse of the limitation on runnig the DNN. By using GPU or running over the cloud we can use all the samples. \n# \n# \n# \n# \n",
"import numpy as np\nimport pandas as pd\nfrom geopy.distance import vincenty\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import time\nimport statsmodels.api as sm\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cross_validation import KFold\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers.normalization import BatchNormalization\nget_ipython().magic('matplotlib inline')\ndf_train = pd.read_csv('train.csv', index_col='row_id')\ndf_test = pd.read_csv('test.csv', index_col='row_id')\ndf_train.head()\n\n\ndef distance(row):\n source = row['start_lat'], row['start_lng']\n dest = row['end_lat'], row['end_lng']\n return vincenty(source, dest).miles\n\n\nManhattan = 40.7831, -73.9712\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s 
= pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\nnp.random.seed(42)\ndf_train_s = df_train.sample(frac=0.01, replace=False)\ndf_train_s = add_features(df_train_s)\ndf_train_s['velocity'] = np.array(df_train_s['distance'] / (df_train_s[\n 'duration'] / 3600))\ndf_train_s.head()\ndf_test = add_features(df_test)\ndf_train_s = df_train_s[df_train_s['velocity'] < 90]\ndf_train_s = df_train_s[df_train_s['velocity'] > 0.5]\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))\nax = df_train_s.plot.scatter('start_lat', 'start_lng', ax=axes[0], title=\n 'Start point of travel')\nax.set(xlabel='latitude', ylabel='longitude')\nax = df_train_s.plot.scatter('end_lng', 'end_lat', ax=axes[1], title=\n 'Destination of the travel')\nax.set(xlabel='latitude', ylabel='longitude')\nplt.show()\ndf_train_s[['distance', 'duration', 'velocity']].describe()\ndf_train_s['velocity'].hist(bins=1000, normed=True)\ncorr = df_train_s.corr()\nmask = np.zeros_like(corr, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\nf, ax = plt.subplots(figsize=(18, 18))\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, square=True, linewidths=\n 0.5, cbar_kws={'shrink': 0.5}, ax=ax)\nplt.show()\ndf_train_s.plot.scatter('distance', 'velocity')\ngr = df_train_s[['velocity', 'month']].groupby(by='month')\ngr.mean().plot.bar(yerr=gr.std())\ncl = list(set(df_train_s.keys()) - {'velocity', 'duration', 'day'})\nX = np.array(df_train_s[cl])\nX1 = np.insert(X, 0, 1, axis=1)\ny = np.array(df_train_s['velocity'])\nX_train, X_test, y_train, 
y_test = train_test_split(X1, y, test_size=0.2,\n random_state=42)\ndist_train = X_train[:, 1]\ndist_test = X_test[:, 1]\nlist(enumerate(cl))\ndist_train.mean()\nmodel_sk = LinearRegression()\nmodel_sk.fit(X_train, y_train)\nplt.figure(figsize=(12, 8))\nplt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)\nplt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')\nplt.xlim([-1, model_sk.coef_.shape[0]])\nplt.title('Linear model coefficients')\nplt.show()\nlinear_model = sm.OLS(y_train, X_train)\nlinear_results = linear_model.fit()\nprint(linear_results.summary())\ngamma_model = sm.GLM(y_train, X_train, family=sm.families.Gamma())\ngamma_results = gamma_model.fit()\nprint(gamma_results.summary())\nDNN_model = Sequential()\nDNN_model.add(Dense(100, input_dim=X_train.shape[1], init='uniform',\n activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(50, init='uniform', activation='softmax'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(100, init='uniform', activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(1, init='uniform', activation='relu'))\nDNN_model.summary()\nmn = X1.mean(axis=0)\nDNN_model.compile(loss='mean_absolute_error', optimizer='adam')\nhistory = DNN_model.fit(X_train / mn, y_train, validation_data=(X_test / mn,\n y_test), epochs=100, batch_size=100, verbose=2)\nplt.figure(figsize=(10, 8))\nplt.title('Dense model training', fontsize=12)\nplt.plot(history.history['loss'], label='Train')\nplt.plot(history.history['val_loss'], label='Test')\nplt.grid('on')\nplt.xlabel('Epoch', fontsize=12)\nplt.ylabel('loss', fontsize=12)\nplt.legend(loc='upper right')\npreds_test, preds_train = {}, {}\npreds_test['linear'] = linear_results.predict(X_test)\npreds_train['linear'] = linear_results.predict(X_train)\npreds_test['GLM'] = gamma_results.predict(X_test)\npreds_train['GLM'] = gamma_results.predict(X_train)\npreds_test['DL'] = np.squeeze(DNN_model.predict(X_test / mn))\npreds_train['DL'] = 
np.squeeze(DNN_model.predict(X_train / mn))\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\ndef mean_absolute_percentage_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(y_true / y_pred - 1)\n err = err[np.isfinite(err)]\n return np.mean(err) * 100\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\nMAE_train, MAPE_train = evalute(dist_train, y_train, preds_train)\nMAE_test, MAPE_test = evalute(dist_test, y_test, preds_test)\npd.DataFrame([MAE_test, MAE_train, MAPE_test, MAPE_train], index=[\n 'MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train']).transpose()\ndist_train.mean()\nXX = np.array(df_test[cl])\nXX = np.insert(XX, 0, 1, axis=1)\ndist_x = XX[:, 1]\nGLM_TD = dist_x / gamma_results.predict(XX) * 3600\ndf_ans = pd.DataFrame(GLM_TD, columns=['duration'])\ndf_ans.index.name = 'row_id'\ndf_ans.to_csv('answer.csv')\ndf_ans = pd.DataFrame(TD, columns=['duration'])\n",
"<import token>\nget_ipython().magic('matplotlib inline')\ndf_train = pd.read_csv('train.csv', index_col='row_id')\ndf_test = pd.read_csv('test.csv', index_col='row_id')\ndf_train.head()\n\n\ndef distance(row):\n source = row['start_lat'], row['start_lng']\n dest = row['end_lat'], row['end_lng']\n return vincenty(source, dest).miles\n\n\nManhattan = 40.7831, -73.9712\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\nnp.random.seed(42)\ndf_train_s = df_train.sample(frac=0.01, replace=False)\ndf_train_s = add_features(df_train_s)\ndf_train_s['velocity'] = np.array(df_train_s['distance'] / (df_train_s[\n 'duration'] / 3600))\ndf_train_s.head()\ndf_test = 
add_features(df_test)\ndf_train_s = df_train_s[df_train_s['velocity'] < 90]\ndf_train_s = df_train_s[df_train_s['velocity'] > 0.5]\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))\nax = df_train_s.plot.scatter('start_lat', 'start_lng', ax=axes[0], title=\n 'Start point of travel')\nax.set(xlabel='latitude', ylabel='longitude')\nax = df_train_s.plot.scatter('end_lng', 'end_lat', ax=axes[1], title=\n 'Destination of the travel')\nax.set(xlabel='latitude', ylabel='longitude')\nplt.show()\ndf_train_s[['distance', 'duration', 'velocity']].describe()\ndf_train_s['velocity'].hist(bins=1000, normed=True)\ncorr = df_train_s.corr()\nmask = np.zeros_like(corr, dtype=np.bool)\nmask[np.triu_indices_from(mask)] = True\nf, ax = plt.subplots(figsize=(18, 18))\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, square=True, linewidths=\n 0.5, cbar_kws={'shrink': 0.5}, ax=ax)\nplt.show()\ndf_train_s.plot.scatter('distance', 'velocity')\ngr = df_train_s[['velocity', 'month']].groupby(by='month')\ngr.mean().plot.bar(yerr=gr.std())\ncl = list(set(df_train_s.keys()) - {'velocity', 'duration', 'day'})\nX = np.array(df_train_s[cl])\nX1 = np.insert(X, 0, 1, axis=1)\ny = np.array(df_train_s['velocity'])\nX_train, X_test, y_train, y_test = train_test_split(X1, y, test_size=0.2,\n random_state=42)\ndist_train = X_train[:, 1]\ndist_test = X_test[:, 1]\nlist(enumerate(cl))\ndist_train.mean()\nmodel_sk = LinearRegression()\nmodel_sk.fit(X_train, y_train)\nplt.figure(figsize=(12, 8))\nplt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)\nplt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')\nplt.xlim([-1, model_sk.coef_.shape[0]])\nplt.title('Linear model coefficients')\nplt.show()\nlinear_model = sm.OLS(y_train, X_train)\nlinear_results = linear_model.fit()\nprint(linear_results.summary())\ngamma_model = sm.GLM(y_train, X_train, family=sm.families.Gamma())\ngamma_results = 
gamma_model.fit()\nprint(gamma_results.summary())\nDNN_model = Sequential()\nDNN_model.add(Dense(100, input_dim=X_train.shape[1], init='uniform',\n activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(50, init='uniform', activation='softmax'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(100, init='uniform', activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(1, init='uniform', activation='relu'))\nDNN_model.summary()\nmn = X1.mean(axis=0)\nDNN_model.compile(loss='mean_absolute_error', optimizer='adam')\nhistory = DNN_model.fit(X_train / mn, y_train, validation_data=(X_test / mn,\n y_test), epochs=100, batch_size=100, verbose=2)\nplt.figure(figsize=(10, 8))\nplt.title('Dense model training', fontsize=12)\nplt.plot(history.history['loss'], label='Train')\nplt.plot(history.history['val_loss'], label='Test')\nplt.grid('on')\nplt.xlabel('Epoch', fontsize=12)\nplt.ylabel('loss', fontsize=12)\nplt.legend(loc='upper right')\npreds_test, preds_train = {}, {}\npreds_test['linear'] = linear_results.predict(X_test)\npreds_train['linear'] = linear_results.predict(X_train)\npreds_test['GLM'] = gamma_results.predict(X_test)\npreds_train['GLM'] = gamma_results.predict(X_train)\npreds_test['DL'] = np.squeeze(DNN_model.predict(X_test / mn))\npreds_train['DL'] = np.squeeze(DNN_model.predict(X_train / mn))\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\ndef mean_absolute_percentage_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(y_true / y_pred - 1)\n err = err[np.isfinite(err)]\n return 
np.mean(err) * 100\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\nMAE_train, MAPE_train = evalute(dist_train, y_train, preds_train)\nMAE_test, MAPE_test = evalute(dist_test, y_test, preds_test)\npd.DataFrame([MAE_test, MAE_train, MAPE_test, MAPE_train], index=[\n 'MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train']).transpose()\ndist_train.mean()\nXX = np.array(df_test[cl])\nXX = np.insert(XX, 0, 1, axis=1)\ndist_x = XX[:, 1]\nGLM_TD = dist_x / gamma_results.predict(XX) * 3600\ndf_ans = pd.DataFrame(GLM_TD, columns=['duration'])\ndf_ans.index.name = 'row_id'\ndf_ans.to_csv('answer.csv')\ndf_ans = pd.DataFrame(TD, columns=['duration'])\n",
"<import token>\nget_ipython().magic('matplotlib inline')\n<assignment token>\ndf_train.head()\n\n\ndef distance(row):\n source = row['start_lat'], row['start_lng']\n dest = row['end_lat'], row['end_lng']\n return vincenty(source, dest).miles\n\n\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\nnp.random.seed(42)\n<assignment token>\ndf_train_s.head()\n<assignment token>\nax.set(xlabel='latitude', ylabel='longitude')\n<assignment token>\nax.set(xlabel='latitude', ylabel='longitude')\nplt.show()\ndf_train_s[['distance', 'duration', 'velocity']].describe()\ndf_train_s['velocity'].hist(bins=1000, normed=True)\n<assignment 
token>\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, square=True, linewidths=\n 0.5, cbar_kws={'shrink': 0.5}, ax=ax)\nplt.show()\ndf_train_s.plot.scatter('distance', 'velocity')\n<assignment token>\ngr.mean().plot.bar(yerr=gr.std())\n<assignment token>\nlist(enumerate(cl))\ndist_train.mean()\n<assignment token>\nmodel_sk.fit(X_train, y_train)\nplt.figure(figsize=(12, 8))\nplt.bar(np.arange(model_sk.coef_.shape[0]) - 0.4, model_sk.coef_)\nplt.xticks(np.arange(model_sk.coef_.shape[0]), cl, rotation='vertical')\nplt.xlim([-1, model_sk.coef_.shape[0]])\nplt.title('Linear model coefficients')\nplt.show()\n<assignment token>\nprint(linear_results.summary())\n<assignment token>\nprint(gamma_results.summary())\n<assignment token>\nDNN_model.add(Dense(100, input_dim=X_train.shape[1], init='uniform',\n activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(50, init='uniform', activation='softmax'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(100, init='uniform', activation='relu'))\nDNN_model.add(Dropout(0.5))\nDNN_model.add(Dense(1, init='uniform', activation='relu'))\nDNN_model.summary()\n<assignment token>\nDNN_model.compile(loss='mean_absolute_error', optimizer='adam')\n<assignment token>\nplt.figure(figsize=(10, 8))\nplt.title('Dense model training', fontsize=12)\nplt.plot(history.history['loss'], label='Train')\nplt.plot(history.history['val_loss'], label='Test')\nplt.grid('on')\nplt.xlabel('Epoch', fontsize=12)\nplt.ylabel('loss', fontsize=12)\nplt.legend(loc='upper right')\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\ndef mean_absolute_percentage_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up 
and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(y_true / y_pred - 1)\n err = err[np.isfinite(err)]\n return np.mean(err) * 100\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\npd.DataFrame([MAE_test, MAE_train, MAPE_test, MAPE_train], index=[\n 'MAE_test', 'MAE_train', 'MAPE_test', 'MAPE_train']).transpose()\ndist_train.mean()\n<assignment token>\ndf_ans.to_csv('answer.csv')\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef distance(row):\n source = row['start_lat'], row['start_lng']\n dest = row['end_lat'], row['end_lng']\n return vincenty(source, dest).miles\n\n\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code 
token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\ndef mean_absolute_percentage_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(y_true / y_pred - 1)\n err = err[np.isfinite(err)]\n return np.mean(err) * 100\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef distance(row):\n source = row['start_lat'], row['start_lng']\n dest = row['end_lat'], row['end_lng']\n return vincenty(source, dest).miles\n\n\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code 
token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\ndef add_features(df_train_s):\n DD = df_train_s['start_timestamp'].map(day_of_week)\n df_train_s['day'] = DD\n DD = pd.get_dummies(DD, prefix='day', drop_first=True)\n df_train_s = pd.concat([df_train_s, DD], axis=1)\n df_train_s['month'] = df_train_s['start_timestamp'].map(month)\n df_train_s['time_of_day'] = df_train_s['start_timestamp'].map(time_of_day)\n df_train_s['distance'] = df_train_s.apply(lambda x: distance(x), axis=1)\n df_train_s['distance2'] = df_train_s['distance'] ** 2\n df_train_s['pickup_MH'] = df_train_s.apply(pickup_to_MH, axis=1)\n df_train_s['dropoff_MH'] = df_train_s.apply(dropoff_to_MH, axis=1)\n return df_train_s\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n 
dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\ndef day_of_week(ep):\n return datetime.fromtimestamp(ep).strftime('%A')\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef mean_absolute_error(dist, y_true, y_pred):\n \"\"\"\n Args: \n dist(ndarray) : distance between pick up and drop off \n y_true(ndarray) : true velocity\n y_pred(ndarray) : the prediction value of velocity\n\n \"\"\"\n err = np.abs(dist / y_true - dist / y_pred)\n err = err[np.isfinite(err)]\n return np.mean(err) * 3600\n\n\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\ndef dropoff_to_MH(row):\n \"\"\"find the distance between dropoff point and Manhattan center\"\"\"\n dest = row['end_lat'], row['end_lng']\n return vincenty(dest, Manhattan).miles\n\n\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\n<function token>\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef evalute(dist, y_true, prediction):\n MAE, MAPE = {}, {}\n for kys, y_pred in prediction.items():\n MAE[kys] = mean_absolute_error(dist, y_true, y_pred)\n MAPE[kys] = mean_absolute_percentage_error(dist, y_true, y_pred)\n return MAE, MAPE\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n\n\ndef pickup_to_MH(row):\n \"\"\"find the distance between pick up point and Manhattan center\"\"\"\n source = row['start_lat'], row['start_lng']\n return vincenty(source, Manhattan).miles\n\n\n<function token>\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\ndef time_of_day(ep):\n ref = datetime(2015, 1, 1, 0, 0, 0)\n sec = (datetime.fromtimestamp(ep) - ref).seconds\n return min(sec, 86400 - sec)\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\n<function token>\n\n\ndef year(ep):\n return datetime.fromtimestamp(ep).year\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef month(ep):\n return datetime.fromtimestamp(ep).month\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
1,019 |
ae84b449c8919f14954633b14993e6291501bc24
|
import requests
def login(username, password):
data = {'login':username,'pwd':password,'lang':''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php', data=data, allow_redirects=False)
if r.headers['Location'] == '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect':
return False
return True
# import pdb;pdb.set_trace()
if login("michelle", "michelle"):
print("Login Successfull[+]")
|
[
"import requests\n\ndef login(username, password):\n data = {'login':username,'pwd':password,'lang':''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php', data=data, allow_redirects=False)\n if r.headers['Location'] == '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect':\n return False\n return True\n # import pdb;pdb.set_trace()\n\n\nif login(\"michelle\", \"michelle\"):\n print(\"Login Successfull[+]\")\n",
"import requests\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\nif login('michelle', 'michelle'):\n print('Login Successfull[+]')\n",
"<import token>\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\nif login('michelle', 'michelle'):\n print('Login Successfull[+]')\n",
"<import token>\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,020 |
9aa54f1259aceb052cfba74cedcfadfe68778ebd
|
from IPython import embed
from selenium import webdriver
b = webdriver.Firefox()
embed()
|
[
"from IPython import embed\nfrom selenium import webdriver\n\nb = webdriver.Firefox()\nembed()\n",
"from IPython import embed\nfrom selenium import webdriver\nb = webdriver.Firefox()\nembed()\n",
"<import token>\nb = webdriver.Firefox()\nembed()\n",
"<import token>\n<assignment token>\nembed()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
1,021 |
0bfb089556bfa253bf139f03cd3079ced962d858
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from sbpy.data import Phys
from sbpy import bib
@pytest.mark.remote_data
def test_from_sbdb():
""" test from_horizons method"""
# query one object
data = Phys.from_sbdb('Ceres')
assert len(data.table) == 1
# query several objects
data = Phys.from_sbdb([n+1 for n in range(5)])
assert len(data.table) == 5
|
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n\n # query one object\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n\n # query several objects\n data = Phys.from_sbdb([n+1 for n in range(5)])\n assert len(data.table) == 5\n",
"import pytest\nfrom sbpy.data import Phys\nfrom sbpy import bib\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"<import token>\n\n\[email protected]_data\ndef test_from_sbdb():\n \"\"\" test from_horizons method\"\"\"\n data = Phys.from_sbdb('Ceres')\n assert len(data.table) == 1\n data = Phys.from_sbdb([(n + 1) for n in range(5)])\n assert len(data.table) == 5\n",
"<import token>\n<function token>\n"
] | false |
1,022 |
368151a134f987ed78c8048521137672530b5cce
|
# KeyLogger.py
# show a character key when pressed without using Enter key
# hide the Tkinter GUI window, only console shows
import Tkinter as tk
def key(event):
if event.keysym == 'Escape':
root.destroy()
print event.char, event.keysym
root = tk.Tk()
print "Press a key (Escape key to exit):"
root.bind_all('<Key>', key)
# don't show the tk window
root.withdraw()
root.mainloop()
|
[
"# KeyLogger.py\n# show a character key when pressed without using Enter key\n# hide the Tkinter GUI window, only console shows\n\nimport Tkinter as tk\n\ndef key(event):\n if event.keysym == 'Escape':\n root.destroy()\n print event.char, event.keysym\n\nroot = tk.Tk()\nprint \"Press a key (Escape key to exit):\"\nroot.bind_all('<Key>', key)\n# don't show the tk window\nroot.withdraw()\nroot.mainloop()\n"
] | true |
1,023 |
70aba6c94b7050113adf7ae48bd4e13aa9a34587
|
import typ
@typ.typ(items=[int])
def gnome_sort(items):
"""
>>> gnome_sort([])
[]
>>> gnome_sort([1])
[1]
>>> gnome_sort([2,1])
[1, 2]
>>> gnome_sort([1,2])
[1, 2]
>>> gnome_sort([1,2,2])
[1, 2, 2]
"""
i = 0
n = len(items)
while i < n:
if i and items[i] < items[i-1]:
items[i], items[i-1] = items[i-1], items[i]
i -= 1
else:
i += 1
return items
|
[
"import typ\n\[email protected](items=[int])\ndef gnome_sort(items):\n \"\"\"\n >>> gnome_sort([])\n []\n >>> gnome_sort([1])\n [1]\n >>> gnome_sort([2,1])\n [1, 2]\n >>> gnome_sort([1,2])\n [1, 2]\n >>> gnome_sort([1,2,2])\n [1, 2, 2]\n \"\"\"\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i-1]:\n items[i], items[i-1] = items[i-1], items[i]\n i -= 1\n else:\n i += 1\n return items\n \n",
"import typ\n\n\[email protected](items=[int])\ndef gnome_sort(items):\n \"\"\"\n >>> gnome_sort([])\n []\n >>> gnome_sort([1])\n [1]\n >>> gnome_sort([2,1])\n [1, 2]\n >>> gnome_sort([1,2])\n [1, 2]\n >>> gnome_sort([1,2,2])\n [1, 2, 2]\n \"\"\"\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i - 1]:\n items[i], items[i - 1] = items[i - 1], items[i]\n i -= 1\n else:\n i += 1\n return items\n",
"<import token>\n\n\[email protected](items=[int])\ndef gnome_sort(items):\n \"\"\"\n >>> gnome_sort([])\n []\n >>> gnome_sort([1])\n [1]\n >>> gnome_sort([2,1])\n [1, 2]\n >>> gnome_sort([1,2])\n [1, 2]\n >>> gnome_sort([1,2,2])\n [1, 2, 2]\n \"\"\"\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i - 1]:\n items[i], items[i - 1] = items[i - 1], items[i]\n i -= 1\n else:\n i += 1\n return items\n",
"<import token>\n<function token>\n"
] | false |
1,024 |
1ead23c6ea4e66b24e60598ae20606e24fa41482
|
# SPDX-FileCopyrightText: 2019-2021 Python201 Contributors
# SPDX-License-Identifier: MIT
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
import datetime
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
year = datetime.datetime.now().year
project = 'python201'
copyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'
author = 'Geoffrey Lentner, Ashwin Srinath'
version = '0.0.1'
release = '0.0.1'
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.autodoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.png'
html_favicon = '_static/favicon.ico'
html_static_path = ['']
html_theme_options = {
'external_links': [],
'github_url': 'https://github.com/glentner/python201',
}
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {}
latex_documents = [
(master_doc, 'python-201.tex', 'python-201 Documentation',
'Geoffrey Lentner, Ashwin Srinath', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# manual pages options
man_pages = [(
'manpage',
'cumprod',
'Compute cumulative product of a sequence of numbers.',
'Geoffrey Lentner <[email protected]>.',
'1'
),
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-201', 'python-201 Documentation',
author, 'python-201', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
intersphinx_mapping = {'https://docs.python.org/3/': None}
# export variables with epilogue
rst_epilog = f"""
.. |release| replace:: {release}
.. |copyright| replace:: {copyright}
"""
|
[
"# SPDX-FileCopyrightText: 2019-2021 Python201 Contributors\n# SPDX-License-Identifier: MIT\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\nimport datetime\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\n\nversion = '0.0.1'\nrelease = '0.0.1'\n\n\n# -- General configuration ---------------------------------------------------\n\nextensions = [\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = 
['']\nhtml_theme_options = {\n 'external_links': [],\n 'github_url': 'https://github.com/glentner/python201',\n}\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {}\nlatex_documents = [\n (master_doc, 'python-201.tex', 'python-201 Documentation',\n 'Geoffrey Lentner, Ashwin Srinath', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# manual pages options\nman_pages = [(\n 'manpage',\n 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <[email protected]>.',\n '1'\n),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# export variables with epilogue\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. |copyright| replace:: {copyright}\n\"\"\"\n",
"import datetime\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\nversion = '0.0.1'\nrelease = '0.0.1'\nextensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages', 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = ['']\nhtml_theme_options = {'external_links': [], 'github_url':\n 'https://github.com/glentner/python201'}\nlatex_elements = {}\nlatex_documents = [(master_doc, 'python-201.tex',\n 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]\nman_pages = [('manpage', 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <[email protected]>.', '1')]\ntexinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.', 'Miscellaneous')]\nintersphinx_mapping = {'https://docs.python.org/3/': None}\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. |copyright| replace:: {copyright}\n\"\"\"\n",
"<import token>\nyear = datetime.datetime.now().year\nproject = 'python201'\ncopyright = f'2019-{year} Geoffrey Lentner, 2018 Ashwin Srinath'\nauthor = 'Geoffrey Lentner, Ashwin Srinath'\nversion = '0.0.1'\nrelease = '0.0.1'\nextensions = ['sphinx.ext.intersphinx', 'sphinx.ext.mathjax',\n 'sphinx.ext.githubpages', 'sphinx.ext.autodoc',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\nhtml_theme = 'pydata_sphinx_theme'\nhtml_logo = '_static/logo.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_static_path = ['']\nhtml_theme_options = {'external_links': [], 'github_url':\n 'https://github.com/glentner/python201'}\nlatex_elements = {}\nlatex_documents = [(master_doc, 'python-201.tex',\n 'python-201 Documentation', 'Geoffrey Lentner, Ashwin Srinath', 'manual')]\nman_pages = [('manpage', 'cumprod',\n 'Compute cumulative product of a sequence of numbers.',\n 'Geoffrey Lentner <[email protected]>.', '1')]\ntexinfo_documents = [(master_doc, 'python-201', 'python-201 Documentation',\n author, 'python-201', 'One line description of project.', 'Miscellaneous')]\nintersphinx_mapping = {'https://docs.python.org/3/': None}\nrst_epilog = f\"\"\"\n.. |release| replace:: {release}\n.. |copyright| replace:: {copyright}\n\"\"\"\n",
"<import token>\n<assignment token>\n"
] | false |
1,025 |
8fee548466abf6d35ea180f8de4e52a9b8902d3f
|
import os
import math
from collections import defaultdict
__author__ = 'steven'
question='qb'
fs={'t1','small.in','large'}
def getmincost(n,c,f,x):
t=0.0
for i in range(0,n):
t+=1/(2+f*i)
t=t*c
t+=x/(2+f*n)
ct=getmincostnshift(n,c,f,x)
return min(t,ct);
def getmincostnshift(n,c,f,x):
t=0.0
n-=1;
for i in range(0,n):
t+=1/(2+f*i)
t=t*c
t+=x/(2+f*n)
return t
def getminn(c,f,x):
return int(math.ceil((x*f-2*c)/(c*f)))
def solver(c,f,x):
if (x*f-2*c)<0:
return x/2
minn=getminn(c,f,x)
return getmincost(minn,c,f,x)
for s in fs:
print question+s
f='./'+question+s
if os.path.isfile('./'+question+s):
ls=open(f)
noq=(int)(ls.readline())
fout=open(question+s+'-a','w')
print noq
for i in range(0,noq):
fa=ls.readline()
fa=fa.split();
c, f, x=[float(s) for s in fa]
fout.write('Case #%d: %f\n'%(i+1,solver(c,f,x)))
#Case #1: 7
#Case #2: Bad magician!
#Case #3: Volunteer cheated!
|
[
"import os\nimport math\nfrom collections import defaultdict\n__author__ = 'steven'\n\nquestion='qb'\nfs={'t1','small.in','large'}\ndef getmincost(n,c,f,x):\n t=0.0\n\n for i in range(0,n):\n t+=1/(2+f*i)\n t=t*c\n t+=x/(2+f*n)\n ct=getmincostnshift(n,c,f,x)\n return min(t,ct);\n\ndef getmincostnshift(n,c,f,x):\n t=0.0\n n-=1;\n\n for i in range(0,n):\n t+=1/(2+f*i)\n t=t*c\n t+=x/(2+f*n)\n return t\ndef getminn(c,f,x):\n return int(math.ceil((x*f-2*c)/(c*f)))\ndef solver(c,f,x):\n if (x*f-2*c)<0:\n return x/2\n minn=getminn(c,f,x)\n return getmincost(minn,c,f,x)\n\n\nfor s in fs:\n print question+s\n f='./'+question+s\n if os.path.isfile('./'+question+s):\n ls=open(f)\n noq=(int)(ls.readline())\n fout=open(question+s+'-a','w')\n print noq\n for i in range(0,noq):\n fa=ls.readline()\n fa=fa.split();\n c, f, x=[float(s) for s in fa]\n fout.write('Case #%d: %f\\n'%(i+1,solver(c,f,x)))\n\n#Case #1: 7\n#Case #2: Bad magician!\n#Case #3: Volunteer cheated!\n\n\n"
] | true |
1,026 |
f2e6d23e6d8c5aa6e80a652dc6cb8bda45824d0c
|
"""Code for constructing and executing Tasks"""
from bcipy.tasks.rsvp.calibration.alert_tone_calibration import RSVPAlertToneCalibrationTask
from bcipy.tasks.rsvp.calibration.inter_sequence_feedback_calibration import (
RSVPInterSequenceFeedbackCalibration
)
from bcipy.tasks.rsvp.calibration.calibration import RSVPCalibrationTask
from bcipy.tasks.rsvp.copy_phrase import RSVPCopyPhraseTask
from bcipy.tasks.rsvp.icon_to_icon import RSVPIconToIconTask
from bcipy.tasks.rsvp.calibration.timing_verification import RSVPTimingVerificationCalibration
from bcipy.tasks.task import Task
from bcipy.tasks.exceptions import TaskRegistryException
from bcipy.tasks.task_registry import ExperimentType
def make_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True,
auc_filename=None) -> Task:
"""Creates a Task based on the provided parameters.
Parameters:
-----------
display_window: pyschopy Window
daq: DataAcquisitionClient
exp_type: ExperimentType
parameters: dict
file_save: str - path to file in which to save data
signal_model
language_model - language model
fake: boolean - true if eeg stream is randomly generated
auc_filename: str
Returns:
--------
Task instance
"""
# NORMAL RSVP MODES
if exp_type is ExperimentType.RSVP_CALIBRATION:
return RSVPCalibrationTask(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_COPY_PHRASE:
return RSVPCopyPhraseTask(
display_window, daq, parameters, file_save, signal_model,
language_model, fake=fake)
# ICON TASKS
if exp_type is ExperimentType.RSVP_ICON_TO_ICON:
return RSVPIconToIconTask(display_window, daq,
parameters, file_save, signal_model,
language_model, fake, False, auc_filename)
if exp_type is ExperimentType.RSVP_ICON_TO_WORD:
# pylint: disable=fixme
# TODO: consider a new class for this scenario.
return RSVPIconToIconTask(display_window, daq,
parameters, file_save, signal_model,
language_model, fake, True, auc_filename)
# CALIBRATION FEEDBACK TASKS
if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:
return RSVPAlertToneCalibrationTask(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:
return RSVPInterSequenceFeedbackCalibration(
display_window, daq, parameters, file_save)
if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:
return RSVPTimingVerificationCalibration(display_window, daq,
parameters, file_save)
raise TaskRegistryException(
'The provided experiment type is not registered.')
def start_task(display_window, daq, exp_type, parameters, file_save,
signal_model=None, language_model=None, fake=True, auc_filename=None):
"""Creates a Task and starts execution."""
task = make_task(display_window, daq, exp_type, parameters, file_save,
signal_model, language_model, fake, auc_filename)
task.execute()
|
[
"\"\"\"Code for constructing and executing Tasks\"\"\"\nfrom bcipy.tasks.rsvp.calibration.alert_tone_calibration import RSVPAlertToneCalibrationTask\nfrom bcipy.tasks.rsvp.calibration.inter_sequence_feedback_calibration import (\n RSVPInterSequenceFeedbackCalibration\n)\nfrom bcipy.tasks.rsvp.calibration.calibration import RSVPCalibrationTask\nfrom bcipy.tasks.rsvp.copy_phrase import RSVPCopyPhraseTask\nfrom bcipy.tasks.rsvp.icon_to_icon import RSVPIconToIconTask\nfrom bcipy.tasks.rsvp.calibration.timing_verification import RSVPTimingVerificationCalibration\n\nfrom bcipy.tasks.task import Task\nfrom bcipy.tasks.exceptions import TaskRegistryException\nfrom bcipy.tasks.task_registry import ExperimentType\n\n\ndef make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True,\n auc_filename=None) -> Task:\n \"\"\"Creates a Task based on the provided parameters.\n\n Parameters:\n -----------\n display_window: pyschopy Window\n daq: DataAcquisitionClient\n exp_type: ExperimentType\n parameters: dict\n file_save: str - path to file in which to save data\n signal_model\n language_model - language model\n fake: boolean - true if eeg stream is randomly generated\n auc_filename: str\n Returns:\n --------\n Task instance\n \"\"\"\n\n # NORMAL RSVP MODES\n if exp_type is ExperimentType.RSVP_CALIBRATION:\n return RSVPCalibrationTask(\n display_window, daq, parameters, file_save)\n\n if exp_type is ExperimentType.RSVP_COPY_PHRASE:\n return RSVPCopyPhraseTask(\n display_window, daq, parameters, file_save, signal_model,\n language_model, fake=fake)\n\n # ICON TASKS\n if exp_type is ExperimentType.RSVP_ICON_TO_ICON:\n return RSVPIconToIconTask(display_window, daq,\n parameters, file_save, signal_model,\n language_model, fake, False, auc_filename)\n\n if exp_type is ExperimentType.RSVP_ICON_TO_WORD:\n # pylint: disable=fixme\n # TODO: consider a new class for this scenario.\n return RSVPIconToIconTask(display_window, daq,\n 
parameters, file_save, signal_model,\n language_model, fake, True, auc_filename)\n\n # CALIBRATION FEEDBACK TASKS\n if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:\n return RSVPAlertToneCalibrationTask(\n display_window, daq, parameters, file_save)\n\n if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:\n return RSVPInterSequenceFeedbackCalibration(\n display_window, daq, parameters, file_save)\n\n if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:\n return RSVPTimingVerificationCalibration(display_window, daq,\n parameters, file_save)\n raise TaskRegistryException(\n 'The provided experiment type is not registered.')\n\n\ndef start_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None):\n \"\"\"Creates a Task and starts execution.\"\"\"\n task = make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model, language_model, fake, auc_filename)\n task.execute()\n",
"<docstring token>\nfrom bcipy.tasks.rsvp.calibration.alert_tone_calibration import RSVPAlertToneCalibrationTask\nfrom bcipy.tasks.rsvp.calibration.inter_sequence_feedback_calibration import RSVPInterSequenceFeedbackCalibration\nfrom bcipy.tasks.rsvp.calibration.calibration import RSVPCalibrationTask\nfrom bcipy.tasks.rsvp.copy_phrase import RSVPCopyPhraseTask\nfrom bcipy.tasks.rsvp.icon_to_icon import RSVPIconToIconTask\nfrom bcipy.tasks.rsvp.calibration.timing_verification import RSVPTimingVerificationCalibration\nfrom bcipy.tasks.task import Task\nfrom bcipy.tasks.exceptions import TaskRegistryException\nfrom bcipy.tasks.task_registry import ExperimentType\n\n\ndef make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None\n ) ->Task:\n \"\"\"Creates a Task based on the provided parameters.\n\n Parameters:\n -----------\n display_window: pyschopy Window\n daq: DataAcquisitionClient\n exp_type: ExperimentType\n parameters: dict\n file_save: str - path to file in which to save data\n signal_model\n language_model - language model\n fake: boolean - true if eeg stream is randomly generated\n auc_filename: str\n Returns:\n --------\n Task instance\n \"\"\"\n if exp_type is ExperimentType.RSVP_CALIBRATION:\n return RSVPCalibrationTask(display_window, daq, parameters, file_save)\n if exp_type is ExperimentType.RSVP_COPY_PHRASE:\n return RSVPCopyPhraseTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake=fake)\n if exp_type is ExperimentType.RSVP_ICON_TO_ICON:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, False, auc_filename)\n if exp_type is ExperimentType.RSVP_ICON_TO_WORD:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, True, auc_filename)\n if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:\n return 
RSVPAlertToneCalibrationTask(display_window, daq, parameters,\n file_save)\n if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:\n return RSVPInterSequenceFeedbackCalibration(display_window, daq,\n parameters, file_save)\n if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:\n return RSVPTimingVerificationCalibration(display_window, daq,\n parameters, file_save)\n raise TaskRegistryException(\n 'The provided experiment type is not registered.')\n\n\ndef start_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None):\n \"\"\"Creates a Task and starts execution.\"\"\"\n task = make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model, language_model, fake, auc_filename)\n task.execute()\n",
"<docstring token>\n<import token>\n\n\ndef make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None\n ) ->Task:\n \"\"\"Creates a Task based on the provided parameters.\n\n Parameters:\n -----------\n display_window: pyschopy Window\n daq: DataAcquisitionClient\n exp_type: ExperimentType\n parameters: dict\n file_save: str - path to file in which to save data\n signal_model\n language_model - language model\n fake: boolean - true if eeg stream is randomly generated\n auc_filename: str\n Returns:\n --------\n Task instance\n \"\"\"\n if exp_type is ExperimentType.RSVP_CALIBRATION:\n return RSVPCalibrationTask(display_window, daq, parameters, file_save)\n if exp_type is ExperimentType.RSVP_COPY_PHRASE:\n return RSVPCopyPhraseTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake=fake)\n if exp_type is ExperimentType.RSVP_ICON_TO_ICON:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, False, auc_filename)\n if exp_type is ExperimentType.RSVP_ICON_TO_WORD:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, True, auc_filename)\n if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:\n return RSVPAlertToneCalibrationTask(display_window, daq, parameters,\n file_save)\n if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:\n return RSVPInterSequenceFeedbackCalibration(display_window, daq,\n parameters, file_save)\n if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:\n return RSVPTimingVerificationCalibration(display_window, daq,\n parameters, file_save)\n raise TaskRegistryException(\n 'The provided experiment type is not registered.')\n\n\ndef start_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None):\n \"\"\"Creates a Task and 
starts execution.\"\"\"\n task = make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model, language_model, fake, auc_filename)\n task.execute()\n",
"<docstring token>\n<import token>\n\n\ndef make_task(display_window, daq, exp_type, parameters, file_save,\n signal_model=None, language_model=None, fake=True, auc_filename=None\n ) ->Task:\n \"\"\"Creates a Task based on the provided parameters.\n\n Parameters:\n -----------\n display_window: pyschopy Window\n daq: DataAcquisitionClient\n exp_type: ExperimentType\n parameters: dict\n file_save: str - path to file in which to save data\n signal_model\n language_model - language model\n fake: boolean - true if eeg stream is randomly generated\n auc_filename: str\n Returns:\n --------\n Task instance\n \"\"\"\n if exp_type is ExperimentType.RSVP_CALIBRATION:\n return RSVPCalibrationTask(display_window, daq, parameters, file_save)\n if exp_type is ExperimentType.RSVP_COPY_PHRASE:\n return RSVPCopyPhraseTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake=fake)\n if exp_type is ExperimentType.RSVP_ICON_TO_ICON:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, False, auc_filename)\n if exp_type is ExperimentType.RSVP_ICON_TO_WORD:\n return RSVPIconToIconTask(display_window, daq, parameters,\n file_save, signal_model, language_model, fake, True, auc_filename)\n if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:\n return RSVPAlertToneCalibrationTask(display_window, daq, parameters,\n file_save)\n if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:\n return RSVPInterSequenceFeedbackCalibration(display_window, daq,\n parameters, file_save)\n if exp_type is ExperimentType.RSVP_TIMING_VERIFICATION_CALIBRATION:\n return RSVPTimingVerificationCalibration(display_window, daq,\n parameters, file_save)\n raise TaskRegistryException(\n 'The provided experiment type is not registered.')\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n"
] | false |
1,027 |
e59e60b0a4b7deca9c510bd6b9c58636c6d34c80
|
l={1,2,3,4}
try:
print(l)
s=len(l)
if s>5:
raise TypeError
print(d[2])
except TypeError:
print("Error!!!length should be less than or equals to 5")
except NameError:
print("index out of range")
else:
for i in l:
print(i)
finally:
print("execution done!!!!!!")
|
[
"\nl={1,2,3,4}\ntry:\n\tprint(l)\n\ts=len(l)\n\tif s>5:\n\t\traise TypeError\n\tprint(d[2])\n\nexcept TypeError:\n\tprint(\"Error!!!length should be less than or equals to 5\")\nexcept NameError:\n\tprint(\"index out of range\")\nelse:\n\tfor i in l:\n\t\tprint(i)\nfinally:\n\tprint(\"execution done!!!!!!\")",
"l = {1, 2, 3, 4}\ntry:\n print(l)\n s = len(l)\n if s > 5:\n raise TypeError\n print(d[2])\nexcept TypeError:\n print('Error!!!length should be less than or equals to 5')\nexcept NameError:\n print('index out of range')\nelse:\n for i in l:\n print(i)\nfinally:\n print('execution done!!!!!!')\n",
"<assignment token>\ntry:\n print(l)\n s = len(l)\n if s > 5:\n raise TypeError\n print(d[2])\nexcept TypeError:\n print('Error!!!length should be less than or equals to 5')\nexcept NameError:\n print('index out of range')\nelse:\n for i in l:\n print(i)\nfinally:\n print('execution done!!!!!!')\n",
"<assignment token>\n<code token>\n"
] | false |
1,028 |
c0503536672aa824eaf0d19b9d4b5431ef910432
|
#!/usr/bin/env python
# encoding: utf-8
import os
import argparse
import coaddBatchCutout as cbc
def run(args):
min = -0.0
max = 0.5
Q = 10
if os.path.isfile(args.incat):
cbc.coaddBatchCutFull(args.root, args.incat,
filter=args.filter,
idField=args.idField,
prefix=args.prefix,
zCutoutSize=args.zCutout,
zField=args.zField,
onlyColor=args.onlyColor,
noColor=args.noColor,
saveSrc=args.saveSrc,
makeDir=args.makeDir,
raField=args.raField,
decField=args.decField)
else:
raise Exception("### Can not find the input catalog: %s" % args.incat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("root", help="Root directory of data repository")
parser.add_argument("incat", help="The input catalog for cutout")
parser.add_argument("-s", '--size', dest='size', type=int,
help="Half size of the cutout box", default=200)
parser.add_argument('-f', '--filter', dest='filter', help="Filter",
default='HSC-I')
parser.add_argument('-cf', '--color-filters', dest='colorFilters',
help="Choice of filters for color images", default='riz')
parser.add_argument('-sf', '--size-field', dest='sizeField',
help="Column name for cutout size", default='cutout_size')
parser.add_argument('-info1', '--infoField1', dest='infoField1',
help="Column name for first extra information",
default=None)
parser.add_argument('-info2', '--infoField2', dest='infoField2',
help="Column name for second extra information",
default=None)
parser.add_argument('-oc', '--onlyColor', action="store_true", dest='onlyColor',
default=False)
parser.add_argument('-safe', '--safe', action="store_true", dest='safe',
default=False)
parser.add_argument('-clean', '--clean', action="store_true", dest='clean',
default=False)
parser.add_argument('-v', '--verbose', action="store_true", dest='verbose',
default=False)
parser.add_argument('-src', '--src', action="store_true", dest='saveSrc',
default=True)
parser.add_argument('-makeDir', '--makeDir', action="store_true", dest='makeDir',
default=True)
parser.add_argument('-zc', '--zCutoutSize', action="store_true", dest='zCutout',
default=True)
parser.add_argument('-nc', '--noColor', action="store_true", dest='noColor',
default=True)
parser.add_argument('-p', '--prefix', dest='prefix',
help='Prefix of the output file',
default='redBCG')
parser.add_argument('-id', '--id', dest='idField', help="Column name for ID",
default='ID_CLUSTER')
parser.add_argument('-ra', '--ra', dest='raField', help="Column name for RA",
default='RA_BCG')
parser.add_argument('-dec', '--dec', dest='decField', help="Column name for DEC",
default='DEC_BCG')
parser.add_argument('-z', '--redshift', dest='zField', help="Column name for z",
default='Z_LAMBDA')
args = parser.parse_args()
run(args)
|
[
"#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n\n min = -0.0\n max = 0.5\n Q = 10\n\n if os.path.isfile(args.incat):\n\n cbc.coaddBatchCutFull(args.root, args.incat,\n filter=args.filter,\n idField=args.idField,\n prefix=args.prefix,\n zCutoutSize=args.zCutout,\n zField=args.zField,\n onlyColor=args.onlyColor,\n noColor=args.noColor,\n saveSrc=args.saveSrc,\n makeDir=args.makeDir,\n raField=args.raField,\n decField=args.decField)\n else:\n raise Exception(\"### Can not find the input catalog: %s\" % args.incat)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"root\", help=\"Root directory of data repository\")\n parser.add_argument(\"incat\", help=\"The input catalog for cutout\")\n parser.add_argument(\"-s\", '--size', dest='size', type=int,\n help=\"Half size of the cutout box\", default=200)\n parser.add_argument('-f', '--filter', dest='filter', help=\"Filter\",\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters',\n help=\"Choice of filters for color images\", default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField',\n help=\"Column name for cutout size\", default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1',\n help=\"Column name for first extra information\",\n default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2',\n help=\"Column name for second extra information\",\n default=None)\n parser.add_argument('-oc', '--onlyColor', action=\"store_true\", dest='onlyColor',\n default=False)\n parser.add_argument('-safe', '--safe', action=\"store_true\", dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action=\"store_true\", dest='clean',\n default=False)\n parser.add_argument('-v', '--verbose', action=\"store_true\", dest='verbose',\n default=False)\n parser.add_argument('-src', '--src', 
action=\"store_true\", dest='saveSrc',\n default=True)\n parser.add_argument('-makeDir', '--makeDir', action=\"store_true\", dest='makeDir',\n default=True)\n parser.add_argument('-zc', '--zCutoutSize', action=\"store_true\", dest='zCutout',\n default=True)\n parser.add_argument('-nc', '--noColor', action=\"store_true\", dest='noColor',\n default=True)\n parser.add_argument('-p', '--prefix', dest='prefix',\n help='Prefix of the output file',\n default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\"Column name for ID\",\n default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\"Column name for RA\",\n default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\"Column name for DEC\",\n default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\"Column name for z\",\n default='Z_LAMBDA')\n args = parser.parse_args()\n\n run(args)\n\n",
"import os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', 
action='store_true', dest=\n 'makeDir', default=True)\n parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"<import token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=\n 'makeDir', default=True)\n 
parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"<import token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,029 |
2e140d1174e0b2d8a97df880b1bffdf84dc0d236
|
from helper.logger_helper import Log
from helper.mail_helper import MailHelper
import spider.spider as spider
from configuration.configuration_handler import Configuration
from configuration.products_handler import ProductsHandler
if __name__ == "__main__":
logger = Log()
conf = Configuration('configuration/configuration.yaml').load_configuration()
ph = ProductsHandler(conf["products_path"])
logger.info("Configuration loaded")
products = ph.load_products()
logger.info("Products loaded from {}".format(conf["products_path"]))
update, msg = spider.Spider(products, conf).crawl()
if len(update) > 0:
logger.info("Products to report")
mail_helper = MailHelper()
mail_helper.send_mail('', msg, "New prices lower")
logger.info("Mail sent")
mail_helper.close_connection()
else:
logger.info("Nothing to report")
ph.save_products(products)
logger.info("Configuration saved")
else:
print("Exec this file as the main entrypoint! -> python3 init.py")
|
[
"from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\n\nif __name__ == \"__main__\":\n logger = Log()\n conf = Configuration('configuration/configuration.yaml').load_configuration()\n ph = ProductsHandler(conf[\"products_path\"]) \n logger.info(\"Configuration loaded\")\n products = ph.load_products()\n logger.info(\"Products loaded from {}\".format(conf[\"products_path\"]))\n\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info(\"Products to report\")\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, \"New prices lower\")\n \n logger.info(\"Mail sent\")\n mail_helper.close_connection()\n\n else:\n logger.info(\"Nothing to report\")\n \n ph.save_products(products)\n logger.info(\"Configuration saved\")\nelse:\n print(\"Exec this file as the main entrypoint! -> python3 init.py\")",
"from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"<import token>\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"<import token>\n<code token>\n"
] | false |
1,030 |
dbb66930edd70729e4df7d3023e83a6eae65cccd
|
#!/usr/bin/env python
def main():
import sys
from pyramid.paster import get_appsettings
from sqlalchemy import engine_from_config
from pyvideohub.models import ScopedSession, Base
config_file = sys.argv[1]
settings = get_appsettings(config_file)
engine = engine_from_config(settings, 'sqlalchemy.')
ScopedSession.configure(bind=engine)
Base.metadata.create_all(engine)
print('DB initialized done.')
if __name__ == '__main__':
main()
|
[
"#!/usr/bin/env python\n\ndef main():\n import sys\n from pyramid.paster import get_appsettings\n from sqlalchemy import engine_from_config\n from pyvideohub.models import ScopedSession, Base\n \n config_file = sys.argv[1]\n settings = get_appsettings(config_file)\n engine = engine_from_config(settings, 'sqlalchemy.')\n ScopedSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n\n print('DB initialized done.')\n \n\n\nif __name__ == '__main__':\n main()\n",
"def main():\n import sys\n from pyramid.paster import get_appsettings\n from sqlalchemy import engine_from_config\n from pyvideohub.models import ScopedSession, Base\n config_file = sys.argv[1]\n settings = get_appsettings(config_file)\n engine = engine_from_config(settings, 'sqlalchemy.')\n ScopedSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n print('DB initialized done.')\n\n\nif __name__ == '__main__':\n main()\n",
"def main():\n import sys\n from pyramid.paster import get_appsettings\n from sqlalchemy import engine_from_config\n from pyvideohub.models import ScopedSession, Base\n config_file = sys.argv[1]\n settings = get_appsettings(config_file)\n engine = engine_from_config(settings, 'sqlalchemy.')\n ScopedSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n print('DB initialized done.')\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
1,031 |
e288403cb310bb7241b25e74d1b5bcc63967128c
|
"""Note: AWS Glue split from spark since it requires different test dependencies."""
from tests.integration.backend_dependencies import BackendDependencies
from tests.integration.integration_test_fixture import IntegrationTestFixture
aws_glue_integration_tests = []
deployment_patterns = [
# TODO: The AWS_GLUE dependency is only being marked and not run at this time.
IntegrationTestFixture(
name="how_to_use_great_expectations_in_aws_glue",
user_flow_script="tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py",
backend_dependencies=[
BackendDependencies.SPARK,
BackendDependencies.AWS,
BackendDependencies.AWS_GLUE,
],
),
]
aws_glue_integration_tests += deployment_patterns
|
[
"\"\"\"Note: AWS Glue split from spark since it requires different test dependencies.\"\"\"\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\n\naws_glue_integration_tests = []\n\ndeployment_patterns = [\n # TODO: The AWS_GLUE dependency is only being marked and not run at this time.\n IntegrationTestFixture(\n name=\"how_to_use_great_expectations_in_aws_glue\",\n user_flow_script=\"tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py\",\n backend_dependencies=[\n BackendDependencies.SPARK,\n BackendDependencies.AWS,\n BackendDependencies.AWS_GLUE,\n ],\n ),\n]\n\naws_glue_integration_tests += deployment_patterns\n",
"<docstring token>\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"<docstring token>\n<import token>\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n",
"<docstring token>\n<import token>\n<assignment token>\naws_glue_integration_tests += deployment_patterns\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
1,032 |
8fd74287fbc653ea3ed4aa76a272486aa29185cf
|
# !/usr/bin/python
# sudo mn --custom _mininet_topo.py --topo mytopo,5
# sudo mn --custom _mininet_topo.py --topo mytopo,3 --test simpletest
# or just run this python file
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
from mininet.cli import CLI
class SingleSwitchTopo(Topo):
"Single switch connected to n hosts."
def build(self):
# switch = self.addSwitch('s1')
# # Python's range(N) generates 0..N-1
# for h in range(n):
# host = self.addHost('h%s' % (h + 1))
# self.addLink(host, switch)
s1 = self.addSwitch('s1')
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
self.addLink(h1, s1)
self.addLink(h2, s1)
self.addLink(h3, s1)
self.addLink(h4, s1)
self.addLink(h5, s1)
self.addLink(h6, s1)
#
def simpleTest():
"Create and test a simple network"
topo = SingleSwitchTopo()
net = Mininet(topo)
net.start()
print "Dumping host connections"
dumpNodeConnections(net.hosts)
print "Testing network connectivity"
net.pingAll()
# net.stop()
h1 = net.get('h1')
h2 = net.get('h2')
h3 = net.get('h3')
h4 = net.get('h4')
h5 = net.get('h5')
h6 = net.get('h6')
for host in [h1, h2, h3, h4, h5, h6]:
host.cmdPrint('cd /media/sf_DHT-Torrent')
h1.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 600 --ip ' + h1.IP() + ' \' > h1.sh')
h2.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 500 --ip ' + h2.IP() + " --nextpeerid 600 --nextpeerip " + h1.IP() + ' \' > h2.sh')
h3.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 400 --ip ' + h3.IP() + " --nextpeerid 500 --nextpeerip " + h2.IP() + ' \' > h3.sh')
h4.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 300 --ip ' + h4.IP() + " --nextpeerid 400 --nextpeerip " + h3.IP() + ' \' > h4.sh')
h5.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 200 --ip ' + h5.IP() + " --nextpeerid 300 --nextpeerip " + h4.IP() + ' \' > h5.sh')
h6.cmdPrint('echo \'python /media/sf_DHT-Torrent/start.py --static --id 100 --ip ' + h6.IP() + " --nextpeerid 200 --nextpeerip " + h5.IP() + ' \' > h6.sh')
# h1.cmdPrint('ls')
net.startTerms()
CLI(net)
# CLI(net).do_xterm(h1)
net.stopXterms()
net.stop()
if __name__ == '__main__':
# Tell mininet to print useful information
setLogLevel('info')
simpleTest()
topos = { 'mytopo': SingleSwitchTopo }
# tests = { 'mytest': simpleTest }
|
[
"# !/usr/bin/python\n\n# sudo mn --custom _mininet_topo.py --topo mytopo,5\n# sudo mn --custom _mininet_topo.py --topo mytopo,3 --test simpletest\n# or just run this python file\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.util import dumpNodeConnections\nfrom mininet.log import setLogLevel\nfrom mininet.cli import CLI\n\n\nclass SingleSwitchTopo(Topo):\n \"Single switch connected to n hosts.\"\n\n def build(self):\n # switch = self.addSwitch('s1')\n # # Python's range(N) generates 0..N-1\n # for h in range(n):\n # host = self.addHost('h%s' % (h + 1))\n # self.addLink(host, switch)\n\n s1 = self.addSwitch('s1')\n\n h1 = self.addHost('h1')\n h2 = self.addHost('h2')\n h3 = self.addHost('h3')\n h4 = self.addHost('h4')\n h5 = self.addHost('h5')\n h6 = self.addHost('h6')\n\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s1)\n self.addLink(h5, s1)\n self.addLink(h6, s1)\n\n#\ndef simpleTest():\n \"Create and test a simple network\"\n topo = SingleSwitchTopo()\n net = Mininet(topo)\n net.start()\n print \"Dumping host connections\"\n dumpNodeConnections(net.hosts)\n print \"Testing network connectivity\"\n net.pingAll()\n # net.stop()\n\n h1 = net.get('h1')\n h2 = net.get('h2')\n h3 = net.get('h3')\n h4 = net.get('h4')\n h5 = net.get('h5')\n h6 = net.get('h6')\n\n\n for host in [h1, h2, h3, h4, h5, h6]:\n host.cmdPrint('cd /media/sf_DHT-Torrent')\n\n h1.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 600 --ip ' + h1.IP() + ' \\' > h1.sh')\n h2.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 500 --ip ' + h2.IP() + \" --nextpeerid 600 --nextpeerip \" + h1.IP() + ' \\' > h2.sh')\n h3.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 400 --ip ' + h3.IP() + \" --nextpeerid 500 --nextpeerip \" + h2.IP() + ' \\' > h3.sh')\n h4.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 300 --ip ' + h4.IP() + \" --nextpeerid 400 
--nextpeerip \" + h3.IP() + ' \\' > h4.sh')\n h5.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 200 --ip ' + h5.IP() + \" --nextpeerid 300 --nextpeerip \" + h4.IP() + ' \\' > h5.sh')\n h6.cmdPrint('echo \\'python /media/sf_DHT-Torrent/start.py --static --id 100 --ip ' + h6.IP() + \" --nextpeerid 200 --nextpeerip \" + h5.IP() + ' \\' > h6.sh')\n\n # h1.cmdPrint('ls')\n\n net.startTerms()\n CLI(net)\n # CLI(net).do_xterm(h1)\n\n net.stopXterms()\n net.stop()\n\nif __name__ == '__main__':\n # Tell mininet to print useful information\n setLogLevel('info')\n simpleTest()\n\ntopos = { 'mytopo': SingleSwitchTopo }\n# tests = { 'mytest': simpleTest }"
] | true |
1,033 |
0e112ecfd4ccf762234dff564dd6f3987418dedd
|
# Start the HTML and Javascript code
print '''
<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["treemap"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
'''
print CountBugs('path/to/repo')
# Finish the HTML and Javascript
print '''
]);
// Create and draw the visualization.
var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));
tree.draw(data, {
maxDepth: 2,
minColor: 'YellowGreen',
midColor: 'LightGoldenRodYellow',
maxColor: 'Red',
headerHeight: 15,
fontColor: 'black',
showScale: true});
}
</script>
</head>
<body>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
</body>
</html>
'''
|
[
"# Start the HTML and Javascript code\nprint '''\n<html>\n <head>\n <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n <script type=\"text/javascript\">\n google.load(\"visualization\", \"1\", {packages:[\"treemap\"]});\n google.setOnLoadCallback(drawChart);\n function drawChart() {\n'''\n\nprint CountBugs('path/to/repo')\n\n# Finish the HTML and Javascript\nprint '''\n ]);\n\n // Create and draw the visualization.\n var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));\n tree.draw(data, {\n maxDepth: 2,\n minColor: 'YellowGreen',\n midColor: 'LightGoldenRodYellow',\n maxColor: 'Red',\n headerHeight: 15,\n fontColor: 'black',\n showScale: true});\n }\n </script>\n </head>\n\n <body>\n <div id=\"chart_div\" style=\"width: 900px; height: 500px;\"></div>\n </body>\n</html>\n'''\n"
] | true |
1,034 |
ed66e8028d653cf6b7ea4703fef5a658665c48db
|
# -*- coding: utf-8 -*-
# DATE 2018-08-21
# AUTHER = tongzz
#
import MySQLdb
from Elements.LoginElements import *
import datetime
import sys
class Tradepasswd():
def __init__(self):
self.db_config={
'host': '172.28.38.59',
'usr': 'mysqladmin',
'passwd': '123465',
'port': '3306',
'db': 'hdb'
}
def tradePasswd(self):
try:
conn = MySQLdb.connect(host=self.db_config['host'],user=self.db_config['usr'],passwd=self.db_config['passwd'],db=self.db_config['db'])
conn.autocommit(True)
curr = conn.cursor()
curr.execute("SET NAMES utf8")
curr.execute("USE %s"% self.db_config['db'])
# print u"******************** 操作数据库对象成功 ********************"
# return conn,curr
tradepasswd_sql = "UPDATE member set trade_pwd = NULL where uname = " + username + ";"
curr.execute(tradepasswd_sql)
# curr.fetchone()
print u"恢复交易密码成功"
curr.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d:%s"%(e.args[0],e.args[1])
return tradepasswd_sql
|
[
"# -*- coding: utf-8 -*-\r\n# DATE 2018-08-21\r\n# AUTHER = tongzz\r\n#\r\n\r\nimport MySQLdb\r\nfrom Elements.LoginElements import *\r\nimport datetime\r\nimport sys\r\nclass Tradepasswd():\r\n def __init__(self):\r\n self.db_config={\r\n 'host': '172.28.38.59',\r\n 'usr': 'mysqladmin',\r\n 'passwd': '123465',\r\n 'port': '3306',\r\n 'db': 'hdb'\r\n }\r\n def tradePasswd(self):\r\n try:\r\n conn = MySQLdb.connect(host=self.db_config['host'],user=self.db_config['usr'],passwd=self.db_config['passwd'],db=self.db_config['db'])\r\n conn.autocommit(True)\r\n curr = conn.cursor()\r\n curr.execute(\"SET NAMES utf8\")\r\n curr.execute(\"USE %s\"% self.db_config['db'])\r\n # print u\"******************** 操作数据库对象成功 ********************\"\r\n # return conn,curr\r\n tradepasswd_sql = \"UPDATE member set trade_pwd = NULL where uname = \" + username + \";\"\r\n curr.execute(tradepasswd_sql)\r\n # curr.fetchone()\r\n print u\"恢复交易密码成功\"\r\n curr.close()\r\n conn.close()\r\n except MySQLdb.Error,e:\r\n print \"Mysql Error %d:%s\"%(e.args[0],e.args[1])\r\n return tradepasswd_sql\r\n"
] | true |
1,035 |
81a1fbd13b06e4470bfbaa0d1716d5301e1a4b36
|
def readint(): return int(raw_input())
T = readint()
for t in xrange(T):
N = int(raw_input())
res = 0
sum = 0
min = 1000000
for i in raw_input().split():
r = int(i)
res ^= r
sum += r
if min > r: min = r
if res == 0:
sum -= min
print "Case #%d: %s" % (t + 1, sum)
else:
print "Case #%d: NO" % (t + 1)
|
[
"def readint(): return int(raw_input())\r\n\r\nT = readint()\r\nfor t in xrange(T):\r\n\tN = int(raw_input())\r\n\tres = 0\r\n\tsum = 0\r\n\tmin = 1000000\r\n\tfor i in raw_input().split():\r\n\t\tr = int(i)\r\n\t\tres ^= r\r\n\t\tsum += r\r\n\t\tif min > r: min = r\r\n\tif res == 0:\r\n\t\tsum -= min\r\n\t\tprint \"Case #%d: %s\" % (t + 1, sum)\r\n\telse:\r\n\t\tprint \"Case #%d: NO\" % (t + 1)"
] | true |
1,036 |
90fc6e37e3988a2014c66913db61749509db2d53
|
import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return "jetbrains-idea"
def cmd(self):
return "intellij-idea-ultimate-edition %s" % self.folder
|
[
"import os\n\nclass Idea:\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return \"jetbrains-idea\"\n\n def cmd(self):\n return \"intellij-idea-ultimate-edition %s\" % self.folder\n",
"import os\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"<import token>\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"<import token>\n\n\nclass Idea:\n <function token>\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"<import token>\n\n\nclass Idea:\n <function token>\n\n def name(self):\n return 'jetbrains-idea'\n <function token>\n",
"<import token>\n\n\nclass Idea:\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
1,037 |
d2e3ac490ce5fdc20976567fa320a9e6a53cbe34
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
import scipy as sp
import numpy as np
from scipy import spatial
print(__doc__)
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Calcule Distance Haversine Methods
EARTHRADIUS = 6371.0
def getDistanceByHaversine(loc1, loc2):
'''Haversine formula - give coordinates as a 2D numpy array of
(lat_denter link description hereecimal,lon_decimal) pairs'''
#
# "unpack" our numpy array, this extracts column wise arrays
lat1 = loc1[1]
lon1 = loc1[0]
lat2 = loc2[1]
lon2 = loc2[0]
#
# convert to radians ##### Completely identical
lon1 = lon1 * sp.pi / 180.0
lon2 = lon2 * sp.pi / 180.0
lat1 = lat1 * sp.pi / 180.0
lat2 = lat2 * sp.pi / 180.0
#
# haversine formula #### Same, but atan2 named arctan2 in numpy
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (np.sin(dlat/2))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2
c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0-a))
km = EARTHRADIUS * c
return km
##############################################################################
# Create a Matrix with longitude and latitude
import csv
import re
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
row_count = sum(1 for row in data)
gps_matrix = [[0 for i in range(row_count)] for j in range(2)]
with open('users_bcn.csv', 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=',', quotechar='|')
for key, row in enumerate(data):
if key != 0:
try:
gps_matrix[0][key] = float(row[2].replace('"',''))
gps_matrix[1][key] = float(row[1].replace('"',''))
except:
a = float(row[1].replace(',',''))
print('problem string to float')
##############################################################################
# Calculate the Distance matrix
D = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(u,v))
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result: one colour per cluster; noise points (label -1) drawn in black.
import matplotlib.pyplot as plt
# Map each distinct cluster label to a colour from the Spectral colormap.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = 'k'
    class_member_mask = (labels == k)
    # Core samples of this cluster: large markers.
    xy = X[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    # Border (non-core) samples of the same cluster: small markers.
    xy = X[class_member_mask & ~core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\n===================================\nDemo of DBSCAN clustering algorithm\n===================================\n\nFinds core samples of high density and expands clusters from them.\n\n\"\"\"\nimport scipy as sp\nimport numpy as np\n\nfrom scipy import spatial\nprint(__doc__)\n\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\n\n##############################################################################\n# Calcule Distance Haversine Methods\n\nEARTHRADIUS = 6371.0\n\ndef getDistanceByHaversine(loc1, loc2):\n '''Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs'''\n #\n # \"unpack\" our numpy array, this extracts column wise arrays\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n #\n # convert to radians ##### Completely identical\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n #\n # haversine formula #### Same, but atan2 named arctan2 in numpy\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (np.sin(dlat/2))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0-a))\n km = EARTHRADIUS * c\n return km\n\n\n##############################################################################\n# Create a Matrix with longitude and latitude\n\nimport csv\nimport re\n\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n row_count = sum(1 for row in data)\n gps_matrix = [[0 for i in range(row_count)] for j in range(2)]\n\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"',''))\n gps_matrix[1][key] 
= float(row[1].replace('\"',''))\n except:\n a = float(row[1].replace(',',''))\n print('problem string to float')\n\n##############################################################################\n# Calculate the Distance matrix\n\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(u,v))\n\n\n##############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\n\nX = StandardScaler().fit_transform(X)\n\n##############################################################################\n# Compute DBSCAN\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\n\n# Number of clusters in labels, ignoring noise if present.\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels_true, labels))\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels_true, labels))\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels_true, labels))\nprint(\"Adjusted Rand Index: %0.3f\"\n % metrics.adjusted_rand_score(labels_true, labels))\nprint(\"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(labels_true, labels))\nprint(\"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(X, labels))\n\n##############################################################################\n# Plot result\nimport matplotlib.pyplot as plt\n\n# Black removed and is used for noise instead.\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = 'k'\n\n class_member_mask = (labels == k)\n\n xy = 
X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=14)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', markersize=6)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"<docstring token>\nimport scipy as sp\nimport numpy as np\nfrom scipy import spatial\nprint(__doc__)\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nEARTHRADIUS = 6371.0\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\nimport csv\nimport re\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(\n u, v))\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\nX = StandardScaler().fit_transform(X)\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nprint('Estimated number of 
clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\nimport matplotlib.pyplot as plt\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"<docstring token>\n<import token>\nprint(__doc__)\n<import token>\nEARTHRADIUS = 6371.0\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<import token>\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\nD = spatial.distance.pdist(gps_matrix, lambda u, v: getDistanceByHaversine(\n u, v))\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\nX = StandardScaler().fit_transform(X)\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % 
metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\n<import token>\nunique_labels = set(labels)\ncolors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"<docstring token>\n<import token>\nprint(__doc__)\n<import token>\n<assignment token>\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<import token>\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n row_count = sum(1 for row in data)\n gps_matrix = [[(0) for i in range(row_count)] for j in range(2)]\nwith open('users_bcn.csv', 'rb') as csvfile:\n data = csv.reader(csvfile, delimiter=',', quotechar='|')\n for key, row in enumerate(data):\n if key != 0:\n try:\n gps_matrix[0][key] = float(row[2].replace('\"', ''))\n gps_matrix[1][key] = float(row[1].replace('\"', ''))\n except:\n a = float(row[1].replace(',', ''))\n print('problem string to float')\n<assignment token>\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint('Homogeneity: %0.3f' % metrics.homogeneity_score(labels_true, labels))\nprint('Completeness: %0.3f' % metrics.completeness_score(labels_true, labels))\nprint('V-measure: %0.3f' % metrics.v_measure_score(labels_true, labels))\nprint('Adjusted Rand Index: %0.3f' % metrics.adjusted_rand_score(\n labels_true, labels))\nprint('Adjusted Mutual Information: %0.3f' % metrics.\n adjusted_mutual_info_score(labels_true, labels))\nprint('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(X, labels))\n<import token>\n<assignment token>\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n col = 'k'\n class_member_mask = labels == k\n xy = 
X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=14)\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor=\n 'k', markersize=6)\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef getDistanceByHaversine(loc1, loc2):\n \"\"\"Haversine formula - give coordinates as a 2D numpy array of\n (lat_denter link description hereecimal,lon_decimal) pairs\"\"\"\n lat1 = loc1[1]\n lon1 = loc1[0]\n lat2 = loc2[1]\n lon2 = loc2[0]\n lon1 = lon1 * sp.pi / 180.0\n lon2 = lon2 * sp.pi / 180.0\n lat1 = lat1 * sp.pi / 180.0\n lat2 = lat2 * sp.pi / 180.0\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0\n ) ** 2\n c = 2.0 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))\n km = EARTHRADIUS * c\n return km\n\n\n<import token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
1,038 |
166a1dfbd3baf766230080361d98648ec0a64455
|
#coding=utf8
"""
Created on Thu Feb 20 00:53:28 2020

@author: Neal LONG

Fetch Xueqiu search results for symbol TSLA and decode the JSON payload.
"""
import json
import requests

# Browser-like headers so the site does not reject the request as a bot.
fake_header = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
                "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                "Accept-Encoding":"gzip, deflate, sdch",
                "Accept-Language":"zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2"
                }

# A Session keeps the cookies set by the first request; xueqiu.com requires
# them before its query API will answer.
s = requests.Session()

r = s.get('https://xueqiu.com', headers = fake_header)
r = s.get('https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea', headers = fake_header)

# BUG FIX: Response.json is a *method*; the original `r.json` stored the
# bound method object instead of the decoded payload.  Call it.  The second
# form decodes the same body via the stdlib json module for comparison.
parsed_json1 = r.json()
parsed_json2 = json.loads(r.text)

print(parsed_json2)
|
[
"#coding=utf8\r\n\"\"\"\r\nCreated on Thu Feb 20 00:53:28 2020\r\n\r\n@author: Neal LONG\r\n\"\"\"\r\n\r\nimport json\r\nimport requests\r\nfake_header = { \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36\",\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Accept-Encoding\":\"gzip, deflate, sdch\",\r\n \"Accept-Language\":\"zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2\"\r\n }\r\n\r\ns = requests.Session()\r\n\r\nr=s.get('https://xueqiu.com',headers = fake_header)\r\nr = s.get('https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea',headers = fake_header)\r\n\r\n#print(r.text)\r\nparsed_json1 = r.json\r\nparsed_json2 = json.loads(r.text)\r\n\r\nprint(parsed_json2)\r\n",
"<docstring token>\nimport json\nimport requests\nfake_header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':\n 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}\ns = requests.Session()\nr = s.get('https://xueqiu.com', headers=fake_header)\nr = s.get(\n 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'\n , headers=fake_header)\nparsed_json1 = r.json\nparsed_json2 = json.loads(r.text)\nprint(parsed_json2)\n",
"<docstring token>\n<import token>\nfake_header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n , 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language':\n 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2'}\ns = requests.Session()\nr = s.get('https://xueqiu.com', headers=fake_header)\nr = s.get(\n 'https://xueqiu.com/query/v1/symbol/search/status?u=401582129017261&uuid=1230174898194894848&count=10&comment=0&symbol=TSLA&hl=0&source=all&sort=&page=1&q=&session_token=null&access_token=b2f87b997a1558e1023f18af36cab23af8d202ea'\n , headers=fake_header)\nparsed_json1 = r.json\nparsed_json2 = json.loads(r.text)\nprint(parsed_json2)\n",
"<docstring token>\n<import token>\n<assignment token>\nprint(parsed_json2)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
1,039 |
9ef5d57d536f5c88f705b1032cc0936e2d4cd565
|
from Shapes import *
# Exercise the Shapes hierarchy: build a handful of circles and rectangles,
# then report the name, area and perimeter of each one.
listShapes = [Circle(5), Rectangle(3, 2), Circle(3), Circle(1), Rectangle(1, 1)]

for shape in listShapes:
    print(shape.toString())
    print("Area: " + str(shape.area()))
    print("Perimeter: " + str(shape.perimeter()))
|
[
"from Shapes import *\n\nc1 = Circle(5)\nr1 = Rectangle(3,2)\nc2 = Circle(3)\nc3 = Circle(1)\nr2 = Rectangle(1,1)\n\nlistShapes = [c1,r1,c2,c3,r2]\n\nfor item in listShapes:\n\tprint(item.toString())\n\tprint(\"Area: \" + str(item.area()))\n\tprint(\"Perimeter: \" + str(item.perimeter()))\n"
] | true |
1,040 |
813d27e8f9c1a416dab2f891dd71e4791bb92dbb
|
import sys
import pytest
from presidio_evaluator.evaluation import Evaluator
from tests.conftest import assert_model_results_gt
from presidio_evaluator.models.flair_model import FlairModel
@pytest.mark.slow
@pytest.mark.skipif("flair" not in sys.modules, reason="requires the Flair library")
def test_flair_simple(small_dataset):
    """Smoke-test the Flair NER wrapper: PERSON scores must be above zero."""
    model = FlairModel(model_path="ner", entities_to_keep=["PERSON"])
    evaluator = Evaluator(model=model)
    scores = evaluator.calculate_score(evaluator.evaluate_all(small_dataset))
    assert_model_results_gt(scores, "PERSON", 0)
|
[
"import sys\n\nimport pytest\n\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected](\"flair\" not in sys.modules, reason=\"requires the Flair library\")\ndef test_flair_simple(small_dataset):\n\n flair_model = FlairModel(model_path=\"ner\", entities_to_keep=[\"PERSON\"])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n\n assert_model_results_gt(scores, \"PERSON\", 0)\n",
"import sys\nimport pytest\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"<import token>\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"<import token>\n<function token>\n"
] | false |
1,041 |
cceda9a8a0188499ae0aa588701bb8104b5ed313
|
from pymongo import MongoClient, GEOSPHERE, GEO2D
import os, sys, json, pprint
sys.path.insert(0, '../utils')
import path_functions
# Connect to the local MongoDB instance and select the 'nfcdata' database.
client = MongoClient( 'localhost', 27017 )
db = client[ 'nfcdata' ]
json_files_path_list = path_functions.get_json_files('../../ftp-data/geojson-files/quikscat-l2b12')
# Load every GeoJSON file into its own collection (one per source file),
# creating a 2dsphere index on `geometry` so the $geoWithin queries below
# can be answered.  Files whose collection already exists are skipped.
for json_file in json_files_path_list:
    current_collection = 'GeoJSON-quikscat-l2b12-' + path_functions.get_file_name( json_file )
    print(current_collection)
    collection_list = db.collection_names()
    if current_collection not in collection_list:
        collection = db[current_collection]
        collection.create_index([( "geometry", GEOSPHERE )])
        # Insert each GeoJSON feature as a separate document.
        # NOTE(review): collection_names()/insert() were removed in modern
        # PyMongo (list_collection_names()/insert_one()); this presumably
        # targets an older driver -- confirm before upgrading.
        json_docs = json.load( open( json_file ) )
        for doc in json_docs['features']:
            collection.insert( doc )
# -- DROP COLLECTIONS (kept for reference) --
# collection_list = db.collection_names()
# for collection in collection_list:
#     db.drop_collection(collection)
# -- PRINT COLLECTIONS --
print( db.collection_names() )
# -- PRINT INDEXES (kept for reference) --
# collection_list = db.collection_names()
# for current_collection in collection_list:
#     collection = db[current_collection]
#     print( 'Index: ', sorted( list( collection.index_information() ) ) )
# -- PRINT DATA (kept for reference) --
# collection = db['GeoJSON-quikscat-l2b12-005']
# cursor = collection.find({})
# for document in cursor:
#     print('\n - - - - - - - DOCUMENTO - - - - - - - \n')
#     print(document)
# -- SPATIAL QUERY (2dsphere index) --
# Print every feature whose geometry lies inside a triangular polygon.
# GeoJSON coordinates are [longitude, latitude]; the ring is closed by
# repeating its first vertex.
collection_list = db.collection_names()
for current_collection in collection_list:
    collection = db[ current_collection ]

    for doc in collection.find(
        { "geometry": {
            "$geoWithin": {
                "$geometry" : {
                    "type": "Polygon" ,
                    "coordinates" : [
                        [
                            [-77.49, -89.70],
                            [0.00, 0.00],
                            [10.00, 10.00],
                            [-77.49, -89.70]
                        ]
                    ]
        } } } } ):
        pprint.pprint( doc )
# -- TEMPORAL QUERY --
# Match on the raw properties.time value (2009002 -- presumably a
# year + day-of-year code; confirm against the data producer) and show
# at most three matching documents per collection.
collection_list = db.collection_names()
for current_collection in collection_list:
    collection = db[current_collection]
    for doc in collection.find( { "properties.time": 2009002 } ).limit(3):
        pprint.pprint(doc)
# -- COMBINED SPATIO-TEMPORAL QUERY --
# Same triangular polygon as above, additionally filtered on properties.time.
collection_list = db.collection_names()
for current_collection in collection_list:
    collection = db[ current_collection ]

    for doc in collection.find(
        { "geometry": {
            "$geoWithin": {
                "$geometry" : {
                    "type": "Polygon" ,
                    "coordinates" : [
                        [
                            [-77.49, -89.70],
                            [0.00, 0.00],
                            [10.00, 10.00],
                            [-77.49, -89.70]
                        ]
                    ]
        } } }, "properties.time": 2009003 } ):
        pprint.pprint( doc )

# -- sample dump (kept for reference) --
# collection = db['quikscat-l2b12-001']
# cursor = collection.find({})
# for document in cursor:
#     pprint.pprint( document )
|
[
"\nfrom pymongo import MongoClient, GEOSPHERE, GEO2D\n\nimport os, sys, json, pprint\nsys.path.insert(0, '../utils') \nimport path_functions \n\n\nclient = MongoClient( 'localhost', 27017 )\ndb = client[ 'nfcdata' ]\n\njson_files_path_list = path_functions.get_json_files('../../ftp-data/geojson-files/quikscat-l2b12')\n\nfor json_file in json_files_path_list:\n \n current_collection = 'GeoJSON-quikscat-l2b12-' + path_functions.get_file_name( json_file )\n print(current_collection)\n collection_list = db.collection_names()\n\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([( \"geometry\", GEOSPHERE )])\n\n json_docs = json.load( open( json_file ) )\n for doc in json_docs['features']:\n collection.insert( doc )\n\n\n# -- DROP COLLECTIONS --\n# collection_list = db.collection_names()\n# for collection in collection_list:\n# db.drop_collection(collection)\n\n# -- PRINT COLLECTIONS --\nprint( db.collection_names() )\n\n# # -- PRINT INDEXES --\n# collection_list = db.collection_names()\n# for current_collection in collection_list:\n# collection = db[current_collection]\n# print( 'Index: ', sorted( list( collection.index_information() ) ) )\n\n# -- PRINT DATA --\n# collection = db['GeoJSON-quikscat-l2b12-005']\n# cursor = collection.find({})\n# for document in cursor:\n# print('\\n - - - - - - - DOCUMENTO - - - - - - - \\n')\n# print(document) \n\n# -- SPATIAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[ current_collection ]\n\n for doc in collection.find( \n { \"geometry\": { \n \"$geoWithin\": {\n \"$geometry\" : {\n \"type\": \"Polygon\" , \n \"coordinates\" : [ \n [\n [-77.49, -89.70],\n [0.00, 0.00],\n [10.00, 10.00],\n [-77.49, -89.70]\n ]\n ]\n } } } } ):\n pprint.pprint( doc )\n\n# -- TEMPORAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = 
db[current_collection]\n for doc in collection.find( { \"properties.time\": 2009002 } ).limit(3):\n pprint.pprint(doc)\n\n# -- TEMPORAL-SPATIAL QUERYING USING 2D INDEX\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[ current_collection ]\n\n for doc in collection.find( \n { \"geometry\": { \n \"$geoWithin\": {\n \"$geometry\" : {\n \"type\": \"Polygon\" , \n \"coordinates\" : [ \n [\n [-77.49, -89.70],\n [0.00, 0.00],\n [10.00, 10.00],\n [-77.49, -89.70]\n ]\n ]\n } } }, \"properties.time\": 2009003 } ):\n pprint.pprint( doc )\n\n# collection = db['quikscat-l2b12-001']\n# cursor = collection.find({})\n# for document in cursor:\n# pprint.pprint( document )\n",
"from pymongo import MongoClient, GEOSPHERE, GEO2D\nimport os, sys, json, pprint\nsys.path.insert(0, '../utils')\nimport path_functions\nclient = MongoClient('localhost', 27017)\ndb = client['nfcdata']\njson_files_path_list = path_functions.get_json_files(\n '../../ftp-data/geojson-files/quikscat-l2b12')\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"<import token>\nsys.path.insert(0, '../utils')\n<import token>\nclient = MongoClient('localhost', 27017)\ndb = client['nfcdata']\njson_files_path_list = path_functions.get_json_files(\n '../../ftp-data/geojson-files/quikscat-l2b12')\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\ncollection_list = db.collection_names()\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"<import token>\nsys.path.insert(0, '../utils')\n<import token>\n<assignment token>\nfor json_file in json_files_path_list:\n current_collection = ('GeoJSON-quikscat-l2b12-' + path_functions.\n get_file_name(json_file))\n print(current_collection)\n collection_list = db.collection_names()\n if current_collection not in collection_list:\n collection = db[current_collection]\n collection.create_index([('geometry', GEOSPHERE)])\n json_docs = json.load(open(json_file))\n for doc in json_docs['features']:\n collection.insert(doc)\nprint(db.collection_names())\n<assignment token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}}):\n pprint.pprint(doc)\n<assignment token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'properties.time': 2009002}).limit(3):\n pprint.pprint(doc)\n<assignment token>\nfor current_collection in collection_list:\n collection = db[current_collection]\n for doc in collection.find({'geometry': {'$geoWithin': {'$geometry': {\n 'type': 'Polygon', 'coordinates': [[[-77.49, -89.7], [0.0, 0.0], [\n 10.0, 10.0], [-77.49, -89.7]]]}}}, 'properties.time': 2009003}):\n pprint.pprint(doc)\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,042 |
65d5cee6899b0b75474e3898459bf2cfa8b3635b
|
def solve(bt):
    """Depth-first extend partial sequence `bt`; print the first length-n
    "good" sequence (see is_good) and terminate the program."""
    if len(bt) == n:
        print(*bt, sep="")
        exit()
    for digit in (1, 2, 3):
        candidate = bt + [digit]
        if is_good(candidate):
            solve(candidate)
def is_good(arr):
for i in range(1, len(arr)//2+1):
if arr[-i:] == arr[-(i*2):-i]:
return False
return True
if __name__ == "__main__":
n = int(input())
solve([1])
|
[
"def solve(bt):\n if len(bt) == n:\n print(*bt, sep=\"\")\n exit()\n\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr)//2+1):\n if arr[-i:] == arr[-(i*2):-i]:\n return False\n return True\n\nif __name__ == \"__main__\":\n n = int(input())\n\n solve([1])",
"def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n n = int(input())\n solve([1])\n",
"def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<code token>\n",
"def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\n<function token>\n<code token>\n",
"<function token>\n<function token>\n<code token>\n"
] | false |
1,043 |
6be285f9c48a20934c1846785232a73373c7d547
|
##armstrong number##
##n= int(input('enter a number '))
##a=n
##s=0
##
##while n>0:
## rem= n%10
## s= s+rem*rem*rem
## n= n//10
##if a==s:
## print(a,' is an armstrong number')
##else:
## print(a,' is not an armstrong number')
##palindrome or not##
##n= int(input('enter a number '))
##a=n
##rev=0
##
##while n>0:
## rem= n%10
## rev= rev*10+rem
## n= n//10
##if a==rev:
## print(a,' is a palindrome number')
##else:
## print(a,' is not a palindrome number')
##factorial of a number using while loop##
##n= int(input('enter a number '))
##i=1
##a=n
##fact=1
##
##while i<=n:##n>0
## fact*= n*i##fact*=n
## n-=1
##print(fact,' is the factorial of ',a)
##factorial of a number using for loop##
##n= int(input('enter a number '))
##a=n
##fact=1
##
##for i in range(1,n+1):##(n,0,-1)
## fact*=i
##print(fact,' is the factorial of ',a)
##harshed number ##
##n= int(input('enter a number '))
##a=n
##s=0
##
##while n>0:
## rem= n%10
## s+=rem
## n//=10
##print(s,' is the sum of ',a)
##if a%s==0:
## print(a,' is a harshed number')
##else:
## print(a,' is not a harshed number')
##fibonocci series using while loop##
##n= int(input('enter a range '))
##a=0
##b=1
##print(a,b,end=" ")
##count=3
##
##while count<=n:
## s= a+b
## print(s,end=" ")
## a=b
## b=s
## count+=1
##fibonocci series using for loop##
##n= int(input('enter a range '))
##a=0
##b=1
##print(a,b,end=' ')
##
##for count in range(1,n-1):##(2,n)
## s= a+b
## print(s,end=' ')
## a=b
## b=s
##previous number of fibnocci series from the given number##
##n= int(input('enter a number '))
##a=0
##b=1
##for i in range(3,n+1):
## s=a+b
## a=b
## b=s
## if b>=n:
## print(a)
## break
##next number of fibnocci series from the given number##
##n= int(input('enter a number '))
##a=0
##b=1
##for i in range(3,n+1):
## s=a+b
## a=b
## b=s
## if b>=n:
## print(b)
## break
##perfect numbers using for loop##
##n= int(input('enter a number '))
##a=n
##s=0
##
##for i in range(1,n):#(1,(n//2)+1)#
## div=n%i
## if n%i==0:
## s+=i
## else:
## continue
##
##if s==a:
## print(a,' is a perfect number')
##else:
## print(a,' is not a perfect number')
##perfect numbers using while loop##
##n= int(input('enter a number '))
##a=n
##s=0
##i=1
##
##while i<n:
## if n%i==0:
## s+=i
## i+=1
##
##if s==a:
## print(a,' is a perfect number')
##else:
## print(a,' is not a perfect number')
|
[
"##armstrong number##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##s=0\r\n##\r\n##while n>0:\r\n## rem= n%10\r\n## s= s+rem*rem*rem\r\n## n= n//10\r\n##if a==s:\r\n## print(a,' is an armstrong number')\r\n##else:\r\n## print(a,' is not an armstrong number')\r\n\r\n\r\n\r\n\r\n\r\n##palindrome or not##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##rev=0\r\n##\r\n##while n>0:\r\n## rem= n%10\r\n## rev= rev*10+rem\r\n## n= n//10\r\n##if a==rev:\r\n## print(a,' is a palindrome number')\r\n##else:\r\n## print(a,' is not a palindrome number')\r\n\r\n\r\n\r\n\r\n\r\n##factorial of a number using while loop##\r\n##n= int(input('enter a number '))\r\n##i=1\r\n##a=n\r\n##fact=1\r\n##\r\n##while i<=n:##n>0\r\n## fact*= n*i##fact*=n\r\n## n-=1\r\n##print(fact,' is the factorial of ',a)\r\n\r\n\r\n\r\n\r\n\r\n##factorial of a number using for loop##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##fact=1\r\n##\r\n##for i in range(1,n+1):##(n,0,-1)\r\n## fact*=i\r\n##print(fact,' is the factorial of ',a)\r\n \r\n\r\n\r\n\r\n\r\n##harshed number ##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##s=0\r\n##\r\n##while n>0:\r\n## rem= n%10\r\n## s+=rem\r\n## n//=10\r\n##print(s,' is the sum of ',a)\r\n##if a%s==0:\r\n## print(a,' is a harshed number')\r\n##else:\r\n## print(a,' is not a harshed number')\r\n\r\n\r\n\r\n\r\n\r\n##fibonocci series using while loop##\r\n##n= int(input('enter a range '))\r\n##a=0\r\n##b=1\r\n##print(a,b,end=\" \")\r\n##count=3\r\n##\r\n##while count<=n:\r\n## s= a+b\r\n## print(s,end=\" \")\r\n## a=b\r\n## b=s\r\n## count+=1\r\n\r\n\r\n\r\n\r\n\r\n##fibonocci series using for loop##\r\n##n= int(input('enter a range '))\r\n##a=0\r\n##b=1\r\n##print(a,b,end=' ')\r\n##\r\n##for count in range(1,n-1):##(2,n)\r\n## s= a+b\r\n## print(s,end=' ')\r\n## a=b\r\n## b=s\r\n\r\n\r\n\r\n\r\n\r\n##previous number of fibnocci series from the given number##\r\n##n= int(input('enter a number '))\r\n##a=0\r\n##b=1\r\n##for i in range(3,n+1):\r\n## 
s=a+b\r\n## a=b\r\n## b=s\r\n## if b>=n:\r\n## print(a)\r\n## break\r\n\r\n\r\n\r\n\r\n\r\n##next number of fibnocci series from the given number##\r\n##n= int(input('enter a number '))\r\n##a=0\r\n##b=1\r\n##for i in range(3,n+1):\r\n## s=a+b\r\n## a=b\r\n## b=s\r\n## if b>=n:\r\n## print(b)\r\n## break\r\n\r\n\r\n\r\n\r\n\r\n##perfect numbers using for loop##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##s=0\r\n##\r\n##for i in range(1,n):#(1,(n//2)+1)#\r\n## div=n%i\r\n## if n%i==0:\r\n## s+=i\r\n## else:\r\n## continue\r\n## \r\n##if s==a:\r\n## print(a,' is a perfect number')\r\n##else:\r\n## print(a,' is not a perfect number')\r\n\r\n\r\n\r\n\r\n\r\n##perfect numbers using while loop##\r\n##n= int(input('enter a number '))\r\n##a=n\r\n##s=0\r\n##i=1\r\n##\r\n##while i<n:\r\n## if n%i==0:\r\n## s+=i\r\n## i+=1\r\n## \r\n##if s==a:\r\n## print(a,' is a perfect number')\r\n##else:\r\n## print(a,' is not a perfect number')\r\n",
""
] | false |
1,044 |
3908d303d0e41677aae332fbdbe9b681bffe5391
|
import os
from datetime import timedelta
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
# SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(
# os.path.join(ROOT_PATH, 's_shop_flask.db'))
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}'
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
# 图片上传路径
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
|
[
"import os\nfrom datetime import timedelta\n\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\n\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\n# SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n# os.path.join(ROOT_PATH, 's_shop_flask.db'))\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\n\nSQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}'\n\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\n\n# 图片上传路径\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"import os\nfrom datetime import timedelta\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\nSQLALCHEMY_DATABASE_URI = (\n f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"<import token>\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\nSQLALCHEMY_DATABASE_URI = (\n f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"<import token>\n<assignment token>\n"
] | false |
1,045 |
874b87ca20385aa15cc7299707c9c1c0360ace43
|
from PyQt4 import QtCore
SceneName = "sphere"
DefaultColor = QtCore.Qt.yellow
|
[
"from PyQt4 import QtCore\r\n\r\n\r\nSceneName = \"sphere\"\r\nDefaultColor = QtCore.Qt.yellow\r\n",
"from PyQt4 import QtCore\nSceneName = 'sphere'\nDefaultColor = QtCore.Qt.yellow\n",
"<import token>\nSceneName = 'sphere'\nDefaultColor = QtCore.Qt.yellow\n",
"<import token>\n<assignment token>\n"
] | false |
1,046 |
86fdea2ae8e253aa4639bb3114de70c693536760
|
from django.db import models
from django.contrib import admin
from django.utils import timezone
class Libros(models.Model):
ISBN = models.CharField(max_length=13,primary_key=True)
Titulo = models.CharField(max_length=15)
# Portada = models.ImageField(upload_to='imagen/')
Autor = models.CharField(max_length=100)
Editorial = models.CharField(max_length=100)
Pais=models.CharField(max_length=100)
anno= models.IntegerField()
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto= models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo (models.Model):
Fecha_Prestamo=models.DateTimeField(default=timezone.now)
Fecha_Devolucion=models.DateField()
Fecha_Devolucion_Real=models.DateField()
Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)
Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model=Prestamo
extra=1
class LibroAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
class UsuarioAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
|
[
"from django.db import models\nfrom django.contrib import admin\nfrom django.utils import timezone\n\nclass Libros(models.Model):\n ISBN = models.CharField(max_length=13,primary_key=True)\n Titulo = models.CharField(max_length=15)\n # Portada = models.ImageField(upload_to='imagen/')\n Autor = models.CharField(max_length=100)\n Editorial = models.CharField(max_length=100)\n Pais=models.CharField(max_length=100)\n anno= models.IntegerField()\n\n def __str__(self):\n return self.Titulo\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto= models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\n\nclass Prestamo (models.Model):\n Fecha_Prestamo=models.DateTimeField(default=timezone.now)\n Fecha_Devolucion=models.DateField()\n Fecha_Devolucion_Real=models.DateField()\n Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)\n Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)\n\nclass PrestamoInLine(admin.TabularInline):\n model=Prestamo\n extra=1\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n",
"from django.db import models\nfrom django.contrib import admin\nfrom django.utils import timezone\n\n\nclass Libros(models.Model):\n ISBN = models.CharField(max_length=13, primary_key=True)\n Titulo = models.CharField(max_length=15)\n Autor = models.CharField(max_length=100)\n Editorial = models.CharField(max_length=100)\n Pais = models.CharField(max_length=100)\n anno = models.IntegerField()\n\n def __str__(self):\n return self.Titulo\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n\n\nclass Libros(models.Model):\n ISBN = models.CharField(max_length=13, primary_key=True)\n Titulo = models.CharField(max_length=15)\n Autor = models.CharField(max_length=100)\n Editorial = models.CharField(max_length=100)\n Pais = models.CharField(max_length=100)\n anno = models.IntegerField()\n\n def __str__(self):\n return self.Titulo\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n\n\nclass Libros(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.Titulo\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n\n\nclass Libros(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n\n\nclass Usuario(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n\n\nclass Usuario(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Prestamo(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass PrestamoInLine(admin.TabularInline):\n <assignment token>\n <assignment token>\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LibroAdmin(admin.ModelAdmin):\n <assignment token>\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
1,047 |
ca6b064dbd8200c49665eaa944fdf1fc80c25726
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_csv('regression.csv')
print(data)
x=data.iloc[:,0]
y=data.iloc[:,1]
mx=data['X1'].mean()
my=data['Y'].mean()
print(mx,my)
num, den = 0,0
for i in range(len(x)):
num += (x[i] - mx)*(y[i]-my)
den += (x[i]-mx)**2
beta1 = num/den
beta0 =my-(beta1*mx)
print(beta1,beta0)
Y_predict=beta1*x + beta0
plt.scatter(x,y)
plt.plot([min(x),max(x)],[min(Y_predict),max(Y_predict)], color='red')
plt.show()
ycap = []
for i in range(len(x)):
xdata =( beta1*x[i])+ beta0
ycap.append(xdata)
print(ycap)
residue=[]
for i in range(len(y)):
l = y[i] - ycap[i]
residue.append(l)
print(residue)
residualsum=sum(residue)
print(residualsum)
|
[
"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndata=pd.read_csv('regression.csv')\r\nprint(data)\r\nx=data.iloc[:,0]\r\ny=data.iloc[:,1]\r\nmx=data['X1'].mean()\r\nmy=data['Y'].mean()\r\nprint(mx,my)\r\n\r\nnum, den = 0,0\r\nfor i in range(len(x)):\r\n num += (x[i] - mx)*(y[i]-my)\r\n den += (x[i]-mx)**2\r\nbeta1 = num/den\r\nbeta0 =my-(beta1*mx)\r\nprint(beta1,beta0)\r\nY_predict=beta1*x + beta0\r\nplt.scatter(x,y)\r\n\r\nplt.plot([min(x),max(x)],[min(Y_predict),max(Y_predict)], color='red')\r\nplt.show()\r\n\r\nycap = []\r\nfor i in range(len(x)):\r\n xdata =( beta1*x[i])+ beta0\r\n ycap.append(xdata)\r\nprint(ycap)\r\nresidue=[]\r\nfor i in range(len(y)):\r\n l = y[i] - ycap[i]\r\n residue.append(l)\r\nprint(residue)\r\nresidualsum=sum(residue)\r\nprint(residualsum)\r\n\r\n\r\n\r\n",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata = pd.read_csv('regression.csv')\nprint(data)\nx = data.iloc[:, 0]\ny = data.iloc[:, 1]\nmx = data['X1'].mean()\nmy = data['Y'].mean()\nprint(mx, my)\nnum, den = 0, 0\nfor i in range(len(x)):\n num += (x[i] - mx) * (y[i] - my)\n den += (x[i] - mx) ** 2\nbeta1 = num / den\nbeta0 = my - beta1 * mx\nprint(beta1, beta0)\nY_predict = beta1 * x + beta0\nplt.scatter(x, y)\nplt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')\nplt.show()\nycap = []\nfor i in range(len(x)):\n xdata = beta1 * x[i] + beta0\n ycap.append(xdata)\nprint(ycap)\nresidue = []\nfor i in range(len(y)):\n l = y[i] - ycap[i]\n residue.append(l)\nprint(residue)\nresidualsum = sum(residue)\nprint(residualsum)\n",
"<import token>\ndata = pd.read_csv('regression.csv')\nprint(data)\nx = data.iloc[:, 0]\ny = data.iloc[:, 1]\nmx = data['X1'].mean()\nmy = data['Y'].mean()\nprint(mx, my)\nnum, den = 0, 0\nfor i in range(len(x)):\n num += (x[i] - mx) * (y[i] - my)\n den += (x[i] - mx) ** 2\nbeta1 = num / den\nbeta0 = my - beta1 * mx\nprint(beta1, beta0)\nY_predict = beta1 * x + beta0\nplt.scatter(x, y)\nplt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')\nplt.show()\nycap = []\nfor i in range(len(x)):\n xdata = beta1 * x[i] + beta0\n ycap.append(xdata)\nprint(ycap)\nresidue = []\nfor i in range(len(y)):\n l = y[i] - ycap[i]\n residue.append(l)\nprint(residue)\nresidualsum = sum(residue)\nprint(residualsum)\n",
"<import token>\n<assignment token>\nprint(data)\n<assignment token>\nprint(mx, my)\n<assignment token>\nfor i in range(len(x)):\n num += (x[i] - mx) * (y[i] - my)\n den += (x[i] - mx) ** 2\n<assignment token>\nprint(beta1, beta0)\n<assignment token>\nplt.scatter(x, y)\nplt.plot([min(x), max(x)], [min(Y_predict), max(Y_predict)], color='red')\nplt.show()\n<assignment token>\nfor i in range(len(x)):\n xdata = beta1 * x[i] + beta0\n ycap.append(xdata)\nprint(ycap)\n<assignment token>\nfor i in range(len(y)):\n l = y[i] - ycap[i]\n residue.append(l)\nprint(residue)\n<assignment token>\nprint(residualsum)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,048 |
5c415d5bf9d6952863a662d300cb1f706ef02a8f
|
import openerp
from openerp import pooler
from openerp.report import report_sxw
import xlwt
from openerp.addons.report_xls.report_xls import report_xls
from openerp.tools.translate import _
class openacademy_course_xls_parser(report_sxw.rml_parse):
def __init__(self, cursor, uid, name, context):
super(openacademy_course_xls_parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('COURSE LIST'),
})
_column_sizes = [
('0',30),
('1',30),
('2',20)
]
import time
class openacademy_course_xls(report_xls):
column_sizes = [x[1] for x in _column_sizes]
def generate_xls_report(self, _p, _xs, data, objects, wb):
ws = wb.add_sheet(_p.report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0
ws.fit_width_to_pages = 1
row_pos = 6
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
#write empty to define column
c_sizes = self.column_sizes
c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in range(0,len(c_sizes))]
cell_format = _xs['bold'] + _xs['underline']
so_style = xlwt.easyxf(cell_format)
cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']
table_title_style = xlwt.easyxf(cell_format)
cell_format = _xs['right']
right_style = xlwt.easyxf(cell_format)
cell_format = _xs['underline'] + _xs['right']
underline_style = xlwt.easyxf(cell_format)
for so in objects:
c_specs = [('title',3,0,'text','Subject: %s' %(so.name)),]
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(ws, row_pos, row_data)
ws.set_horz_split_pos(row_pos)
openacademy_course_xls('report.openacademy.course.list.xls','openacademy.course', parser=openacademy_course_xls_parser)
|
[
"import openerp\nfrom openerp import pooler\nfrom openerp.report import report_sxw\nimport xlwt\nfrom openerp.addons.report_xls.report_xls import report_xls\nfrom openerp.tools.translate import _\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n\tdef __init__(self, cursor, uid, name, context):\n\t\tsuper(openacademy_course_xls_parser, self).__init__(cursor, uid, name, context=context)\n\t\tself.pool = pooler.get_pool(self.cr.dbname)\n\t\tself.cursor = self.cr\n\n\t\tself.localcontext.update({\n\t\t\t'cr': cursor,\n\t\t\t'uid': uid,\n\t\t\t'report_name': _('COURSE LIST'),\n\t\t\t})\n\n_column_sizes = [\n\t('0',30),\n\t('1',30),\n\t('2',20)\n]\n\nimport time\n\nclass openacademy_course_xls(report_xls):\n\tcolumn_sizes = [x[1] for x in _column_sizes]\n\n\tdef generate_xls_report(self, _p, _xs, data, objects, wb):\n\t\tws = wb.add_sheet(_p.report_name[:31])\n\t\tws.panes_frozen = True\n\t\tws.remove_splits = True\n\t\tws.portrait = 0\n\t\tws.fit_width_to_pages = 1\n\t\trow_pos = 6\n\n\t\tws.header_str = self.xls_headers['standard']\n\t\tws.footer_str = self.xls_footers['standard']\n\n\t\t#write empty to define column\n\t\tc_sizes = self.column_sizes\n\t\tc_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in range(0,len(c_sizes))]\n\t\tcell_format = _xs['bold'] + _xs['underline']\n\t\tso_style = xlwt.easyxf(cell_format)\n\n\t\tcell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n\t\ttable_title_style = xlwt.easyxf(cell_format)\n\n\t\tcell_format = _xs['right']\n\t\tright_style = xlwt.easyxf(cell_format)\n\n\t\tcell_format = _xs['underline'] + _xs['right']\n\t\tunderline_style = xlwt.easyxf(cell_format)\n\n\t\tfor so in objects:\n\t\t\tc_specs = [('title',3,0,'text','Subject: %s' %(so.name)),]\n\n\t\t\trow_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n\t\t\trow_pos = self.xls_write_row(ws, row_pos, 
row_data)\n\t\t\tws.set_horz_split_pos(row_pos)\n\n\n\nopenacademy_course_xls('report.openacademy.course.list.xls','openacademy.course', parser=openacademy_course_xls_parser)",
"import openerp\nfrom openerp import pooler\nfrom openerp.report import report_sxw\nimport xlwt\nfrom openerp.addons.report_xls.report_xls import report_xls\nfrom openerp.tools.translate import _\n\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n\n def __init__(self, cursor, uid, name, context):\n super(openacademy_course_xls_parser, self).__init__(cursor, uid,\n name, context=context)\n self.pool = pooler.get_pool(self.cr.dbname)\n self.cursor = self.cr\n self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':\n _('COURSE LIST')})\n\n\n_column_sizes = [('0', 30), ('1', 30), ('2', 20)]\nimport time\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\nopenacademy_course_xls('report.openacademy.course.list.xls',\n 'openacademy.course', parser=openacademy_course_xls_parser)\n",
"<import token>\n\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n\n def __init__(self, cursor, uid, name, context):\n super(openacademy_course_xls_parser, self).__init__(cursor, uid,\n name, context=context)\n self.pool = pooler.get_pool(self.cr.dbname)\n self.cursor = self.cr\n self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':\n _('COURSE LIST')})\n\n\n_column_sizes = [('0', 30), ('1', 30), ('2', 20)]\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\nopenacademy_course_xls('report.openacademy.course.list.xls',\n 'openacademy.course', parser=openacademy_course_xls_parser)\n",
"<import token>\n\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n\n def __init__(self, cursor, uid, name, context):\n super(openacademy_course_xls_parser, self).__init__(cursor, uid,\n name, context=context)\n self.pool = pooler.get_pool(self.cr.dbname)\n self.cursor = self.cr\n self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':\n _('COURSE LIST')})\n\n\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\nopenacademy_course_xls('report.openacademy.course.list.xls',\n 'openacademy.course', parser=openacademy_course_xls_parser)\n",
"<import token>\n\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n\n def __init__(self, cursor, uid, name, context):\n super(openacademy_course_xls_parser, self).__init__(cursor, uid,\n name, context=context)\n self.pool = pooler.get_pool(self.cr.dbname)\n self.cursor = self.cr\n self.localcontext.update({'cr': cursor, 'uid': uid, 'report_name':\n _('COURSE LIST')})\n\n\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\n<code token>\n",
"<import token>\n\n\nclass openacademy_course_xls_parser(report_sxw.rml_parse):\n <function token>\n\n\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n column_sizes = [x[1] for x in _column_sizes]\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n <assignment token>\n\n def generate_xls_report(self, _p, _xs, data, objects, wb):\n ws = wb.add_sheet(_p.report_name[:31])\n ws.panes_frozen = True\n ws.remove_splits = True\n ws.portrait = 0\n ws.fit_width_to_pages = 1\n row_pos = 6\n ws.header_str = self.xls_headers['standard']\n ws.footer_str = self.xls_footers['standard']\n c_sizes = self.column_sizes\n c_specs = [('empty%s' % i, 1, c_sizes[i], 'text', None) for i in\n range(0, len(c_sizes))]\n cell_format = _xs['bold'] + _xs['underline']\n so_style = xlwt.easyxf(cell_format)\n cell_format = _xs['bold'] + _xs['borders_all'] + _xs['center']\n table_title_style = xlwt.easyxf(cell_format)\n cell_format = _xs['right']\n right_style = xlwt.easyxf(cell_format)\n cell_format = _xs['underline'] + _xs['right']\n underline_style = xlwt.easyxf(cell_format)\n for so in objects:\n c_specs = [('title', 3, 0, 'text', 'Subject: %s' % so.name)]\n row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])\n row_pos = self.xls_write_row(ws, row_pos, row_data)\n ws.set_horz_split_pos(row_pos)\n\n\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<import token>\n\n\nclass openacademy_course_xls(report_xls):\n <assignment token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<import token>\n<class token>\n<code token>\n"
] | false |
1,049 |
88590aef975f7e473ef964ee0c4004cff7e24b07
|
#!/usr/bin/env python3
import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio

if __name__ == '__main__':

    # Load the 10000-keyword vocabulary used as classifier features.
    # (with-block fixes the original's unclosed file handles)
    with open("./src/keywords.txt", "rb") as fs:
        keywords = fs.read().decode("utf-8").split(",")

    def find_features(doc):
        """Map a tokenized document to the bag-of-keywords feature dict."""
        words = set(doc)
        return {"contains %s" % word: (word in words) for word in keywords}

    # Load the pre-trained nltk classifier from its pickle.
    with open('./src/my_classifier.pickle', 'rb') as fs:
        classifier = pickle.load(fs)

    # Matches CJK unified ideographs (Chinese characters).
    regex = re.compile("[\u4e00-\u9fa5]")

    p = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2", version="%prog 0.1", prog="url-tagger")
    p.add_option("--url", "-u", help="Your url")
    p.add_option("--file", "-f", help="Your url file. One line one url")
    (options, arguments) = p.parse_args()

    url_list = []
    for key, value in options.__dict__.items():
        if value is not None:
            print("%s: %s" % (key, value))
            # BUG FIX: compare option names with ==, not identity ("is"),
            # which only worked by accident of string interning.
            if key == "url":
                url_list.append(value)
            else:
                # Read-only access is enough; "rb+" required write permission.
                with open(value, "rb") as url_file:
                    for line in url_file:
                        url_list.append(str(line, encoding="utf-8").strip())

    # Fetch each url and strip it down to its visible text.
    # async def replaces the deprecated @asyncio.coroutine decorator
    # (removed in Python 3.11); behavior is unchanged.
    async def get_docs(url):
        """Fetch *url* and return (url, visible page text with whitespace removed)."""
        response = requests.get(url=url, headers={'Accept-Encoding': ''})
        html = str(response.content, encoding=response.apparent_encoding, errors="ignore")
        soup = BeautifulSoup(html, "lxml")
        # Drop script/style nodes so only human-visible text remains.
        for script in soup(["script", "style"]):
            script.extract()
        text = soup.get_text()
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
        text = "".join(chunk for chunk in chunks if chunk)
        return url, text

    loop = asyncio.get_event_loop()
    tasks = [asyncio.ensure_future(get_docs(url)) for url in url_list]
    data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
    loop.close()

    # Classify: keep only the Chinese characters, segment with jieba,
    # then run the keyword features through the pickled classifier.
    results = [(url, classifier.classify(find_features(jieba.lcut("".join(regex.findall(data)))))) for (url, data)
               in data_list]

    # Print one "url: category" line per input url.
    for (url, category) in results:
        print("%s: %s" % (url, category))
|
[
"#!/usr/bin/env python3\n\nimport optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\n\nif __name__ == '__main__':\n\n # 读取10000个关键词\n fs = open(\"./src/keywords.txt\", \"rb\")\n keywords = fs.read().decode(\"utf-8\").split(\",\")\n fs.close()\n\n # 找出特征\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features[\"contains %s\" % word] = (word in words)\n return features\n\n # 读取预先做好的nltk分词器\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n\n # 匹配中文字符\n regex = re.compile(\"[\\u4e00-\\u9fa5]\")\n\n p = optparse.OptionParser(usage=\"usage: %prog [options] arg1 arg2\", version=\"%prog 0.1\", prog=\"url-tagger\")\n p.add_option(\"--url\", \"-u\", help=\"Your url\")\n p.add_option(\"--file\", \"-f\", help=\"Your url file. One line one url\")\n (options, arguments) = p.parse_args()\n\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print(\"%s: %s\" % (key, value))\n if key is \"url\":\n url_list.append(value)\n else:\n url_file = open(value, \"rb+\")\n for line in url_file.readlines():\n url_list.append(str(line, encoding=\"utf-8\").strip())\n\n\n # 异步发起http请求\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n # print(response.apparent_encoding)\n html = str(response.content, encoding=response.apparent_encoding, errors=\"ignore\")\n soup = BeautifulSoup(html, \"lxml\")\n for script in soup([\"script\", \"style\"]):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n text = \"\".join(chunk for chunk in chunks if chunk)\n # print(text)\n return url, text\n\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))\n data_list = 
list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n\n # 分类器进行分类\n results = [(url, classifier.classify(find_features(jieba.lcut(\"\".join(regex.findall(data)))))) for (url, data)\n in data_list]\n\n # 打印结果\n for (url, category) in results:\n print(\"%s: %s\" % (url, category))\n\n",
"import optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, 
category))\n",
"<import token>\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, category))\n",
"<import token>\n<code token>\n"
] | false |
1,050 |
7fe7ea89908f9d233dbdb9e46bf2d677406ab324
|
import networkx as nx
import pytest
from caldera.utils.nx import nx_copy
def add_data(g):
    """Populate *g* with a tiny fixture: nodes 1-2 and edges (1,2), (2,3)."""
    g.add_nodes_from([(1, {}), (2, {"x": 5})])
    g.add_edges_from([(1, 2, {"y": 6}), (2, 3, {"z": []})])
def assert_graph_data(g1, g2):
    """Check that *g2* carries the add_data fixture, with attrs copied from *g1*."""
    assert g2 is not g1
    assert g2.nodes[1] == {}
    assert g2.nodes[2] == {"x": 5}
    assert g2.edges[1, 2] == {"y": 6}
    assert g2.edges[2, 3] == {"z": []}
    # Attribute dicts must be copies, not shared references.
    assert g1.nodes[2] is not g2.nodes[2]
    assert g1.edges[2, 3] is not g2.edges[2, 3]
assert g2.edges[(2, 3)] is not g1.edges[(2, 3)]
@pytest.mark.parametrize("do_deepcopy", [True, False])
def test_nx_copy_with_deepcopy(do_deepcopy):
    """Edge-attribute objects are shared unless deepcopy=True is requested."""
    src = nx.Graph()
    dst = nx.DiGraph()
    add_data(src)
    nx_copy(src, dst, deepcopy=do_deepcopy)
    assert_graph_data(src, dst)
    shared = dst.edges[2, 3]["z"] is src.edges[2, 3]["z"]
    assert shared != do_deepcopy
def test_nx_copy_with_none():
    """Passing None as the target makes nx_copy create a fresh graph."""
    source = nx.Graph()
    add_data(source)
    copied = nx_copy(source, None)
    assert_graph_data(source, copied)
def test_nx_copy_with_class():
    """Passing a graph class as the target instantiates that class."""
    source = nx.Graph()
    add_data(source)
    copied = nx_copy(source, nx.OrderedDiGraph)
    # Sanity check: OrderedDiGraph is itself a Graph subclass.
    assert isinstance(nx.OrderedDiGraph, type) and issubclass(
        nx.OrderedDiGraph, nx.Graph
    )
    assert isinstance(copied, nx.OrderedDiGraph)
    assert_graph_data(source, copied)
def test_nx_copy_node_transform():
    """node_transform can rewrite node ids (here int -> str) during the copy."""
    source = nx.Graph()
    source.add_node(1)
    source.add_node(2)
    source.add_edge(1, 2, f=4)
    source.add_edge(2, 3, f=5)

    def stringify_nodes(node_items):
        for node, data in node_items:
            yield str(node), data

    copied = nx_copy(source, None, node_transform=stringify_nodes)
    assert copied.number_of_nodes() == 3
    assert copied.number_of_edges() == 2
    for label in ("1", "2"):
        assert label in copied
    for label in (1, 2):
        assert label not in copied
    assert copied.edges["1", "2"] == {"f": 4}
    assert copied.edges["2", "3"] == {"f": 5}
def test_nx_copy_edge_transform():
    """edge_transform can drop edges and rewrite edge attributes during the copy."""
    source = nx.Graph()
    source.add_node(1)
    source.add_node(2)
    source.add_edge(1, 2, f=4)
    source.add_edge(2, 3, f=5)
    source.add_edge(4, 5)
    assert source.number_of_edges() == 3
    assert source.number_of_nodes() == 5

    def rewrite_edges(edge_items):
        for u, v, data in edge_items:
            if u == 4:
                continue  # drop the (4, 5) edge entirely
            yield u, v, {"f": 8}

    copied = nx_copy(source, None, edge_transform=rewrite_edges)
    # Nodes survive even when their edges are dropped.
    assert copied.number_of_nodes() == 5
    assert copied.number_of_edges() == 2
    assert copied.edges[1, 2] == {"f": 8}
    assert copied.edges[2, 3] == {"f": 8}
def test_nx_copy_global_transform():
    """global_transform can mutate the graph-level attribute dict during the copy."""
    source = nx.Graph()
    source.add_node(1)
    source.add_node(2)
    source.add_edge(1, 2, f=4)
    source.add_edge(2, 3, f=5)
    source.add_edge(4, 5)
    source.get_global()["f"] = 8
    assert source.number_of_edges() == 3
    assert source.number_of_nodes() == 5

    def inject_x(global_items):
        for key, gdata in global_items:
            gdata["x"] = 4
            yield key, gdata

    copied = nx_copy(source, None, global_transform=inject_x)
    assert copied.get_global() == {"x": 4, "f": 8}
|
[
"import networkx as nx\nimport pytest\n\nfrom caldera.utils.nx import nx_copy\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {\"x\": 5}\n assert g2.edges[(1, 2)] == {\"y\": 6}\n assert g2.edges[(2, 3)] == {\"z\": []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[(2, 3)] is not g1.edges[(2, 3)]\n\n\[email protected](\"do_deepcopy\", [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[(2, 3)][\"z\"] is g.edges[(2, 3)][\"z\"]) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(\n nx.OrderedDiGraph, nx.Graph\n )\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert \"1\" in g2\n assert \"2\" in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges[(\"1\", \"2\")] == {\"f\": 4}\n assert g2.edges[(\"2\", \"3\")] == {\"f\": 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, 
{\"f\": 8}\n\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[(1, 2)] == {\"f\": 8}\n assert g2.edges[(2, 3)] == {\"f\": 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()[\"f\"] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata[\"x\"] = 4\n yield _, gdata\n\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {\"x\": 4, \"f\": 8}\n",
"import networkx as nx\nimport pytest\nfrom caldera.utils.nx import nx_copy\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, 
edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()['f'] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata['x'] = 4\n yield _, gdata\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {'x': 4, 'f': 8}\n",
"<import token>\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert 
g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\ndef test_nx_copy_global_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n g.get_global()['f'] = 8\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def global_transform(g):\n for _, gdata in g:\n gdata['x'] = 4\n yield _, gdata\n g2 = nx_copy(g, None, global_transform=global_transform)\n assert g2.get_global() == {'x': 4, 'f': 8}\n",
"<import token>\n\n\ndef add_data(g):\n g.add_node(1)\n g.add_node(2, x=5)\n g.add_edge(1, 2, y=6)\n g.add_edge(2, 3, z=[])\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert 
g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_node_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n\n def node_transform(nodes):\n for n, ndata in nodes:\n yield str(n), ndata\n g2 = nx_copy(g, None, node_transform=node_transform)\n assert g2.number_of_nodes() == 3\n assert g2.number_of_edges() == 2\n assert '1' in g2\n assert '2' in g2\n assert 1 not in g2\n assert 2 not in g2\n assert g2.edges['1', '2'] == {'f': 4}\n assert g2.edges['2', '3'] == {'f': 5}\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 
8}\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\ndef assert_graph_data(g1, g2):\n assert g1 is not g2\n assert g2.nodes[1] == {}\n assert g2.nodes[2] == {'x': 5}\n assert g2.edges[1, 2] == {'y': 6}\n assert g2.edges[2, 3] == {'z': []}\n assert g2.nodes[2] is not g1.nodes[2]\n assert g2.edges[2, 3] is not g1.edges[2, 3]\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\n<function token>\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\ndef test_nx_copy_with_none():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, None)\n assert_graph_data(g, g2)\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\n<function token>\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\n<function token>\n\n\ndef test_nx_copy_with_class():\n g = nx.Graph()\n add_data(g)\n g2 = nx_copy(g, nx.OrderedDiGraph)\n assert isinstance(nx.OrderedDiGraph, type) and issubclass(nx.\n OrderedDiGraph, nx.Graph)\n assert isinstance(g2, nx.OrderedDiGraph)\n assert_graph_data(g, g2)\n\n\n<function token>\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef test_nx_copy_edge_transform():\n g = nx.Graph()\n g.add_node(1)\n g.add_node(2)\n g.add_edge(1, 2, f=4)\n g.add_edge(2, 3, f=5)\n g.add_edge(4, 5)\n assert g.number_of_edges() == 3\n assert g.number_of_nodes() == 5\n\n def edge_transform(edges):\n for n1, n2, edata in edges:\n if n1 != 4:\n yield n1, n2, {'f': 8}\n g2 = nx_copy(g, None, edge_transform=edge_transform)\n assert g2.number_of_nodes() == 5\n assert g2.number_of_edges() == 2\n assert g2.edges[1, 2] == {'f': 8}\n assert g2.edges[2, 3] == {'f': 8}\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\[email protected]('do_deepcopy', [True, False])\ndef test_nx_copy_with_deepcopy(do_deepcopy):\n g = nx.Graph()\n g2 = nx.DiGraph()\n add_data(g)\n nx_copy(g, g2, deepcopy=do_deepcopy)\n assert_graph_data(g, g2)\n assert (g2.edges[2, 3]['z'] is g.edges[2, 3]['z']) != do_deepcopy\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
1,051 |
7cb75195df567a5b65fe2385423b0082f3b9de4b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Module metadata (conventional dunder attributes).
__author__ = "Brateaqu, Farolflu"
__copyright__ = "Copyright 2019"
__credits__ = ["Quentin BRATEAU", "Luca FAROLFI"]
__license__ = "GPL"
__version__ = "1.0"
__email__ = ["[email protected]", "[email protected]"]
# Importing modules
import numpy as np
from GamePieces import Animal, Boulder
class GameMap(list):
    """
    The Gamemap module
    ==================
    Creating the Gamemap.
    This creates the 5x5 gamemap with the moves and positions of the gamepieces to play King of Siam.
    It is inherited from a list: the instance itself is a 5x5 nested list where 0 means "empty cell"
    and a non-empty cell holds an Animal or Boulder instance.
    :Example:
    >>> m = GameMap()
    .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`
    .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>
    """
    def __init__(self):
        super().__init__()
        # Board dimensions (always 5x5 for King of Siam).
        self.xmax = 5
        self.ymax = 5
        # Name-mangled piece counters; exposed read-only through the properties below.
        self.__nb_elephants = 0
        self.__nb_rhinoceros = 0
        self.nb_boulders = 0
        self.nb_crosses = 0
        # Elephants always open the game.
        self.playerTurn = "Elephant"
        self.winner = ""
        # Build the 5x5 grid of empty cells (0 == empty).
        for k in range(self.ymax):
            y = []
            for i in range(self.ymax):
                y.append(0)
            self.append(y)
        for k in range(3): # Setting up the 3 Boulders on the middle row, columns 1..3
            self[2][1+k] = Boulder(2, 1+k)
            self.nb_boulders += 1
    @property
    def nb_elephants(self):
        """
        This is the number of elephants on the gamemap.
        :Getter: Return the number of elephants on the gamemap.
        :Type: int
        :Getter's example:
        >>> m = GameMap()
        >>> ne = m.nb_elephants
        .. note:: The elephant count can not exceed 5.
        .. warning:: the number of elephants can't be changed by hand.
        """
        return self.__nb_elephants
    @nb_elephants.setter
    def nb_elephants(self, x):
        """
        Setting the elephant count is forbidden: the setter only warns.
        .. warning:: the number of elephants can't be changed by hand.
        """
        # Deliberately a no-op: the counter is only mutated by add/delete/move.
        print('Warning ! Changing the number of Elephant is not possible!')
    @property
    def nb_rhinoceros(self):
        """
        This is the number of rhinoceros on the gamemap.
        :Getter: Return the number of rhinoceros on the gamemap.
        :Type: int
        :Getter's example:
        >>> m = GameMap()
        >>> nr = m.nb_rhinoceros
        .. note:: The rhinoceros count can not exceed 5.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        return self.__nb_rhinoceros
    @nb_rhinoceros.setter
    def nb_rhinoceros(self, x):
        """
        Setting the rhinoceros count is forbidden: the setter only warns.
        .. warning:: the number of rhinoceros can't be changed by hand.
        """
        # Deliberately a no-op: the counter is only mutated by add/delete/move.
        print('Warning ! Changing the number of Rhinoceros is not possible!')
    def add(self, animal):
        """
        This method adds a new animal onto the board, with position and orientation.
        :Args:
            :param animal (Animal): the animal to add on the GameMap.
        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.add(a)
        .. note:: the turn does not count if the insertion was not possible
        .. warning:: insertion is only legal on the outer ring (row/col 0 or 4), on an
            empty cell, while the player still has pieces in reserve (fewer than 5 placed)
        .. sectionauthor:: Quentin BRATEAU <[email protected]>
        """
        x, y = animal.coords
        # Legal insertion: matching species reserve not exhausted, edge cell, cell empty.
        if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_elephants += 1
            self.playerTurn = "Rhinoceros"
        elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:
            self[x][y] = animal
            self.__nb_rhinoceros += 1
            self.playerTurn = "Elephant"
        else:
            # NOTE(review): success falls through and returns None, so callers must
            # test "is False" rather than truthiness to detect failure.
            return False
    def delete(self, animal):
        """
        This method removes an animal from the board and decrements the counter
        of that species; the turn then passes to the other player.
        :Args:
            :param animal (Animal): the animal to delete.
        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.delete(a)
        .. sectionauthor:: Luca FAROLFI <[email protected]>
        .. note:: removal is only allowed from the outer ring of the board
        .. warning:: returns False if the piece is not on the edge
        """
        x, y = animal.coords
        # Pieces can only leave the board from the outer ring.
        if x == 0 or x == 4 or y == 0 or y == 4:
            self[x][y] = 0
            # NOTE(review): assumes the piece has a `species` attribute — confirm
            # against GamePieces before passing anything other than an Animal.
            if animal.species == 'Elephant':
                self.__nb_elephants -= 1
            elif animal.species == 'Rhinoceros':
                self.__nb_rhinoceros -= 1
            # Hand the turn to the other player.
            if self.playerTurn == "Elephant":
                self.playerTurn = "Rhinoceros"
            elif self.playerTurn == "Rhinoceros":
                self.playerTurn = "Elephant"
        else:
            return False
    def push_counter(self, x, y, cx, cy, counter = 1, k = 0):
        """
        This recursive method determines if a push move is possible by counting the elements
        having to be pushed, taking their orientation into account.
        It returns a strength counter and the number of pieces in the row; the caller
        performs the push only when the counter is non-negative.
        :Args:
            :param x (int): is the abscissa of the current GamePiece,
            :param y (int): is the ordinate of the current GamePiece,
            :param cx (int): the direction of the move following the x-axis,
            :param cy (int): the direction of the move following the y-axis,
            :param counter (int): the sum of the scalar product of each animal in the row,
            :param k (int): the counter of pawns in the row.
        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.push_counter(0, 1, 1, 0)
        .. sectionauthor:: Luca FAROLFI <[email protected]>
        .. note:: The function has a double use, as without it "move" wouldn't know how many pieces to move
        .. info:: An animal placed sideways does not impact a push; an opponent's animal
            facing the push direction helps the push.
        """
        k += 1
        # Stop when the next cell is off the board or empty: the row ends here.
        if not (0 <= (x+cx) <= 4 and 0 <= y+cy <= 4):
            return counter, k
        elif self[x + cx][y + cy] == 0:
            return counter, k
        elif isinstance(self[x + cx][y + cy], Animal):
            # `direction @ +np.array([cx, cy])` is the dot product of the animal's
            # facing with the push direction: +1 aligned (helps), -1 opposed (resists
            # twice as hard), 0 sideways (neutral).
            if self[x + cx][y + cy].direction @ + np.array([cx, cy]) == 1:
                counter += 1
            elif self[x + cx][y + cy].direction @ + np.array([cx, cy]) == -1:
                counter -= 2
        elif isinstance(self[x + cx][y + cy], Boulder):
            # Each boulder costs one unit of pushing strength.
            counter -= 1
        # Recurse along the push direction until the row ends.
        return self.push_counter(x + cx, y + cy, cx, cy, counter, k)
    def move(self, animal, ncoords, ndir):
        """
        This method moves an animal on the board, as well as turns it.
        If the cell the animal is moving to is taken, the animal pushes the row.
        :Args:
            :param animal (Animal): the animal to move,
            :param ncoords (tuple): the new coordinates of the animal,
            :param ndir (np.array): the new direction of the animal.
        :Example:
            >>> a = Animal(0, 1, np.array([0,1]), "Elephant")
            >>> g = GameMap()
            >>> g.move(a,(1,1),np.array([0,1]))
        .. sectionauthor:: Luca FAROLFI <[email protected]>
        .. note:: player turn does not change if the move is not possible
        .. info:: it is possible to both rotate and move to another position in the same turn
        """
        x, y = animal.coords
        (nx, ny) = ncoords
        # (cx, cy) is the displacement; only one-cell moves are legal.
        cx, cy = nx - x, ny - y
        if abs(cx) > 1 or abs(cy) > 1:
            return False
        # Push branch: target occupied, orthogonal one-cell move, animal already facing the move.
        elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) and (animal.direction[0] == cx and animal.direction[1] == cy):
            res = self.push_counter(x, y, cx, cy, 1)
            c = res[0]
            k = res[1]
            # Push succeeds when accumulated strength is non-negative.
            if c >= 0:
                # Shift the row one cell, starting from the far end to avoid overwrites.
                for i in range(k, 0, -1):
                    # Farthest piece would leave the board: it is ejected instead of shifted.
                    if (x + i * cx) == -1 or (x + i * cx) == 5 or (y + i * cy) == -1 or (y + i * cy) == 5:
                        if isinstance(self[x + (i-1)*cx][y + (i-1)*cy], Animal):
                            # An animal pushed off the board is removed and its counter decremented.
                            self[x + (i-1)*cx][y + (i-1)*cy] = animal
                            if animal.species == 'Elephant':
                                self.__nb_elephants -= 1
                                self[x + (i-1)*cx][y + (i-1)*cy] = 0
                            elif animal.species == 'Rhinoceros':
                                self.__nb_rhinoceros -= 1
                                self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
                        else:
                            # A boulder falls off: the nearest animal facing the push
                            # direction behind it wins the game.
                            self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
                            # NOTE(review): this reuses `k` as loop variable, shadowing
                            # the piece count computed above; and the backward scan can
                            # produce negative indices, which wrap around in Python —
                            # confirm intended behavior near the board edge.
                            for k in range(5):
                                if isinstance(self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy], Animal) and [self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[0], self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[1]] == [cx, cy]:
                                    self.winner=self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].species
                                    print("winner is", self.winner)
                                    break
                    else:
                        # Regular shift: copy the piece one cell forward and update its coords.
                        self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][y + (i - 1) * cy]
                        self[x + (i - 1) * cx][y + (i - 1) * cy] = 0
                        self[x + i * cx][y + i * cy].coords = (x + i * cx, y + i * cy)
                # Push done: hand the turn to the other player.
                if self.playerTurn == "Elephant":
                    self.playerTurn = "Rhinoceros"
                elif self.playerTurn == "Rhinoceros":
                    self.playerTurn = "Elephant"
            else:
                print("Push not possible")
                return (False)
        # Plain move branch: empty target one cell away, or rotate in place (cx == cy == 0).
        elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) or (cx == 0 and cy == 0):
            animal.coords = (nx, ny)
            animal.direction = ndir
            self[x][y] = 0
            self[nx][ny] = animal
            if self.playerTurn == "Elephant":
                self.playerTurn = "Rhinoceros"
            elif self.playerTurn == "Rhinoceros":
                self.playerTurn = "Elephant"
        else:
            # NOTE(review): like add(), a successful move returns None, not True.
            return False
    def __str__(self):
        """
        Show the current state of the game board.
        Cells render as ' 0 ' (empty), ' B ' (boulder), or 'E'/'R' followed by an
        arrow glyph for the animal's facing.
        :return: the string with the characteristics of the board
        :rtype: str
        """
        s = ''
        for i in range(5):
            for j in range(5):
                ani = False
                if self[i][j] == 0:
                    s += ' 0 '
                elif self[i][j].species == 'Elephant':
                    s += ' E'
                    ani = True
                elif self[i][j].species == 'Rhinoceros':
                    s += ' R'
                    ani = True
                else:
                    # NOTE(review): reaching the Boulder case requires Boulder to expose
                    # a `species` attribute that is neither animal name — confirm in GamePieces.
                    s += ' B '
                if ani:
                    # Map the direction vector to an arrow glyph.
                    if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:
                        d = '> '
                    elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:
                        d = '∧ '
                    elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:
                        d = '< '
                    else:
                        d = '∨ '
                    s += d
            s += '\n \n'
        return s
    def save(self, file):
        """
        This method saves a GameMap in a KingOfSiam file with the .kos extension.
        The format is sections ("Boulder {", "Elephant {", "Rhinoceros {") with one
        "(x,y)" or "(x,y) : np.array([dx,dy])" entry per line, ended by ";".
        :Args:
            :param file (file object): is the file in which to write.
        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'w')
            >>> g.save(file)
        .. sectionauthor:: Luca FAROLFI <[email protected]>
        .. note:: this method takes a file object as argument and closes it when done.
        """
        boulders = []
        elephants = []
        rhinos = []
        # Scan the whole board and format every piece into its section list.
        for i in range(5):
            for j in range(5):
                if self[i][j]!= 0:
                    piece = self[i][j]
                    L = []
                    # Boulders have no direction; animals record theirs for reload.
                    if not isinstance(self[i][j], Boulder):
                        L.append(self[i][j].direction[0])
                        L.append(self[i][j].direction[1])
                    if piece.species == "Elephant":
                        elephants.append("(" + str(i) + "," + str(j)+ ") : np.array(["+str(L[0])+ "," + str(L[1])+"])")
                    elif piece.species == "Rhinoceros":
                        rhinos.append("("+str(i)+"," +str(j)+ ") : np.array(["+str(L[0]) + "," + str(L[1])+"])")
                    elif isinstance(piece, Boulder):
                        boulders.append("(" + str(i) + "," + str(j) + ")")
        # Emit header and the three sections; the 4-space indent is what load() expects.
        file.write("# King of Siam GameFile \n\nplayer_turn {\n    " + self.playerTurn + "\n}\n\n")
        file.write("Boulder {")
        for k in range(len(boulders)):
            file.write("\n    " + boulders[k] + ";")
        file.write("\n}\n\nElephant {")
        for elt in elephants:
            file.write("\n    " + elt + ";")
        file.write("\n}\n\nRhinoceros {")
        for elt in rhinos:
            file.write("\n    " + elt + ";")
        file.write("\n}")
        file.close()
    def load(self, file):
        """
        This method loads a KingOfSiam file with the .kos extension into this GameMap.
        The board is cleared first, then the Boulder, Elephant and Rhinoceros
        sections are parsed in order.
        :Args:
            :param file (file object): is the file to load.
        :Example:
            >>> g = GameMap()
            >>> file = open('save.kos', 'r')
            >>> g.load(file)
        .. sectionauthor:: Quentin BRATEAU <[email protected]>
        .. note:: this method takes a file object as argument and closes it when done.
        .. warning:: parsing uses fixed column offsets (f[k][5:8] for coords,
            f[k][22:] for the direction), so coordinates must be single digits and the
            indentation must match what save() writes.
        """
        # Clear the board before loading.
        for i in range(5):
            for j in range(5):
                self[i][j] = 0
        f = file.readlines()
        k = 0
        # --- Boulder section ---
        while k < len(f) and "Boulder {" not in f[k]:
            k += 1
        k += 1
        while ";" in f[k]:
            # Fixed-width parse: chars 5..7 hold "x,y".
            coords = f[k][5:8].split(",")
            x, y = int(coords[0]), int(coords[1])
            self[x][y] = Boulder(x, y)
            k += 1
        # --- Elephant section ---
        while k < len(f) and "Elephant {" not in f[k]:
            k += 1
        k += 1
        while ":" in f[k] and ";" in f[k]:
            coords = f[k][5:8].split(",")
            x, y = int(coords[0]), int(coords[1])
            # Direction components start at char 22, up to the closing "]".
            d = f[k][22:].split("]")[0].split(",")
            xdir, ydir = 0, 0
            if d[0] == "1":
                xdir = 1
            elif d[0] == "-1":
                xdir = -1
            if d[1] == "1":
                ydir = 1
            elif d[1] == "-1":
                ydir = -1
            direction = np.array([xdir, ydir])
            self[x][y] = Animal(x, y, direction, 'Elephant')
            k += 1
        # --- Rhinoceros section ---
        while k < len(f) and "Rhinoceros {" not in f[k]:
            k += 1
        k += 1
        while ":" in f[k] and ";" in f[k]:
            coords = f[k][5:8].split(",")
            x, y = int(coords[0]), int(coords[1])
            d = f[k][22:].split("]")[0].split(",")
            xdir, ydir = 0, 0
            if d[0] == "1":
                xdir = 1
            elif d[0] == "-1":
                xdir = -1
            if d[1] == "1":
                ydir = 1
            elif d[1] == "-1":
                ydir = -1
            direction = np.array([xdir, ydir])
            self[x][y] = Animal(x, y, direction, 'Rhinoceros')
            k += 1
        file.close()
if __name__ == '__main__':
    # Demo entry point: build a fresh board and print its text rendering.
    board = GameMap()
    print(board)
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n__author__ = \"Brateaqu, Farolflu\"\n__copyright__ = \"Copyright 2019\"\n__credits__ = [\"Quentin BRATEAU\", \"Luca FAROLFI\"]\n\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__email__ = [\"[email protected]\", \"[email protected]\"]\n\n\n# Importing modules\nimport numpy as np\nfrom GamePieces import Animal, Boulder\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = \"Elephant\"\n self.winner = \"\"\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3): # Setting up the 3 Boulders\n self[2][1+k] = Boulder(2, 1+k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! 
Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = \"Rhinoceros\"\n\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == \"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter = 1, k = 0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= (x+cx) <= 4 and 0 <= y+cy <= 4):\n return counter, k\n\n elif self[x + cx][y + cy] == 0:\n return counter, k\n\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ + np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ + np.array([cx, cy]) == -1:\n counter -= 2\n\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. 
warning:: ...\n .. info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n (nx, ny) = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) and (animal.direction[0] == cx and animal.direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx) == -1 or (x + i * cx) == 5 or (y + i * cy) == -1 or (y + i * cy) == 5:\n if isinstance(self[x + (i-1)*cx][y + (i-1)*cy], Animal):\n self[x + (i-1)*cx][y + (i-1)*cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i-1)*cx][y + (i-1)*cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy], Animal) and [self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[0], self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].direction[1]] == [cx, cy]:\n self.winner=self[x + (i - 1 - k) * cx][y + (i - 1 - k) * cy].species\n print(\"winner is\", self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy].coords = (x + i * cx, y + i * cy)\n\n if self.playerTurn == \"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n print(\"Push not possible\")\n return (False)\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == 1 and cy == 0) or (cx == 0 and cy == 0):\n animal.coords = (nx, ny)\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == \"Elephant\":\n self.playerTurn = \"Rhinoceros\"\n elif 
self.playerTurn == \"Rhinoceros\":\n self.playerTurn = \"Elephant\"\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n\n f = file.readlines()\n k = 0\n while k < len(f) and \"Boulder {\" not in f[k]:\n k += 1\n k += 1\n while \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n\n while k < len(f) and \"Elephant {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n\n while k < len(f) and \"Rhinoceros {\" not in f[k]:\n k += 1\n k += 1\n while \":\" in f[k] and \";\" in f[k]:\n coords = f[k][5:8].split(\",\")\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(\"]\")[0].split(\",\")\n xdir, ydir = 0, 0\n if d[0] == \"1\":\n xdir = 1\n elif d[0] == \"-1\":\n xdir = -1\n if d[1] == \"1\":\n ydir = 1\n elif d[1] == \"-1\":\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)",
"__author__ = 'Brateaqu, Farolflu'\n__copyright__ = 'Copyright 2019'\n__credits__ = ['Quentin BRATEAU', 'Luca FAROLFI']\n__license__ = 'GPL'\n__version__ = '1.0'\n__email__ = ['[email protected]',\n '[email protected]']\nimport numpy as np\nfrom GamePieces import Animal, Boulder\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! 
Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)\n",
"__author__ = 'Brateaqu, Farolflu'\n__copyright__ = 'Copyright 2019'\n__credits__ = ['Quentin BRATEAU', 'Luca FAROLFI']\n__license__ = 'GPL'\n__version__ = '1.0'\n__email__ = ['[email protected]',\n '[email protected]']\n<import token>\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! 
Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. 
sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. 
warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\nif __name__ == '__main__':\n g = GameMap()\n print(g)\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n \"\"\"\n The Gamemap module\n ==================\n\n Creating the Gamemap.\n\n This creates the 5x5 gamemap with the moves and position of the gamepieces to play at the King of Siam. It is inherited from a list.\n\n :Example:\n >>> m = GameMap()\n\n .. seealso:: :class:`GamePieces.Animal()`, :class:`GamePieces.Boulder()`, :class:`GamePieces.Crosses()`\n .. moduleauthor:: Quentin BRATEAU <[email protected]>, Luca FAROLFI <[email protected]>\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. 
warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n\n @nb_elephants.setter\n def nb_elephants(self, x):\n \"\"\"\n Setting the elephant's number.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Elephant is not possible!')\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! 
Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n\n def add(self, animal):\n \"\"\"\n This method add a new animal onto the board, with position and orientation\n It returns whether the placement was possible or not.\n\n :Args:\n :param animal (Animal): the animal to add on the GameMap.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.add(a)\n\n .. note:: the turn does not count if the insertion was not possible\n .. 
warning:: if the location of the insertion is already taken by another piece, add calls upon move to see\n if insertion is possible\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n \"\"\"\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x ==\n 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = 'Rhinoceros'\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (\n x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. 
If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Show the current state of the game board\n\n :return: the string with the characteristics of the board\n :rtype: str\n \"\"\"\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1\n ] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j\n ].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[\n 1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n\n def save(self, file):\n \"\"\"\n This method save a GameMap in a KingOfSiam file with the .kos extension.\n\n :Args:\n :param file (file object): is file in which to write.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: this method take in argument a file object.\n \"\"\"\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j] != 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == 'Elephant':\n elephants.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif piece.species == 'Rhinoceros':\n rhinos.append('(' + str(i) + ',' + str(j) +\n ') : np.array([' + str(L[0]) + ',' + str(L[1]) +\n '])')\n elif isinstance(piece, Boulder):\n boulders.append('(' + str(i) + ',' + str(j) + ')')\n file.write('# King of Siam GameFile \\n\\nplayer_turn {\\n ' + self\n .playerTurn + '\\n}\\n\\n')\n file.write('Boulder {')\n for k in range(len(boulders)):\n file.write('\\n ' + boulders[k] + ';')\n file.write('\\n}\\n\\nElephant {')\n for elt in elephants:\n file.write('\\n ' + elt + ';')\n file.write('\\n}\\n\\nRhinoceros {')\n for elt in rhinos:\n file.write('\\n ' + elt + ';')\n file.write('\\n}')\n file.close()\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. 
note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n\n @property\n def nb_elephants(self):\n \"\"\"\n This is the number of elephant on the gamemap.\n\n :Getter: Return the number of elephant on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> ne = m.nb_elephants\n\n .. note:: The elephant's number can not exceed 5.\n .. warning:: the number of elephant can't be changed by hand.\n \"\"\"\n return self.__nb_elephants\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n <function token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n\n def push_counter(self, x, y, cx, cy, counter=1, k=0):\n \"\"\"\n This recursive method determines if a push move is possible by counting the elements having to be pushed,\n and taking into account their orientation.\n It returns the number of pieces that are being pushed aswell as a counter. If the counter not negative, the push occurs.\n\n :Args:\n :param x (int): is the abscissa of the current GamePiece,\n :param y (int): is the ordinate of the current GamePiece,\n :param cx (int): the direction of the move following the x-axis,\n :param cy (int): the direction of the move following the y-axis,\n :param counter (int): the sum of the scalar product of each animals in a row,\n :param k (int): the counter of pawns in a row.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.push_counter(0, 1, 1, 0)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: The function has a double use, as without it \"move\" wouldn't know how many pieces to move\n .. warning:: ...\n .. 
info:: An animal placed sideways does not impact a push, an opponent's animal in the right direction helps the push.\n \"\"\"\n k += 1\n if not (0 <= x + cx <= 4 and 0 <= y + cy <= 4):\n return counter, k\n elif self[x + cx][y + cy] == 0:\n return counter, k\n elif isinstance(self[x + cx][y + cy], Animal):\n if self[x + cx][y + cy].direction @ +np.array([cx, cy]) == 1:\n counter += 1\n elif self[x + cx][y + cy].direction @ +np.array([cx, cy]) == -1:\n counter -= 2\n elif isinstance(self[x + cx][y + cy], Boulder):\n counter -= 1\n return self.push_counter(x + cx, y + cy, cx, cy, counter, k)\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n <function token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n <function token>\n\n def load(self, file):\n \"\"\"\n This method load a KingOfSiam file with the .kos extension in a GameMap object.\n\n :Args:\n :param file (file object): is file to load.\n\n :Example:\n >>> g = GameMap()\n >>> file = open('save.kos', 'r')\n >>> g.load(file)\n\n .. sectionauthor:: Quentin BRATEAU <[email protected]>\n\n .. note:: this method take in argument a file object.\n \"\"\"\n for i in range(5):\n for j in range(5):\n self[i][j] = 0\n f = file.readlines()\n k = 0\n while k < len(f) and 'Boulder {' not in f[k]:\n k += 1\n k += 1\n while ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n self[x][y] = Boulder(x, y)\n k += 1\n while k < len(f) and 'Elephant {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Elephant')\n k += 1\n while k < len(f) and 'Rhinoceros {' not in f[k]:\n k += 1\n k += 1\n while ':' in f[k] and ';' in f[k]:\n coords = f[k][5:8].split(',')\n x, y = int(coords[0]), int(coords[1])\n d = f[k][22:].split(']')[0].split(',')\n xdir, ydir = 0, 0\n if d[0] == '1':\n xdir = 1\n elif d[0] == '-1':\n xdir = -1\n if d[1] == '1':\n ydir = 1\n elif d[1] == '-1':\n ydir = -1\n direction = np.array([xdir, ydir])\n self[x][y] = Animal(x, y, direction, 'Rhinoceros')\n k += 1\n file.close()\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n\n def move(self, animal, ncoords, ndir):\n \"\"\"\n This method moves an animal from on the board, as well as turns it\n If the coords to which the animal is moving are taken, the the animal pushes\n\n :Args:\n :param animal (Animal): the animal to move,\n :param ncoords (tuple): the new coordinates of the animal,\n :param ndir (np.array): the new direction of the animal.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.move(a,(1,1),np.array([0,1]))\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n\n .. note:: player turn does not change if move is not possible\n .. warning:: ...\n .. 
info:: it is possible to both rotate and move to another position in the same turn\n \"\"\"\n x, y = animal.coords\n nx, ny = ncoords\n cx, cy = nx - x, ny - y\n if abs(cx) > 1 or abs(cy) > 1:\n return False\n elif self[nx][ny] != 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) and (animal.direction[0] == cx and animal.\n direction[1] == cy):\n res = self.push_counter(x, y, cx, cy, 1)\n c = res[0]\n k = res[1]\n if c >= 0:\n for i in range(k, 0, -1):\n if (x + i * cx == -1 or x + i * cx == 5 or y + i * cy ==\n -1 or y + i * cy == 5):\n if isinstance(self[x + (i - 1) * cx][y + (i - 1) *\n cy], Animal):\n self[x + (i - 1) * cx][y + (i - 1) * cy] = animal\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n else:\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n for k in range(5):\n if isinstance(self[x + (i - 1 - k) * cx][y +\n (i - 1 - k) * cy], Animal) and [self[x +\n (i - 1 - k) * cx][y + (i - 1 - k) * cy]\n .direction[0], self[x + (i - 1 - k) *\n cx][y + (i - 1 - k) * cy].direction[1]\n ] == [cx, cy]:\n self.winner = self[x + (i - 1 - k) * cx][\n y + (i - 1 - k) * cy].species\n print('winner is', self.winner)\n break\n else:\n self[x + i * cx][y + i * cy] = self[x + (i - 1) * cx][\n y + (i - 1) * cy]\n self[x + (i - 1) * cx][y + (i - 1) * cy] = 0\n self[x + i * cx][y + i * cy\n ].coords = x + i * cx, y + i * cy\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n print('Push not possible')\n return False\n elif self[nx][ny] == 0 and (cx == 0 and abs(cy) == 1 or abs(cx) == \n 1 and cy == 0) or cx == 0 and cy == 0:\n animal.coords = nx, ny\n animal.direction = ndir\n self[x][y] = 0\n self[nx][ny] = animal\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif 
self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n\n def delete(self, animal):\n \"\"\"\n This method removes an animal from the board\n It reduces by one the number of animals of that species\n\n :Args:\n :param animal (Animal): the animal to delete.\n\n :Example:\n >>> a = Animal(0, 1, np.array([0,1]), \"Elephant\")\n >>> g = GameMap()\n >>> g.delete(a)\n\n .. sectionauthor:: Luca FAROLFI <[email protected]>\n\n .. note:: if removal of a boulder, game ends?\n .. 
warning:: error if piece is not on the edge\n \"\"\"\n x, y = animal.coords\n if x == 0 or x == 4 or y == 0 or y == 4:\n self[x][y] = 0\n if animal.species == 'Elephant':\n self.__nb_elephants -= 1\n elif animal.species == 'Rhinoceros':\n self.__nb_rhinoceros -= 1\n if self.playerTurn == 'Elephant':\n self.playerTurn = 'Rhinoceros'\n elif self.playerTurn == 'Rhinoceros':\n self.playerTurn = 'Elephant'\n else:\n return False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n\n def __init__(self):\n super().__init__()\n self.xmax = 5\n self.ymax = 5\n self.__nb_elephants = 0\n self.__nb_rhinoceros = 0\n self.nb_boulders = 0\n self.nb_crosses = 0\n self.playerTurn = 'Elephant'\n self.winner = ''\n for k in range(self.ymax):\n y = []\n for i in range(self.ymax):\n y.append(0)\n self.append(y)\n for k in range(3):\n self[2][1 + k] = Boulder(2, 1 + k)\n self.nb_boulders += 1\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n\n @nb_rhinoceros.setter\n def nb_rhinoceros(self, x):\n \"\"\"\n Setting the rhinoceros's number.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n print('Warning ! Changing the number of Rhinoceros is not possible!')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n @property\n def nb_rhinoceros(self):\n \"\"\"\n This is the number of rinoceros on the gamemap.\n\n :Getter: Return the number of rhinoceros on the gamemap.\n :Type: int\n\n :Getter's example:\n >>> m = GameMap()\n >>> nr = m.nb_rhinoceros\n\n .. note:: The rhinoceros's number can not exceed 5.\n .. warning:: the number of rhinoceros can't be changed by hand.\n \"\"\"\n return self.__nb_rhinoceros\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n\n\nclass GameMap(list):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<class token>\n<code token>\n"
] | false |
1,052 |
fecaf41152e8c98784585abfdb3777fc0a4824f3
|
string1 = "Vegetable"
#string2 = "Fruit"
string2 = "vegetable"
print(string1 == string2)
print(string1 != string2)
if string1.lower() == string2.lower():
print("The strings are equal")
else:
print("The strings are not equal")
number1 = 25
number2 = 30
# ==
# !=
# >
# <
# >=
# <=
if number1 <= number2:
print("number 1 is greater")
name_1 = "Stephen"
name_2 = "stephen"
number_1 = 45
number_2 = 30
if name_1.lower() == name_2.lower() and number_1 < number_2:
print("We passed the test")
if name_1.lower() == name_2.lower() or number_1 < number_2:
print("We passed the test")
|
[
"\nstring1 = \"Vegetable\"\n#string2 = \"Fruit\"\nstring2 = \"vegetable\"\n\nprint(string1 == string2)\n\nprint(string1 != string2)\n\n\nif string1.lower() == string2.lower():\n print(\"The strings are equal\")\nelse:\n print(\"The strings are not equal\")\n\nnumber1 = 25\nnumber2 = 30\n\n# ==\n# !=\n# >\n# <\n# >=\n# <=\n\nif number1 <= number2:\n print(\"number 1 is greater\")\n\n\n\nname_1 = \"Stephen\"\nname_2 = \"stephen\"\n\nnumber_1 = 45\nnumber_2 = 30\nif name_1.lower() == name_2.lower() and number_1 < number_2:\n print(\"We passed the test\")\n\nif name_1.lower() == name_2.lower() or number_1 < number_2:\n print(\"We passed the test\")",
"string1 = 'Vegetable'\nstring2 = 'vegetable'\nprint(string1 == string2)\nprint(string1 != string2)\nif string1.lower() == string2.lower():\n print('The strings are equal')\nelse:\n print('The strings are not equal')\nnumber1 = 25\nnumber2 = 30\nif number1 <= number2:\n print('number 1 is greater')\nname_1 = 'Stephen'\nname_2 = 'stephen'\nnumber_1 = 45\nnumber_2 = 30\nif name_1.lower() == name_2.lower() and number_1 < number_2:\n print('We passed the test')\nif name_1.lower() == name_2.lower() or number_1 < number_2:\n print('We passed the test')\n",
"<assignment token>\nprint(string1 == string2)\nprint(string1 != string2)\nif string1.lower() == string2.lower():\n print('The strings are equal')\nelse:\n print('The strings are not equal')\n<assignment token>\nif number1 <= number2:\n print('number 1 is greater')\n<assignment token>\nif name_1.lower() == name_2.lower() and number_1 < number_2:\n print('We passed the test')\nif name_1.lower() == name_2.lower() or number_1 < number_2:\n print('We passed the test')\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,053 |
6bc400896c004f0fdddbbd3dd73ef9aaa19eb4db
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(verbose_name='Date de création', auto_now_add=True)),
('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),
('corporate_name', models.CharField(verbose_name='Nom', max_length=255)),
],
options={
'abstract': False,
'ordering': ('-created',),
},
),
]
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(verbose_name='Date de création', auto_now_add=True)),\n ('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),\n ('corporate_name', models.CharField(verbose_name='Nom', max_length=255)),\n ],\n options={\n 'abstract': False,\n 'ordering': ('-created',),\n },\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Customer', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created', models.DateTimeField(\n verbose_name='Date de création', auto_now_add=True)), ('modified',\n models.DateTimeField(verbose_name='Date de modification', auto_now=\n True)), ('corporate_name', models.CharField(verbose_name='Nom',\n max_length=255))], options={'abstract': False, 'ordering': (\n '-created',)})]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Customer', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created', models.DateTimeField(\n verbose_name='Date de création', auto_now_add=True)), ('modified',\n models.DateTimeField(verbose_name='Date de modification', auto_now=\n True)), ('corporate_name', models.CharField(verbose_name='Nom',\n max_length=255))], options={'abstract': False, 'ordering': (\n '-created',)})]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
1,054 |
513a2bbcf7a63baf900b73b18cf25618937dc7d0
|
"""
Prog: helloworld.py
Name: Samuel doyle
Date: 18/04/18
Desc: My first program!
"""
print('Hello, world!')
|
[
"\"\"\"\nProg: helloworld.py\nName: Samuel doyle\nDate: 18/04/18\nDesc: My first program!\n\"\"\"\n\nprint('Hello, world!')\n",
"<docstring token>\nprint('Hello, world!')\n",
"<docstring token>\n<code token>\n"
] | false |
1,055 |
a21ac29911931bb71460175cba584e0011fa2ece
|
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import configobj
import datetime
import os
config = configobj.ConfigObj('.env')
port = 2525
smtp_server = "smtp.mailtrap.io"
login = config['SMTP_USERNAME']
password = config['SMTP_PASSWORD']
sender_email = "[email protected]"
receiver_email = "[email protected]"
last_sent = datetime.datetime.now()
last_index_sent = 0
def timeFromLastSent():
if(last_sent is None):
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
# send your email
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent=i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open("frame.jpg", "rb").read()).decode()
html = f"""\
<html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart("alternative")
message["Subject"] = "inline embedding"
message["From"] = sender_email
message["To"] = receiver_email
part = MIMEText(html, "html")
message.attach(part)
with smtplib.SMTP("smtp.mailtrap.io", 2525) as server:
server.login(login, password)
server.sendmail(
sender_email, receiver_email, message.as_string() )
print('Sent')
return
|
[
"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport base64\nimport configobj\nimport datetime\nimport os\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = \"smtp.mailtrap.io\"\nlogin = config['SMTP_USERNAME'] \npassword = config['SMTP_PASSWORD'] \n\nsender_email = \"[email protected]\"\nreceiver_email = \"[email protected]\"\n\n\n\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\ndef timeFromLastSent():\n if(last_sent is None):\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n# send your email\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent=i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open(\"frame.jpg\", \"rb\").read()).decode()\n html = f\"\"\"\\\n <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = \"inline embedding\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n\n part = MIMEText(html, \"html\")\n message.attach(part)\n \n with smtplib.SMTP(\"smtp.mailtrap.io\", 2525) as server:\n server.login(login, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string() )\n print('Sent')\n return\n",
"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport base64\nimport configobj\nimport datetime\nimport os\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = 'smtp.mailtrap.io'\nlogin = config['SMTP_USERNAME']\npassword = config['SMTP_PASSWORD']\nsender_email = '[email protected]'\nreceiver_email = '[email protected]'\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"<import token>\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = 'smtp.mailtrap.io'\nlogin = config['SMTP_USERNAME']\npassword = config['SMTP_PASSWORD']\nsender_email = '[email protected]'\nreceiver_email = '[email protected]'\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"<import token>\n<assignment token>\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"<import token>\n<assignment token>\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
1,056 |
562b2c3567e42699cfd0804a5780af7ede142e13
|
## Filename: name.py
# Author: Marcelo Feitoza Parisi
#
# Description: Report the objects
# on the bucket sorted by name.
#
# ###########################
# # DISCLAIMER - IMPORTANT! #
# ###########################
#
# Stuff found here was built as a
# Proof-Of-Concept or Study material
# and should not be considered
# production ready!
#
# USE WITH CARE!
##
from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
# Google Cloud Storage Client
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
# Will hold our local list of objects
blob_list = []
try:
for blob in blobs:
# For each object we'll save name, owner, class, size and date
this_blob = { 'name': blob.name,
'owner': blob.owner,
'class': blob.storage_class,
'size' : blob.size,
'date' : str(blob.updated).split('.')[0].split('+')[0]
}
# Append object to our list
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
# Sort our object list by name using our reverse_opt
sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=reverse_opt)
# Generating our PrettyTable
report_table = PrettyTable()
report_table.field_names = ["NAME", "OWNER", "CLASS", "SIZE", "DATE"]
report_table.align["NAME"] = "l"
report_table.align["SIZE"] = "r"
report_table.align["DATE"] = "r"
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
|
[
"## Filename: name.py\n # Author: Marcelo Feitoza Parisi\n # \n # Description: Report the objects\n # on the bucket sorted by name.\n # \n # ###########################\n # # DISCLAIMER - IMPORTANT! #\n # ###########################\n # \n # Stuff found here was built as a\n # Proof-Of-Concept or Study material\n # and should not be considered\n # production ready!\n # \n # USE WITH CARE!\n##\nfrom lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\ndef exec(bucket_id, project_id, reverse_opt):\n\n # Google Cloud Storage Client\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n\n # Will hold our local list of objects\n blob_list = []\n\n try: \n for blob in blobs:\n # For each object we'll save name, owner, class, size and date\n this_blob = { 'name': blob.name,\n 'owner': blob.owner,\n 'class': blob.storage_class,\n 'size' : blob.size,\n 'date' : str(blob.updated).split('.')[0].split('+')[0]\n }\n # Append object to our list\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n\n # Sort our object list by name using our reverse_opt\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=reverse_opt)\n\n # Generating our PrettyTable\n report_table = PrettyTable()\n report_table.field_names = [\"NAME\", \"OWNER\", \"CLASS\", \"SIZE\", \"DATE\"]\n report_table.align[\"NAME\"] = \"l\"\n report_table.align[\"SIZE\"] = \"r\"\n report_table.align[\"DATE\"] = \"r\"\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])\n\n print(report_table)\n\n\n",
"from lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"<import token>\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"<import token>\n<function token>\n"
] | false |
1,057 |
358879d83ed3058530031d50fb69e3ce11fbd524
|
print(60*60)
seconds_per_hour=60*60
print(24*seconds_per_hour)
seconds_per_day=24*seconds_per_hour
print(seconds_per_day/seconds_per_hour)
print(seconds_per_day//seconds_per_hour)
|
[
"print(60*60)\r\n\r\nseconds_per_hour=60*60\r\n\r\nprint(24*seconds_per_hour)\r\n\r\nseconds_per_day=24*seconds_per_hour\r\n\r\nprint(seconds_per_day/seconds_per_hour)\r\n\r\nprint(seconds_per_day//seconds_per_hour)\r\n",
"print(60 * 60)\nseconds_per_hour = 60 * 60\nprint(24 * seconds_per_hour)\nseconds_per_day = 24 * seconds_per_hour\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"print(60 * 60)\n<assignment token>\nprint(24 * seconds_per_hour)\n<assignment token>\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,058 |
5c1d81c973487f1b091e58a6ccf5947c3f2a7e6d
|
import unittest
from nldata.corpora import Telegram
import os
class TestTelegram(unittest.TestCase):
def test_export_iter(self):
pass
# telegram = Telegram(data_dir)
# it = telegram.split("train", n=20)
# samples = [s for s in it]
# self.assertEqual(len(samples), 20)
# list(map(print,samples))
if __name__ == '__main__':
unittest.main()
|
[
"import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n def test_export_iter(self):\n pass\n # telegram = Telegram(data_dir)\n # it = telegram.split(\"train\", n=20)\n # samples = [s for s in it]\n # self.assertEqual(len(samples), 20)\n # list(map(print,samples))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import unittest\nfrom nldata.corpora import Telegram\nimport os\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestTelegram(unittest.TestCase):\n\n def test_export_iter(self):\n pass\n\n\n<code token>\n",
"<import token>\n\n\nclass TestTelegram(unittest.TestCase):\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
1,059 |
03629e62b11e66eeb0e111fee551c75c8463cbb8
|
from compas.geometry import Line
# This import is use to test __repr__.
from compas.geometry import Point # noqa: F401
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
|
[
"from compas.geometry import Line\n\n# This import is use to test __repr__.\nfrom compas.geometry import Point # noqa: F401\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"from compas.geometry import Line\nfrom compas.geometry import Point\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"<import token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"<import token>\n<function token>\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"<import token>\n<function token>\n<function token>\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"<import token>\n<function token>\n<function token>\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
1,060 |
9bb6fd6fbe212bdc29e2d1ec37fa6ec6ca9a9469
|
#!/usr/bin/env python
# encoding: utf-8
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def non_daemon():
p = multiprocessing.current_process()
print('Starting:', p.name, p.pid)
sys.stdout.flush()
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
def main1():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
def main2():
d = multiprocessing.Process(name="daemon_process", target=daemon)
n = multiprocessing.Process(name="no_daemon_process", target=non_daemon)
print("daemon_process default daemon value: %s" % d.daemon)
print("no_daemon_process default daemon value: %s" % n.daemon)
d.daemon = True
n.daemon = False
d.start()
time.sleep(1)
n.start()
# 阻塞父进程,直到子进程结束为止。
# 从实验来看,子进程结束和join的先后顺序无关。
# 唯一的限制是父进程需要等所有join的子进程结束后,才会继续向下执行。
d.join()
n.join()
def main3():
d = multiprocessing.Process(name='daemon', target=daemon)
d.daemon = True
n = multiprocessing.Process(name='non-daemon', target=non_daemon)
n.daemon = False
d.start()
n.start()
# join接受一个timeout的参数,意思就是如果超过了timeout的时间,不管子进程是否结束,join函数也会直接返回。
d.join(1)
# 可以看到子进程d仍然未结束,但是父进程已经继续执行了。
print('d.is_alive()', d.is_alive())
n.join()
if __name__ == "__main__":
# main1()
# main2()
main3()
|
[
"#!/usr/bin/env python\n# encoding: utf-8\n\nimport multiprocessing\nimport time\nimport sys\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name=\"daemon_process\", target=daemon)\n n = multiprocessing.Process(name=\"no_daemon_process\", target=non_daemon)\n print(\"daemon_process default daemon value: %s\" % d.daemon)\n print(\"no_daemon_process default daemon value: %s\" % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name=\"daemon_process\", target=daemon)\n n = multiprocessing.Process(name=\"no_daemon_process\", target=non_daemon)\n print(\"daemon_process default daemon value: %s\" % d.daemon)\n print(\"no_daemon_process default daemon value: %s\" % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n # 阻塞父进程,直到子进程结束为止。\n # 从实验来看,子进程结束和join的先后顺序无关。\n # 唯一的限制是父进程需要等所有join的子进程结束后,才会继续向下执行。\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n # join接受一个timeout的参数,意思就是如果超过了timeout的时间,不管子进程是否结束,join函数也会直接返回。\n d.join(1)\n # 可以看到子进程d仍然未结束,但是父进程已经继续执行了。\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == \"__main__\":\n # main1()\n # main2()\n main3()\n",
"import multiprocessing\nimport time\nimport sys\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == '__main__':\n main3()\n",
"<import token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\nif __name__ == '__main__':\n main3()\n",
"<import token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef non_daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\n<code token>\n",
"<import token>\n\n\ndef daemon():\n p = multiprocessing.current_process()\n print('Starting:', p.name, p.pid)\n sys.stdout.flush()\n time.sleep(2)\n print('Exiting :', p.name, p.pid)\n sys.stdout.flush()\n\n\n<function token>\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef main1():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\ndef main3():\n d = multiprocessing.Process(name='daemon', target=daemon)\n d.daemon = True\n n = multiprocessing.Process(name='non-daemon', target=non_daemon)\n n.daemon = False\n d.start()\n n.start()\n d.join(1)\n print('d.is_alive()', d.is_alive())\n n.join()\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef main2():\n d = multiprocessing.Process(name='daemon_process', target=daemon)\n n = multiprocessing.Process(name='no_daemon_process', target=non_daemon)\n print('daemon_process default daemon value: %s' % d.daemon)\n print('no_daemon_process default daemon value: %s' % n.daemon)\n d.daemon = True\n n.daemon = False\n d.start()\n time.sleep(1)\n n.start()\n d.join()\n n.join()\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,061 |
267cb37f2ccad5b02a809d9b85327eacd9a49515
|
from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
# page = requests.get("https://www.worldometers.info/coronavirus/")
# soup = BeautifulSoup(page.content, 'html.parser')
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
# Initialize application
app = Flask(__name__)
@app.route("/")
def hello():
return "Flask setup"
def sheets_row_writer(data_list):
print("sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list)
print("Write complete")
def sheets_row_writer_donor(data_list_donor):
print("donor sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list_donor)
print("Write complete")
def death_global():
page = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
cases_list = []
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return "There are"+cases_list[0]+" Total cases out of which"+cases_list[1]+" have died and"+cases_list[2]+" have recovered . There are still "+active_cases+" active cases."
app.route("/death/global", methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get("https://www.worldometers.info/coronavirus/")
response = death_global()
reply = { "fulfillmentText": response }
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get("https://www.worldometers.info/coronavirus/country/"+id+"/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return "In " +idu+" There are"+cases_list[0]+"Total cases out of which"+cases_list[1]+"are dead and"+cases_list[2]+"have already recovered . There are still "+active_cases+ " active cases ."
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print (intent)
def news_nepal_int():
url = "https://nepalcorona.info/api/v1/news"
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{
"card":{
"title":data1['title'],
"subtitle":"Source: "+data1['source']+" >>",
"imageUri":data1['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data1['url']
},
{
"text":"Corona Symptoms",
"postback":"symptoms"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data2['title'],
"subtitle":"Source "+data2['source']+" >>",
"imageUri":data2['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data2['url']
},
{
"text":"Live Nepal Data",
"postback":"live-nepal-data"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data3['title'],
"subtitle":"Source "+data3['source']+" >>",
"imageUri":data3['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data3['url']
},
{
"text":"Self Isolation",
"postback":"self isolation"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response2 }
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0],name[0],phone[0],place[0]]
sheets_row_writer(ilist)
response2 = "Hello "+name[0]+" so you are looking for "+item_required[0]+" Your location is "+place[0]+" One of our Team will contact you @ " +phone[0]+" soon !"
response = [
{
"quickReplies": {
"title": response2,
"quickReplies": [
"Call a Doctor",
"Get Online Support"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']
url = "https://nepalcorona.info/api/v1/faqs"
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ["Live Nepali Data","Latest Nepali News","Symptoms","Preventions","Self Isolation","Play Corona Quiz"]
faqs = todos['data']
faq = faqs[rand]
if(ff=="English FAQ" or ff =="More Quizzles" or ff =="भाषा परिवर्तन"):
randq= faq['question']
randa = faq['answer']
opt1 = "More Quizzles"
opt2 = "Switch Language"
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = "अरु देखाउनुहोस >>"
opt2 = "भाषा परिवर्तन"
response2 = "Q. "+randq+"\n A. "+randa+"\n"
response = [{
"text": {
"text": [
randq
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"quickReplies": {
"title": randa,
"quickReplies": [
opt1,
opt2,
random.choice(opt3)
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def blood_pal_yes():
print (intent)
print (data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group,blood_amount,location,case,date,phone]
sheets_row_writer(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "The following request has been sent. We will contact you shortly. "+blood_group+" blood ("+str(blood_amount)+" ) required for "+case+" at "+location+" On "+date+" - "+phone+" Thank you ."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def blood_pal_donor_yes():
print (intent)
print (data)
permananet_address = data['queryResult']['parameters']['permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation= data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]
sheets_row_writer_donor(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "Thank you "+name+" for registration as a blood donor We will contact you at the time of urgency in your area."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def world_data_live():
text = death_global()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Data",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#district summary all
def district_all_summary():
text = dss.district_all_summary()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Summary",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#provience summary all should remove
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"District-Summary",
"Province-Data",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def proviencewise_detail():
#get provience name
#return dss.ard(provience)
#card
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [
{
"card":{
"title": "Covid-19 Provience: "+str(province)+" | Details",
"subtitle":response_summary,
"imageUri": "https://setopati.net/wp-content/uploads/2018/02/province6.jpg",
"buttons":[
{
"text":"Prov "+str(province)+" District Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Prov "+str(province)+" Vdc-Mun Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
# provincecode = pcode
if(dvdc=="vdc"):
print('inside vdc')
typ = "vdc"
else:
print('inside district')
typ = "district"
data_return = dss.ard(code,typ)
response = [
{
"quickReplies": {
"title": data_return,
"quickReplies": [
"District Summary",
"Province Summary",
"Nepali News",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def nepal_data_new_main_int():
url = "https://nepalcorona.info/api/v1/data/nepal"
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = "Nepal Cases \n Positive :"+str(todos["tested_positive"])+" | Recovered: "+str(todos["recovered"])+"| Deaths:"+str(todos["deaths"])+" "+"\n"
print(response2)
response_summary = dss.affected_summary()
response = [
{
"text": {
"text": [
response2
]
},
"platform": "FACEBOOK"
},
{
"text": {
"text": [
""
]
}
},
{
"card":{
"title": "Covid-19 Nepal | Stats",
"subtitle":response_summary,
# "subtitle": "Find details by Province, Municipals and Districts for Nepal",
"imageUri": "https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png",
"buttons":[
{
"text":"Province Summary",
"postback":"province data int"
},
{
"text":"District-Summary",
"postback":"district data int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def batti_update():
url = "https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM"
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos["feeds"][0]
response2 = "Batti Status Now :"+str(feeds["field1"]+"\n Last Updated: "+str(feeds["created_at"]))
print(response2)
reply = { "fulfillmentText": response2 }
return reply
def default():
return "Incorrect Data"
switcher = {
"nepal data int": nepal_data_new_main_int,
"news-nepal-int": news_nepal_int,
"i need help main int - yes": i_need_help_yes,
"faq-que-ans-int": faq_ques_ans,
"bloodpal-need-blood-main-int - yes": blood_pal_yes,
"data world int": world_data_live,
"district data int": district_all_summary,
"province data int": province_all_summary,
"province-wise-data": proviencewise_detail,
"dis-vdc data detail int": dis_vdc_detail,
"bloodpal-become-donor-main-int":blood_pal_donor_yes,
"batti-update-intent":batti_update
}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
    # Start the Flask development server when executed as a script
    # (not when imported, e.g. by a WSGI container).
    app.run()
|
[
"from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\n# page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n# soup = BeautifulSoup(page.content, 'html.parser')\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n\n# Initialize application\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef hello():\n return \"Flask setup\"\n\ndef sheets_row_writer(data_list):\n print(\"sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list) \n print(\"Write complete\")\n\ndef sheets_row_writer_donor(data_list_donor):\n print(\"donor sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list_donor) \n print(\"Write complete\")\n\ndef death_global():\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n \n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n cases_list = []\n\n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n\n for res in result:\n cases_list.append(res.text)\n\n return \"There are\"+cases_list[0]+\" Total cases out of which\"+cases_list[1]+\" have died and\"+cases_list[2]+\" have 
recovered . There are still \"+active_cases+\" active cases.\"\n\napp.route(\"/death/global\", methods=['POST'])\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n response = death_global()\n reply = { \"fulfillmentText\": response } \n return jsonify(reply)\n \n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\"https://www.worldometers.info/coronavirus/country/\"+id+\"/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n \n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n\n return \"In \" +idu+\" There are\"+cases_list[0]+\"Total cases out of which\"+cases_list[1]+\"are dead and\"+cases_list[2]+\"have already recovered . There are still \"+active_cases+ \" active cases .\"\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print (intent)\n \n def news_nepal_int():\n url = \"https://nepalcorona.info/api/v1/news\"\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n \n response2 = [{\n \"card\":{\n \"title\":data1['title'],\n \"subtitle\":\"Source: \"+data1['source']+\" >>\",\n \"imageUri\":data1['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data1['url']\n },\n {\n \"text\":\"Corona Symptoms\",\n \"postback\":\"symptoms\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data2['title'],\n \"subtitle\":\"Source \"+data2['source']+\" >>\",\n \"imageUri\":data2['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data2['url']\n },\n {\n \"text\":\"Live Nepal Data\",\n 
\"postback\":\"live-nepal-data\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data3['title'],\n \"subtitle\":\"Source \"+data3['source']+\" >>\",\n \"imageUri\":data3['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data3['url']\n },\n {\n \"text\":\"Self Isolation\",\n \"postback\":\"self isolation\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n ]\n\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0],name[0],phone[0],place[0]]\n sheets_row_writer(ilist)\n response2 = \"Hello \"+name[0]+\" so you are looking for \"+item_required[0]+\" Your location is \"+place[0]+\" One of our Team will contact you @ \" +phone[0]+\" soon !\"\n response = [\n\n {\n \"quickReplies\": {\n \"title\": response2,\n \"quickReplies\": [\n \"Call a Doctor\",\n \"Get Online Support\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']\n url = \"https://nepalcorona.info/api/v1/faqs\"\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = [\"Live Nepali Data\",\"Latest Nepali News\",\"Symptoms\",\"Preventions\",\"Self Isolation\",\"Play Corona Quiz\"]\n faqs = todos['data']\n faq = faqs[rand]\n if(ff==\"English FAQ\" or ff ==\"More Quizzles\" or ff ==\"भाषा परिवर्तन\"):\n randq= faq['question']\n randa = faq['answer']\n opt1 = \"More Quizzles\"\n opt2 = \"Switch Language\"\n else:\n randq = faq['question_np']\n 
randa = faq['answer_np']\n opt1 = \"अरु देखाउनुहोस >>\"\n opt2 = \"भाषा परिवर्तन\"\n\n response2 = \"Q. \"+randq+\"\\n A. \"+randa+\"\\n\"\n response = [{\n \"text\": {\n \"text\": [\n randq\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n {\n \"quickReplies\": {\n \"title\": randa,\n \"quickReplies\": [\n opt1,\n opt2,\n random.choice(opt3)\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response }\n\n return reply\n \n def blood_pal_yes():\n print (intent)\n print (data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group,blood_amount,location,case,date,phone]\n sheets_row_writer(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"The following request has been sent. We will contact you shortly. 
\"+blood_group+\" blood (\"+str(blood_amount)+\" ) required for \"+case+\" at \"+location+\" On \"+date+\" - \"+phone+\" Thank you .\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def blood_pal_donor_yes():\n print (intent)\n print (data)\n permananet_address = data['queryResult']['parameters']['permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation= data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"Thank you \"+name+\" for registration as a blood donor We will contact you at the time of urgency in your area.\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n 
reply = { \"fulfillmentMessages\": response2 }\n return reply\n\n def world_data_live():\n text = death_global()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Data\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #district summary all\n def district_all_summary():\n text = dss.district_all_summary()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #provience summary all should remove \n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"District-Summary\",\n \"Province-Data\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def proviencewise_detail():\n #get provience name\n #return dss.ard(provience)\n #card \n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n\n response = [\n {\n \"card\":{\n \"title\": \"Covid-19 Provience: \"+str(province)+\" | Details\",\n \"subtitle\":response_summary,\n \"imageUri\": \"https://setopati.net/wp-content/uploads/2018/02/province6.jpg\",\n \"buttons\":[\n {\n \"text\":\"Prov \"+str(province)+\" District Data\",\n \"postback\":\"dis-vdc 
data detail int\"\n },\n {\n \"text\":\"Prov \"+str(province)+\" Vdc-Mun Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n \n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n\n\n # provincecode = pcode\n if(dvdc==\"vdc\"):\n print('inside vdc')\n typ = \"vdc\" \n else:\n print('inside district')\n typ = \"district\"\n\n data_return = dss.ard(code,typ)\n response = [\n {\n \"quickReplies\": {\n \"title\": data_return,\n \"quickReplies\": [\n \"District Summary\",\n \"Province Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def nepal_data_new_main_int():\n url = \"https://nepalcorona.info/api/v1/data/nepal\"\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n\n \n response2 = \"Nepal Cases \\n Positive :\"+str(todos[\"tested_positive\"])+\" | Recovered: \"+str(todos[\"recovered\"])+\"| Deaths:\"+str(todos[\"deaths\"])+\" \"+\"\\n\"\n print(response2)\n response_summary = dss.affected_summary()\n\n response = [\n {\n \"text\": {\n \"text\": [\n response2\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\": {\n \"text\": [\n \"\"\n ]\n }\n },\n {\n \"card\":{\n \"title\": \"Covid-19 Nepal | Stats\",\n \"subtitle\":response_summary,\n # \"subtitle\": \"Find details by Province, Municipals and Districts for Nepal\",\n \"imageUri\": 
\"https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png\",\n \"buttons\":[\n {\n \"text\":\"Province Summary\",\n \"postback\":\"province data int\"\n },\n {\n \"text\":\"District-Summary\",\n \"postback\":\"district data int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def batti_update():\n url = \"https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM\"\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos[\"feeds\"][0]\n \n response2 = \"Batti Status Now :\"+str(feeds[\"field1\"]+\"\\n Last Updated: \"+str(feeds[\"created_at\"]))\n print(response2)\n reply = { \"fulfillmentText\": response2 }\n return reply\n\n\n def default():\n return \"Incorrect Data\"\n\n switcher = {\n \"nepal data int\": nepal_data_new_main_int,\n \"news-nepal-int\": news_nepal_int,\n \"i need help main int - yes\": i_need_help_yes,\n \"faq-que-ans-int\": faq_ques_ans,\n \"bloodpal-need-blood-main-int - yes\": blood_pal_yes,\n \"data world int\": world_data_live,\n \"district data int\": district_all_summary,\n \"province data int\": province_all_summary,\n \"province-wise-data\": proviencewise_detail,\n \"dis-vdc data detail int\": dis_vdc_detail,\n \"bloodpal-become-donor-main-int\":blood_pal_donor_yes,\n \"batti-update-intent\":batti_update\n }\n \n def switch(intentname):\n return switcher.get(intentname, default)()\n\n reply = switch(intent)\n return jsonify(reply)\n \n\nif __name__ == '__main__':\n \n app.run()\n",
"from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 
'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"<import token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 
'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 
'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' 
>>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<function token>\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], 
place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n return 'Flask setup'\n\n\n<function token>\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<function token>\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], 
place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<function token>\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 
'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n 
opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n<function token>\n<function token>\n\n\[email protected]('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,062 |
0555c577a8fb746cf2debb929d02b46cd3be4d7b
|
from typing import List
def uppercase_first_letter(string: str) -> str:
return string[0:1].upper() + string[1:]
string_list: List[str] = input('Please, input string: ').split(' ')
result: str = ''
for i, value in enumerate(string_list):
result += (lambda index: '' if index == 0 else ' ')(i) + uppercase_first_letter(value)
print(result)
|
[
"from typing import List\n\n\ndef uppercase_first_letter(string: str) -> str:\n return string[0:1].upper() + string[1:]\n\n\nstring_list: List[str] = input('Please, input string: ').split(' ')\nresult: str = ''\n\nfor i, value in enumerate(string_list):\n result += (lambda index: '' if index == 0 else ' ')(i) + uppercase_first_letter(value)\n\nprint(result)\n",
"from typing import List\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\nstring_list: List[str] = input('Please, input string: ').split(' ')\nresult: str = ''\nfor i, value in enumerate(string_list):\n result += (lambda index: '' if index == 0 else ' ')(i\n ) + uppercase_first_letter(value)\nprint(result)\n",
"<import token>\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\nstring_list: List[str] = input('Please, input string: ').split(' ')\nresult: str = ''\nfor i, value in enumerate(string_list):\n result += (lambda index: '' if index == 0 else ' ')(i\n ) + uppercase_first_letter(value)\nprint(result)\n",
"<import token>\n\n\ndef uppercase_first_letter(string: str) ->str:\n return string[0:1].upper() + string[1:]\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,063 |
8355faf7c0d3742be34a56ddc982cb389c80d0a9
|
import unittest
from traceback import print_tb
from ml_base.utilities.model_manager import ModelManager
from tests.mocks import MLModelMock
class ModelManagerTests(unittest.TestCase):
def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):
"""Testing that the ModelManager will return the same instance of an MLModel class from several different
references of ModelManager."""
# arrange, act
# instantiating the model manager class twice
first_model_manager = ModelManager()
second_model_manager = ModelManager()
# loading the MLModel objects from configuration
first_model_manager.load_model("tests.mocks.MLModelMock")
first_model_object = first_model_manager.get_model(qualified_name="qualified_name")
second_model_object = second_model_manager.get_model(qualified_name="qualified_name")
# assert
self.assertTrue(str(first_model_manager) == str(second_model_manager))
self.assertTrue(str(first_model_object) == str(second_model_object))
def test_load_model_method(self):
"""Testing the load_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# adding the model
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
model_object = None
# accessing the MLModelMock model object
try:
model_object = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised = True
print_tb(e)
# assert
self.assertFalse(exception_raised)
self.assertTrue(model_object is not None)
def test_load_model_method_with_wrong_class_path(self):
"""Testing the load_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# act
# adding the model
exception_raised = False
exception_message = None
# accessing the MLModelMock model object
try:
model_manager.load_model("sdf.sdf.sdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "No module named 'sdf'")
def test_only_ml_model_instances_allowed_to_be_stored(self):
"""Testing that the ModelManager only allows MLModel objects to be stored."""
# arrange
model_manager = ModelManager()
# act
exception_raised = False
exception_message = ""
try:
model_manager.load_model("tests.mocks.SomeClass")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "ModelManager instance can only hold references to objects of type MLModel.")
def test_model_manager_does_not_allow_duplicate_qualified_names(self):
"""Testing that the ModelManager does not allow duplicate qualified names in the singleton."""
# arrange
model_manager = ModelManager()
# act
# loading the first instance of the model object
model_manager.load_model("tests.mocks.MLModelMock")
exception_raised = False
exception_message = ""
try:
# loading it again
model_manager.load_model("tests.mocks.MLModelMock")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "A model with the same qualified name is already in the ModelManager singleton.")
def test_remove_model_method(self):
"""Testing the remove_model() method."""
# arrange
# instantiating the model manager class
model_manager = ModelManager()
# adding the model
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised1 = False
# accessing the MLModelMock model object
try:
model_manager.remove_model(qualified_name="qualified_name")
except Exception as e:
exception_raised1 = True
exception_raised2 = False
exception_message2 = ""
# trying to access the model that was removed
try:
model = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised2 = True
exception_message2 = str(e)
# assert
self.assertFalse(exception_raised1)
self.assertTrue(exception_raised2)
self.assertTrue(exception_message2 == "Instance of model 'qualified_name' not found in ModelManager.")
def test_remove_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when removing a model that is not found."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
exception_message = ""
try:
model_manager.remove_model(qualified_name="asdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
def test_get_models_method(self):
"""Testing get_models method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
models = model_manager.get_models()
# assert
self.assertTrue(models[0]["display_name"] == "display_name")
self.assertTrue(models[0]["qualified_name"] == "qualified_name")
self.assertTrue(models[0]["description"] == "description")
self.assertTrue(models[0]["version"] == "1.0.0")
def test_get_model_metadata_method(self):
"""Testing get_model_metadata method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
model_metadata = model_manager.get_model_metadata(qualified_name="qualified_name")
# assert
self.assertTrue(model_metadata["display_name"] == "display_name")
self.assertTrue(model_metadata["qualified_name"] == "qualified_name")
self.assertTrue(model_metadata["description"] == "description")
self.assertTrue(model_metadata["version"] == "1.0.0")
self.assertTrue(type(model_metadata["input_schema"]) is dict)
self.assertTrue(type(model_metadata["output_schema"]) is dict)
def test_get_model_metadata_method_with_missing_model(self):
"""Testing get_model_metadata method with missing model."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
excpeption_raised = False
exception_message = None
try:
model_metadata = model_manager.get_model_metadata(qualified_name="asdf")
except Exception as e:
excpeption_raised = True
exception_message = str(e)
# assert
self.assertTrue(excpeption_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
def test_get_model_method(self):
"""Testing the get_model method."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
model = None
try:
model = model_manager.get_model(qualified_name="qualified_name")
except Exception as e:
exception_raised = True
# assert
self.assertFalse(exception_raised)
self.assertTrue(type(model) is MLModelMock)
def test_get_model_method_with_missing_model(self):
"""Testing that the ModelManager raises ValueError exception when a model is not found."""
# arrange
model_manager = ModelManager()
model_manager.load_model("tests.mocks.MLModelMock")
# act
exception_raised = False
exception_message = ""
model = None
try:
model = model_manager.get_model(qualified_name="asdf")
except Exception as e:
exception_raised = True
exception_message = str(e)
# assert
self.assertTrue(exception_raised)
self.assertTrue(exception_message == "Instance of model 'asdf' not found in ModelManager.")
# Allow running this test module directly (e.g. `python <file>.py`)
# in addition to discovery via a test runner.
if __name__ == '__main__':
    unittest.main()
|
[
"import unittest\nfrom traceback import print_tb\n\nfrom ml_base.utilities.model_manager import ModelManager\nfrom tests.mocks import MLModelMock\n\n\nclass ModelManagerTests(unittest.TestCase):\n\n def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n \"\"\"Testing that the ModelManager will return the same instance of an MLModel class from several different\n references of ModelManager.\"\"\"\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # act\n # adding the model\n exception_raised = False\n exception_message = None\n # accessing the MLModelMock model object\n try:\n model_manager.load_model(\"sdf.sdf.sdf\")\n 
except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.load_model(\"tests.mocks.SomeClass\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"ModelManager instance can only hold references to objects of type MLModel.\")\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n # act\n # loading the first instance of the model object\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n exception_raised = False\n exception_message = \"\"\n try:\n # loading it again\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"A model with the same qualified name is already in the ModelManager singleton.\")\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised1 = False\n # accessing the MLModelMock model object\n try:\n model_manager.remove_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised1 = True\n\n exception_raised2 = False\n 
exception_message2 = \"\"\n # trying to access the model that was removed\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n\n # assert\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 == \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.remove_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n models = model_manager.get_models()\n\n # assert\n self.assertTrue(models[0][\"display_name\"] == \"display_name\")\n self.assertTrue(models[0][\"qualified_name\"] == \"qualified_name\")\n self.assertTrue(models[0][\"description\"] == \"description\")\n self.assertTrue(models[0][\"version\"] == \"1.0.0\")\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n model_metadata = model_manager.get_model_metadata(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(model_metadata[\"display_name\"] == \"display_name\")\n self.assertTrue(model_metadata[\"qualified_name\"] == 
\"qualified_name\")\n self.assertTrue(model_metadata[\"description\"] == \"description\")\n self.assertTrue(model_metadata[\"version\"] == \"1.0.0\")\n self.assertTrue(type(model_metadata[\"input_schema\"]) is dict)\n self.assertTrue(type(model_metadata[\"output_schema\"]) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_model_method(self):\n \"\"\"Testing the get_model method.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")\n\n\nif __name__ 
== '__main__':\n unittest.main()\n",
"import unittest\nfrom traceback import print_tb\nfrom ml_base.utilities.model_manager import ModelManager\nfrom tests.mocks import MLModelMock\n\n\nclass ModelManagerTests(unittest.TestCase):\n\n def test_model_manager_will_return_same_instance_when_instantiated_many_times(\n self):\n \"\"\"Testing that the ModelManager will return the same instance of an MLModel class from several different\n references of ModelManager.\"\"\"\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n first_model_manager.load_model('tests.mocks.MLModelMock')\n first_model_object = first_model_manager.get_model(qualified_name=\n 'qualified_name')\n second_model_object = second_model_manager.get_model(qualified_name\n ='qualified_name')\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model_object = None\n try:\n model_object = model_manager.get_model(qualified_name=\n 'qualified_name')\n except Exception as e:\n exception_raised = True\n print_tb(e)\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n 
exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n 
model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_model_method(self):\n \"\"\"Testing 
the get_model method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised = True\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n\n def test_model_manager_will_return_same_instance_when_instantiated_many_times(\n self):\n \"\"\"Testing that the ModelManager will return the same instance of an MLModel class from several different\n references of ModelManager.\"\"\"\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n first_model_manager.load_model('tests.mocks.MLModelMock')\n first_model_object = first_model_manager.get_model(qualified_name=\n 'qualified_name')\n second_model_object = second_model_manager.get_model(qualified_name\n ='qualified_name')\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model_object = None\n try:\n model_object = model_manager.get_model(qualified_name=\n 'qualified_name')\n except Exception as e:\n exception_raised = True\n print_tb(e)\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n 
exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n 
self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_model_method(self):\n \"\"\"Testing the get_model method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n 
exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised = True\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n\n def test_model_manager_will_return_same_instance_when_instantiated_many_times(\n self):\n \"\"\"Testing that the ModelManager will return the same instance of an MLModel class from several different\n references of ModelManager.\"\"\"\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n first_model_manager.load_model('tests.mocks.MLModelMock')\n first_model_object = first_model_manager.get_model(qualified_name=\n 'qualified_name')\n second_model_object = second_model_manager.get_model(qualified_name\n ='qualified_name')\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model_object = None\n try:\n model_object = model_manager.get_model(qualified_name=\n 'qualified_name')\n except Exception as e:\n exception_raised = True\n print_tb(e)\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n 
exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n 
self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_model_method(self):\n \"\"\"Testing the get_model method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n 
exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised = True\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n\n def test_model_manager_will_return_same_instance_when_instantiated_many_times(\n self):\n \"\"\"Testing that the ModelManager will return the same instance of an MLModel class from several different\n references of ModelManager.\"\"\"\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n first_model_manager.load_model('tests.mocks.MLModelMock')\n first_model_object = first_model_manager.get_model(qualified_name=\n 'qualified_name')\n second_model_object = second_model_manager.get_model(qualified_name\n ='qualified_name')\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model_object = None\n try:\n model_object = model_manager.get_model(qualified_name=\n 'qualified_name')\n except Exception as e:\n exception_raised = True\n print_tb(e)\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n 
exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n 
self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n 
model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n\n def test_load_model_method(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n model_object = None\n try:\n model_object = model_manager.get_model(qualified_name=\n 'qualified_name')\n except Exception as e:\n exception_raised = True\n print_tb(e)\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n 
self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = 
ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 
= True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n\n def test_get_model_metadata_method(self):\n \"\"\"Testing get_model_metadata method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n model_metadata = model_manager.get_model_metadata(qualified_name=\n 'qualified_name')\n self.assertTrue(model_metadata['display_name'] == 'display_name')\n self.assertTrue(model_metadata['qualified_name'] == 'qualified_name')\n self.assertTrue(model_metadata['description'] == 'description')\n self.assertTrue(model_metadata['version'] == '1.0.0')\n 
self.assertTrue(type(model_metadata['input_schema']) is dict)\n self.assertTrue(type(model_metadata['output_schema']) is dict)\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 
= True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not 
found in ModelManager.\")\n <function token>\n\n def test_get_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when a model is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n model = None\n try:\n model = model_manager.get_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 
= True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n\n def test_remove_model_method_with_missing_model(self):\n \"\"\"Testing that the ModelManager raises ValueError exception when removing a model that is not found.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.remove_model(qualified_name='asdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not 
found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n\n def test_load_model_method_with_wrong_class_path(self):\n \"\"\"Testing the load_model() method.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = None\n try:\n model_manager.load_model('sdf.sdf.sdf')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 
= True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n <function token>\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n\n def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n \"\"\"Testing that the ModelManager does not allow duplicate qualified names in the singleton.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.MLModelMock')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'A model with the same qualified name is already in the ModelManager singleton.'\n )\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n 
<function token>\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n <function token>\n\n def test_remove_model_method(self):\n \"\"\"Testing the remove_model() method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n exception_raised1 = False\n try:\n model_manager.remove_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised1 = True\n exception_raised2 = False\n exception_message2 = ''\n try:\n model = model_manager.get_model(qualified_name='qualified_name')\n except Exception as e:\n exception_raised2 = True\n exception_message2 = str(e)\n self.assertFalse(exception_raised1)\n self.assertTrue(exception_raised2)\n self.assertTrue(exception_message2 ==\n \"Instance of model 'qualified_name' not found in ModelManager.\")\n <function token>\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing 
model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n <function token>\n <function token>\n <function token>\n\n def test_get_models_method(self):\n \"\"\"Testing get_models method.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n models = model_manager.get_models()\n self.assertTrue(models[0]['display_name'] == 'display_name')\n self.assertTrue(models[0]['qualified_name'] == 'qualified_name')\n self.assertTrue(models[0]['description'] == 'description')\n self.assertTrue(models[0]['version'] == '1.0.0')\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_model_metadata_method_with_missing_model(self):\n \"\"\"Testing get_model_metadata method with missing model.\"\"\"\n model_manager = ModelManager()\n model_manager.load_model('tests.mocks.MLModelMock')\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name\n ='asdf')\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message ==\n \"Instance of model 'asdf' not found in ModelManager.\")\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_only_ml_model_instances_allowed_to_be_stored(self):\n \"\"\"Testing that the ModelManager only allows MLModel objects to be stored.\"\"\"\n model_manager = ModelManager()\n exception_raised = False\n exception_message = ''\n try:\n model_manager.load_model('tests.mocks.SomeClass')\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message ==\n 'ModelManager instance can only hold references to objects of type MLModel.'\n )\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ModelManagerTests(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
1,064 |
8ec18e259af1123fad7563aee3a363e095e30e8e
|
from django.db import models
from albums.models import Albums
class Song(models.Model):
name = models.CharField(max_length=255)
filename = models.FileField(upload_to='canciones/')
album = models.ForeignKey(Albums)
def __unicode__(self,):
return self.name
|
[
"from django.db import models\n\nfrom albums.models import Albums\n\nclass Song(models.Model):\n name = models.CharField(max_length=255)\n filename = models.FileField(upload_to='canciones/')\n album = models.ForeignKey(Albums)\n\n def __unicode__(self,):\n return self.name\n",
"from django.db import models\nfrom albums.models import Albums\n\n\nclass Song(models.Model):\n name = models.CharField(max_length=255)\n filename = models.FileField(upload_to='canciones/')\n album = models.ForeignKey(Albums)\n\n def __unicode__(self):\n return self.name\n",
"<import token>\n\n\nclass Song(models.Model):\n name = models.CharField(max_length=255)\n filename = models.FileField(upload_to='canciones/')\n album = models.ForeignKey(Albums)\n\n def __unicode__(self):\n return self.name\n",
"<import token>\n\n\nclass Song(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __unicode__(self):\n return self.name\n",
"<import token>\n\n\nclass Song(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
1,065 |
f7d3096d669946e13186a893ffc53067e0fd0a0a
|
# -*- coding: utf-8 -*-
"""Digital Forensics Virtual File System (dfVFS).
dfVFS, or Digital Forensics Virtual File System, is a Python module
that provides read-only access to file-system objects from various
storage media types and file formats.
"""
|
[
"# -*- coding: utf-8 -*-\n\"\"\"Digital Forensics Virtual File System (dfVFS).\n\ndfVFS, or Digital Forensics Virtual File System, is a Python module\nthat provides read-only access to file-system objects from various\nstorage media types and file formats.\n\"\"\"\n",
"<docstring token>\n"
] | false |
1,066 |
84980b8923fa25664833f810a906d27531145141
|
import cv2, os, fitz, shutil
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from PIL import UnidentifiedImageError
pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'
config = r'--oem 3 --psm'
# Возвращает путь к картинке, созданной на основе 1 СТРАНИЦЫ pdf файла
# На входе требуется название pdf файла
def pdf_to_png(filename):
doc = fitz.open('pdf_files\{}'.format(filename))
zoom = 4 # zoom factor (влияет на качество получаемого из pdf изображения png)
page = doc.loadPage(0)
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
new_filename = filename.replace('pdf', 'png')
pix.writePNG('photo_files\{}'.format(new_filename))
return new_filename
# i в аргументах - номер итерации, чтобы вырезанных символов не пересекались
def create_learn_base(filename, language, i): # Создает папки с вырезанными распознанными символами в папке learn_data
# Открываем файлы с картинками
img_to_read = cv2.imdecode(np.fromfile('photo_files\{}'.format(filename), dtype=np.uint8),cv2.IMREAD_UNCHANGED) # МОДУЛЬ ДЛЯ ЧТЕНИЯ РУССКИХ ФАЙЛОВ #
img_to_crop = Image.open('photo_files\{}'.format(filename))
# Считываем текст с картинки в массив, если нужно - выводим
# words_in_image = pytesseract.image_to_string(img_to_read, lang=language)
# print(words_in_image)
height, width, c = img_to_read.shape
letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)
for box in letter_boxes.splitlines(): # Вырезаем по очереди квадраты с символами
# Обрабатываем ошибки, возникающие при выходе за пределы картинки при обрезке
try:
i += 1
box = box.split()
x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])
cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0, 0, 255), 1)
area = (x, height - h, w, height - y) # Задаем область, содержащую вырезаемый символ
cropped_img = img_to_crop.crop(area)
try: # Обрабатываем ошибки, возникающие при неправильных именах файлов
if not os.path.exists('learn_data\s_{}'.format(box[0])):
os.mkdir('learn_data\s_{}'.format(box[0]))
cropped_img.save('learn_data\s_{}/{}_{}.PNG'.format(box[0], box[0], i))
except OSError:
pass
except SystemError:
pass
return i
def fix_dir_bugs():
for the_dir in os.listdir('learn_data'):
for the_file in os.listdir('learn_data/'+the_dir):
try:
Image.open('learn_data/'+the_dir+'/'+the_file)
except OSError:
os.remove('learn_data/'+the_dir+'/'+the_file)
def clear_directory(directory):
shutil.rmtree(directory)
os.makedirs(directory)
clear_directory('learn_data')
for the_file in os.listdir('pdf_files'):
filename = the_file
png_filename = pdf_to_png(filename)
i = 0
for the_file in os.listdir('photo_files'):
i += create_learn_base(the_file, 'rus', i)
fix_dir_bugs()
############# РУЧНАЯ ПРОВЕРКА #############
# Image.open('renamed_learn_data/26/C_591.PNG')
# fix_dir_bugs()
# try:
# Image.open('renamed_learn_data/26/C_591.PNG')
# except OSError:
# os.remove('renamed_learn_data/26/C_591.PNG')
|
[
"import cv2, os, fitz, shutil\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom pytesseract import pytesseract\r\nfrom PIL import UnidentifiedImageError\r\n\r\npytesseract.tesseract_cmd = 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe'\r\nconfig = r'--oem 3 --psm'\r\n\r\n\r\n# Возвращает путь к картинке, созданной на основе 1 СТРАНИЦЫ pdf файла\r\n# На входе требуется название pdf файла\r\ndef pdf_to_png(filename):\r\n doc = fitz.open('pdf_files\\{}'.format(filename))\r\n zoom = 4 # zoom factor (влияет на качество получаемого из pdf изображения png)\r\n page = doc.loadPage(0)\r\n mat = fitz.Matrix(zoom, zoom)\r\n pix = page.getPixmap(matrix=mat)\r\n new_filename = filename.replace('pdf', 'png')\r\n pix.writePNG('photo_files\\{}'.format(new_filename))\r\n return new_filename\r\n\r\n\r\n# i в аргументах - номер итерации, чтобы вырезанных символов не пересекались\r\ndef create_learn_base(filename, language, i): # Создает папки с вырезанными распознанными символами в папке learn_data\r\n # Открываем файлы с картинками\r\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\{}'.format(filename), dtype=np.uint8),cv2.IMREAD_UNCHANGED) # МОДУЛЬ ДЛЯ ЧТЕНИЯ РУССКИХ ФАЙЛОВ #\r\n img_to_crop = Image.open('photo_files\\{}'.format(filename))\r\n\r\n # Считываем текст с картинки в массив, если нужно - выводим\r\n # words_in_image = pytesseract.image_to_string(img_to_read, lang=language)\r\n # print(words_in_image)\r\n\r\n height, width, c = img_to_read.shape\r\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\r\n\r\n for box in letter_boxes.splitlines(): # Вырезаем по очереди квадраты с символами\r\n # Обрабатываем ошибки, возникающие при выходе за пределы картинки при обрезке\r\n try:\r\n i += 1\r\n box = box.split()\r\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\r\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0, 0, 255), 1)\r\n area = (x, height - h, w, height - y) # Задаем область, содержащую 
вырезаемый символ\r\n cropped_img = img_to_crop.crop(area)\r\n try: # Обрабатываем ошибки, возникающие при неправильных именах файлов\r\n if not os.path.exists('learn_data\\s_{}'.format(box[0])):\r\n os.mkdir('learn_data\\s_{}'.format(box[0]))\r\n cropped_img.save('learn_data\\s_{}/{}_{}.PNG'.format(box[0], box[0], i))\r\n except OSError:\r\n pass\r\n except SystemError:\r\n pass\r\n return i\r\n\r\n\r\ndef fix_dir_bugs():\r\n for the_dir in os.listdir('learn_data'):\r\n for the_file in os.listdir('learn_data/'+the_dir):\r\n try:\r\n Image.open('learn_data/'+the_dir+'/'+the_file)\r\n except OSError:\r\n os.remove('learn_data/'+the_dir+'/'+the_file)\r\n\r\n\r\ndef clear_directory(directory):\r\n shutil.rmtree(directory)\r\n os.makedirs(directory)\r\n\r\n\r\nclear_directory('learn_data')\r\n\r\n\r\nfor the_file in os.listdir('pdf_files'):\r\n filename = the_file\r\n png_filename = pdf_to_png(filename)\r\n\r\ni = 0\r\nfor the_file in os.listdir('photo_files'):\r\n i += create_learn_base(the_file, 'rus', i)\r\n\r\nfix_dir_bugs()\r\n############# РУЧНАЯ ПРОВЕРКА #############\r\n\r\n\r\n# Image.open('renamed_learn_data/26/C_591.PNG')\r\n# fix_dir_bugs()\r\n\r\n# try:\r\n# Image.open('renamed_learn_data/26/C_591.PNG')\r\n# except OSError:\r\n# os.remove('renamed_learn_data/26/C_591.PNG')",
"import cv2, os, fitz, shutil\nimport numpy as np\nfrom PIL import Image\nfrom pytesseract import pytesseract\nfrom PIL import UnidentifiedImageError\npytesseract.tesseract_cmd = (\n 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe')\nconfig = '--oem 3 --psm'\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\ni = 0\nfor the_file 
in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"<import token>\npytesseract.tesseract_cmd = (\n 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe')\nconfig = '--oem 3 --psm'\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\ni = 0\nfor the_file in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"<import token>\n<assignment token>\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\nclear_directory('learn_data')\nfor the_file in os.listdir('pdf_files'):\n filename = the_file\n png_filename = pdf_to_png(filename)\n<assignment token>\nfor the_file in os.listdir('photo_files'):\n i += create_learn_base(the_file, 'rus', i)\nfix_dir_bugs()\n",
"<import token>\n<assignment token>\n\n\ndef pdf_to_png(filename):\n doc = fitz.open('pdf_files\\\\{}'.format(filename))\n zoom = 4\n page = doc.loadPage(0)\n mat = fitz.Matrix(zoom, zoom)\n pix = page.getPixmap(matrix=mat)\n new_filename = filename.replace('pdf', 'png')\n pix.writePNG('photo_files\\\\{}'.format(new_filename))\n return new_filename\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef create_learn_base(filename, language, i):\n img_to_read = cv2.imdecode(np.fromfile('photo_files\\\\{}'.format(\n filename), dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n img_to_crop = Image.open('photo_files\\\\{}'.format(filename))\n height, width, c = img_to_read.shape\n letter_boxes = pytesseract.image_to_boxes(img_to_read, lang=language)\n for box in letter_boxes.splitlines():\n try:\n i += 1\n box = box.split()\n x, y, w, h = int(box[1]), int(box[2]), int(box[3]), int(box[4])\n cv2.rectangle(img_to_read, (x, height - y), (w, height - h), (0,\n 0, 255), 1)\n area = x, height - h, w, height - y\n cropped_img = img_to_crop.crop(area)\n try:\n if not os.path.exists('learn_data\\\\s_{}'.format(box[0])):\n os.mkdir('learn_data\\\\s_{}'.format(box[0]))\n cropped_img.save('learn_data\\\\s_{}/{}_{}.PNG'.format(box[0],\n box[0], i))\n except OSError:\n pass\n except SystemError:\n pass\n return i\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef fix_dir_bugs():\n for the_dir in os.listdir('learn_data'):\n for the_file in os.listdir('learn_data/' + the_dir):\n try:\n Image.open('learn_data/' + the_dir + '/' + the_file)\n except OSError:\n os.remove('learn_data/' + the_dir + '/' + the_file)\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef clear_directory(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,067 |
bb208d40ce098b05594aaf9c579f64b909738d52
|
#!/usr/bin/python
import os;
import math;
# os.chdir('data/postgres/linux.env')
os.chdir('data/mysql/linux.env')
# os.chdir('data/mongo/linux.env')
col_time = 0;
col_read_ops = 1
col_read_err = 2
col_write_ops = 3
col_write_err = 4
class ColumnData:
def __init__(self, chart, title, data):
self.chart = chart;
self.title = title;
self.data = data;
self.sum = sum(self.data);
self.avg = self.sum / len(self.data);
self.sd = math.sqrt(sum([math.pow(x - self.avg, 2) for x in data]) / len(self.data));
def aggregate(self, group_size):
assert len(self.data) % group_size == 0
agg_data = [0.0 for i in range(len(self.data) / group_size)]
for i in range(len(self.data)):
agg_data[i / group_size] += self.data[i]
agg_column = ColumnData(self.chart, self.title + '_agg', [x / group_size for x in agg_data])
agg_column.sum = self.sum
agg_column.avg = self.avg
agg_column.sd = self.sd
return agg_column
class ChartData:
def __init__(self, file):
assert file.endswith('.csv')
# read meta-data:
for field in file[:-len('.csv')].split(','):
[key, value] = field.split('=');
setattr(self, key, int(value));
# read raw data:
first_line = True;
input_matrix = None
for line in open(file, 'r'):
line = line.strip();
if line <> '':
items = line.split(',')
if first_line:
input_matrix = [[title.replace("'", '')] for title in items]
first_line = False;
else:
values = [float(value) for value in items]
for i in range(len(values)):
input_matrix[i].append(values[i])
self.columns = [ColumnData(self, input_column[0], input_column[1:]) for input_column in input_matrix]
self.time_line = self.columns[0]
self.read_th = self.r_lite + self.r_heavy;
read_title = 'r%d_R%d' % (self.r_lite, self.r_heavy)
self.read_ops = self.columns[1]
self.read_ops.title = 'R_' + read_title
self.read_err = self.columns[2]
self.read_err.title = 'RE_' + read_title
self.write_th = self.w_ins + self.w_up_tiny + self.w_up_wide;
write_title = 'i%d_u%d_U%d' % (self.w_ins, self.w_up_tiny, self.w_up_wide)
self.write_ops = self.columns[3]
self.write_ops.title = 'W_' + write_title
self.write_err = self.columns[4]
self.write_err.title = 'WE_' + write_title
name_index = 0;
def draw_chart(columns, name='', notes=''):
if name == '':
global name_index;
name_index += 1;
name = 'chart_%s' % name_index
id = 'chart_' + name;
result = "";
result += """
function %s() {
var data = google.visualization.arrayToDataTable([
""" % id;
result += '[%s],\n' % ', '.join(['"' + c.title + '"' for c in columns])
for i in range(len(columns[0].data)):
result += '[%s],\n' % (', '.join([str(c.data[i]) for c in columns]))
result += """
]);
var options = {
title: '%s',
//curveType: 'function',
chartArea:{left:60,top:10,width:'65%%',height:'85%%'}
};
var chart = new google.visualization.LineChart(document.getElementById('%s'));
chart.draw(data, options);
}
""" % (name, id);
return id, result
charts = []
def draw_aggregated_chart(name, columns, read_from=0, read_to=0, write_from=0, write_to=0):
read_chart = []
for file_csv in os.listdir('.'):
if file_csv.endswith('.csv'):
items = file_csv.replace('=', '_').replace('.', '_').split('_');
read_threads = int(items[4]);
write_threads = int(items[6]);
if read_from <= read_threads <= read_to and write_from <= write_threads <= write_to:
chart = read_chart_data(file_csv);
if len(read_chart) == 0:
read_chart = [[t] for t in extract_column(chart, col_time)];
for column in columns:
column_data = extract_column(chart, column)
if sum(column_data[1:]) == 0.0:
continue;
read_chart = append_column(read_chart, column_data);
return draw_chart(read_chart, name);
def meta_column(columns, title, metric):
return ColumnData(None, title, [metric(c) for c in columns])
def render_group(time_line, group_list, meta_prefix, threads_metric):
global c
charts.append(draw_chart([time_line] + [c.write_ops for c in group_list]));
charts.append(draw_chart([time_line.aggregate(10)] + [c.write_ops.aggregate(10) for c in group_list]));
charts.append(draw_chart([
meta_column([c.write_ops for c in group_list], meta_prefix + ' Threads', threads_metric),
meta_column([c.write_ops for c in group_list], meta_prefix + ' ops avg', lambda c: c.avg),
meta_column([c.write_ops for c in group_list], meta_prefix + ' ops sd', lambda c: c.sd),
]));
if True:
chart_list = []
for file_name in os.listdir('.'):
if file_name.endswith('.csv'):
chart_list.append(ChartData(file_name));
chart_ins_list = [c for c in chart_list if c.w_ins > 0 and c.read_th==0]
chart_up_tiny_list = [c for c in chart_list if c.w_up_tiny > 0 and c.read_th==0]
chart_up_wide_list = [c for c in chart_list if c.w_up_wide > 0 and c.read_th==0]
chart_r_lite_list = [c for c in chart_list if c.r_lite > 0 and c.write_th==0]
chart_r_heavy_list = [c for c in chart_list if c.r_heavy > 0 and c.write_th==0]
time_line = chart_list[0].time_line
if len(chart_ins_list)>0:
render_group(time_line, chart_ins_list, 'Write Ins', lambda c: c.chart.write_th)
if len(chart_up_tiny_list)>0:
render_group(time_line, chart_up_tiny_list, 'Write Up Tiny', lambda c: c.chart.write_th)
if len(chart_up_wide_list)>0:
render_group(time_line, chart_up_wide_list, 'Write Up Wide', lambda c: c.chart.write_th)
with open('report-all.html', 'w') as out:
out.write("""<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(function(){
""");
for id, renderer in charts:
out.write(" %s();\n" % id);
out.write("""
});
""");
for id, renderer in charts:
out.write(renderer);
out.write("""
</script>
</head>
<body>
""");
for id, renderer in charts:
out.write(' <div id="%s" style="width: 1200px; height: 400px;"></div>\n' % id)
out.write("""
</body>
</html>""");
|
[
"#!/usr/bin/python\n\nimport os;\nimport math;\n\n# os.chdir('data/postgres/linux.env')\nos.chdir('data/mysql/linux.env')\n# os.chdir('data/mongo/linux.env')\n\ncol_time = 0;\ncol_read_ops = 1\ncol_read_err = 2\ncol_write_ops = 3\ncol_write_err = 4\n\n\nclass ColumnData:\n def __init__(self, chart, title, data):\n self.chart = chart;\n self.title = title;\n self.data = data;\n self.sum = sum(self.data);\n self.avg = self.sum / len(self.data);\n self.sd = math.sqrt(sum([math.pow(x - self.avg, 2) for x in data]) / len(self.data));\n\n def aggregate(self, group_size):\n assert len(self.data) % group_size == 0\n agg_data = [0.0 for i in range(len(self.data) / group_size)]\n for i in range(len(self.data)):\n agg_data[i / group_size] += self.data[i]\n agg_column = ColumnData(self.chart, self.title + '_agg', [x / group_size for x in agg_data])\n agg_column.sum = self.sum\n agg_column.avg = self.avg\n agg_column.sd = self.sd\n return agg_column\n\n\nclass ChartData:\n def __init__(self, file):\n assert file.endswith('.csv')\n\n # read meta-data:\n for field in file[:-len('.csv')].split(','):\n [key, value] = field.split('=');\n setattr(self, key, int(value));\n\n # read raw data:\n first_line = True;\n input_matrix = None\n for line in open(file, 'r'):\n line = line.strip();\n if line <> '':\n items = line.split(',')\n if first_line:\n input_matrix = [[title.replace(\"'\", '')] for title in items]\n first_line = False;\n else:\n values = [float(value) for value in items]\n for i in range(len(values)):\n input_matrix[i].append(values[i])\n self.columns = [ColumnData(self, input_column[0], input_column[1:]) for input_column in input_matrix]\n\n self.time_line = self.columns[0]\n\n self.read_th = self.r_lite + self.r_heavy;\n read_title = 'r%d_R%d' % (self.r_lite, self.r_heavy)\n self.read_ops = self.columns[1]\n self.read_ops.title = 'R_' + read_title\n self.read_err = self.columns[2]\n self.read_err.title = 'RE_' + read_title\n\n self.write_th = self.w_ins + self.w_up_tiny 
+ self.w_up_wide;\n write_title = 'i%d_u%d_U%d' % (self.w_ins, self.w_up_tiny, self.w_up_wide)\n self.write_ops = self.columns[3]\n self.write_ops.title = 'W_' + write_title\n self.write_err = self.columns[4]\n self.write_err.title = 'WE_' + write_title\n\n\nname_index = 0;\n\n\ndef draw_chart(columns, name='', notes=''):\n if name == '':\n global name_index;\n name_index += 1;\n name = 'chart_%s' % name_index\n id = 'chart_' + name;\n result = \"\";\n result += \"\"\"\n function %s() {\n var data = google.visualization.arrayToDataTable([\n \"\"\" % id;\n result += '[%s],\\n' % ', '.join(['\"' + c.title + '\"' for c in columns])\n for i in range(len(columns[0].data)):\n result += '[%s],\\n' % (', '.join([str(c.data[i]) for c in columns]))\n\n result += \"\"\"\n ]);\n\n var options = {\n title: '%s',\n //curveType: 'function',\n chartArea:{left:60,top:10,width:'65%%',height:'85%%'}\n };\n\n var chart = new google.visualization.LineChart(document.getElementById('%s'));\n chart.draw(data, options);\n }\n \"\"\" % (name, id);\n return id, result\n\n\ncharts = []\n\n\ndef draw_aggregated_chart(name, columns, read_from=0, read_to=0, write_from=0, write_to=0):\n read_chart = []\n for file_csv in os.listdir('.'):\n if file_csv.endswith('.csv'):\n items = file_csv.replace('=', '_').replace('.', '_').split('_');\n read_threads = int(items[4]);\n write_threads = int(items[6]);\n if read_from <= read_threads <= read_to and write_from <= write_threads <= write_to:\n chart = read_chart_data(file_csv);\n if len(read_chart) == 0:\n read_chart = [[t] for t in extract_column(chart, col_time)];\n for column in columns:\n column_data = extract_column(chart, column)\n if sum(column_data[1:]) == 0.0:\n continue;\n read_chart = append_column(read_chart, column_data);\n return draw_chart(read_chart, name);\n\n\ndef meta_column(columns, title, metric):\n return ColumnData(None, title, [metric(c) for c in columns])\n\ndef render_group(time_line, group_list, meta_prefix, threads_metric):\n 
global c\n charts.append(draw_chart([time_line] + [c.write_ops for c in group_list]));\n charts.append(draw_chart([time_line.aggregate(10)] + [c.write_ops.aggregate(10) for c in group_list]));\n charts.append(draw_chart([\n meta_column([c.write_ops for c in group_list], meta_prefix + ' Threads', threads_metric),\n meta_column([c.write_ops for c in group_list], meta_prefix + ' ops avg', lambda c: c.avg),\n meta_column([c.write_ops for c in group_list], meta_prefix + ' ops sd', lambda c: c.sd),\n ]));\n\n\nif True:\n chart_list = []\n for file_name in os.listdir('.'):\n if file_name.endswith('.csv'):\n chart_list.append(ChartData(file_name));\n\n chart_ins_list = [c for c in chart_list if c.w_ins > 0 and c.read_th==0]\n chart_up_tiny_list = [c for c in chart_list if c.w_up_tiny > 0 and c.read_th==0]\n chart_up_wide_list = [c for c in chart_list if c.w_up_wide > 0 and c.read_th==0]\n chart_r_lite_list = [c for c in chart_list if c.r_lite > 0 and c.write_th==0]\n chart_r_heavy_list = [c for c in chart_list if c.r_heavy > 0 and c.write_th==0]\n time_line = chart_list[0].time_line\n\n if len(chart_ins_list)>0:\n render_group(time_line, chart_ins_list, 'Write Ins', lambda c: c.chart.write_th)\n if len(chart_up_tiny_list)>0:\n render_group(time_line, chart_up_tiny_list, 'Write Up Tiny', lambda c: c.chart.write_th)\n if len(chart_up_wide_list)>0:\n render_group(time_line, chart_up_wide_list, 'Write Up Wide', lambda c: c.chart.write_th)\n\nwith open('report-all.html', 'w') as out:\n out.write(\"\"\"<html>\n <head>\n <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n <script type=\"text/javascript\">\n google.load(\"visualization\", \"1\", {packages:[\"corechart\"]});\n google.setOnLoadCallback(function(){\n \"\"\");\n for id, renderer in charts:\n out.write(\" %s();\\n\" % id);\n out.write(\"\"\" \n });\n \"\"\");\n for id, renderer in charts:\n out.write(renderer);\n\n out.write(\"\"\"\n </script>\n </head>\n <body>\n \"\"\");\n\n for id, 
renderer in charts:\n out.write(' <div id=\"%s\" style=\"width: 1200px; height: 400px;\"></div>\\n' % id)\n\n out.write(\"\"\"\n </body>\n</html>\"\"\");\n"
] | true |
1,068 |
84515ef6879b54b333f9afd48c6c4b7c43ff6957
|
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
t = triangle
if len(t) == 1:
return t[0][0]
ret = [0] * len(t)
ret[0] = t[0][0]
for i in range(1, len(t)):
for j in range(0, i + 1):
if j == 0:
old_v = ret[j]
ret[j] += t[i][j]
elif j == i:
ret[j] = old_v + t[i][j]
else:
val = min(old_v + t[i][j], ret[j] + t[i][j])
old_v = ret[j]
ret[j] = val
return min(ret)
|
[
"class Solution(object):\n\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n t = triangle\n if len(t) == 1:\n return t[0][0]\n ret = [0] * len(t)\n ret[0] = t[0][0]\n for i in range(1, len(t)):\n for j in range(0, i + 1):\n if j == 0:\n old_v = ret[j]\n ret[j] += t[i][j]\n elif j == i:\n ret[j] = old_v + t[i][j]\n else:\n val = min(old_v + t[i][j], ret[j] + t[i][j])\n old_v = ret[j]\n ret[j] = val\n return min(ret)\n",
"class Solution(object):\n <function token>\n",
"<class token>\n"
] | false |
1,069 |
1bbadf02c4b9ca22a0099bcc09fa4c62c9901c39
|
from django.conf import settings
from django.db import models
def get_image_filename(instance, filename):
a = f'post_images/{instance.post.title}.svg'
return a
def get_main_image_filename(instance, filename):
a = f'post_images/{instance.title}_main.svg'
return a
# Create your models here.
class Posts(models.Model):
    """An interior-design post: title, body text, images, and tag relations.

    Tagging is modeled through M2M relations to the lookup tables
    (Pyeong, Colors, HousingTypes, Styles) plus a like relation to users
    through the PostLike junction model.
    """
    # Choices for the postPyeong CharField: (stored value, display label).
    PYEONG_CHOICE_FIELD = (
        ('1-7', '1-7평'),
        ('8-15', '8-15평'),
        ('16-25', '16-25평'),
        ('26-', '그 이상'),
    )
    # Post owner; deleting the user deletes their posts.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    # Post title (verbose name: "제목" / "title").
    title = models.TextField(
        '제목', max_length=50
    )
    # Body text (verbose name: "작성 글" / "written text").
    content = models.TextField(
        '작성 글', max_length=500
    )
    # Optional cover image; path built by get_main_image_filename.
    main_image = models.ImageField(
        upload_to=get_main_image_filename,
        blank=True,
        null=True,
        verbose_name='메인 이미지',
    )
    # Floor-area (pyeong) range tags.
    pyeong = models.ManyToManyField(
        'Pyeong',
        blank=True,
    )
    # Set once on insert.
    created_at = models.DateTimeField(
        '생성 날짜', auto_now_add=True,
    )
    # Refreshed on every save.
    updated_at = models.DateTimeField(
        verbose_name='수정 날짜', auto_now=True, null=True, blank=True
    )
    # Users who liked this post, through the PostLike junction table.
    like_users = models.ManyToManyField(
        'members.Users',
        through='PostLike',
        related_name='like_posts',
        related_query_name='like_post',
        blank=True,
    )
    # Color tags.
    colors = models.ManyToManyField(
        'posts.Colors',
        blank=True,
    )
    # Housing-type tags (apartment, villa, ...).
    housingtype = models.ManyToManyField(
        'HousingTypes',
        blank=True,
    )
    # Design-style tags.
    style = models.ManyToManyField(
        'Styles',
        blank=True,
    )
    # Single pyeong-range choice stored directly on the post (distinct
    # from the M2M `pyeong` field above — both exist in this schema).
    postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)
    @staticmethod
    def initial_setting():
        # Seed all lookup tables with their default rows.
        # NOTE(review): each make_* call inserts unconditionally, so
        # calling this twice duplicates the lookup rows — confirm intent.
        Pyeong.make_pyeng()
        Colors.make_color()
        HousingTypes.make_housing_type()
        Styles.make_style()
    class Meta:
        verbose_name = '게시글'
        verbose_name_plural = '게시글 목록'
    def __str__(self):
        # "<pk> : <title>" for admin/debug listings.
        return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
    """A user comment attached to a post."""
    # Parent post; deleting the post deletes its comments.
    post = models.ForeignKey(
        Posts,
        on_delete=models.CASCADE,
        verbose_name='포스트',
        related_name='comment_set',
        related_query_name='comments',
    )
    # Comment author.
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    # Comment body (verbose name: "댓글 내용" / "comment content").
    content = models.TextField(
        '댓글 내용', max_length=500
    )
    # (original note: "author")
    created_at = models.DateTimeField(
        '작성 날', auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        '수정 날짜', auto_now=True,
    )
    class Meta:
        verbose_name = '댓글'
        verbose_name_plural = '댓글 목록'
    def save(self, *args, **kwargs):
        # Currently a pass-through override; the original author noted
        # image processing would presumably be added here later.
        super().save(*args, **kwargs)
class PostLike(models.Model):
    """Junction row recording that a user liked a post (at most once)."""
    post = models.ForeignKey(
        Posts,
        on_delete=models.CASCADE,
    )
    user = models.ForeignKey(
        'members.Users',
        on_delete=models.CASCADE,
    )
    # When the like was created.
    created_at = models.DateTimeField(
        auto_now_add=True,
    )
    def __str__(self):
        return 'Post[{post_pk}] Like (User: {username})'.format(
            post_pk=self.post.pk,
            username=self.user.username,
        )
    class Meta:
        verbose_name = '게시글 좋아요'
        verbose_name_plural = f'{verbose_name} 목록'
        # A given user may like a given post at most once.
        unique_together = (
            ('post', 'user'),
        )
class Pyeong(models.Model):
    """Lookup table of pyeong (floor-area) range labels for tagging posts."""
    # Range label, e.g. '1-7' or '그 이상' ("or more").
    type = models.CharField(
        '평 수',
        max_length=20,
    )
    @staticmethod
    def make_pyeng():
        """Insert the default pyeong-range rows.

        NOTE(review): inserts unconditionally — calling twice creates
        duplicate rows. (Method name keeps the original 'pyeng' typo
        because Posts.initial_setting calls it by this name.)
        """
        # Iterate labels directly instead of the index-loop
        # `for i in range((len(index_list)))` anti-pattern.
        for label in ('1-7', '8-15', '16-25', '그 이상'):
            Pyeong.objects.create(type=label)
    def __str__(self):
        return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
    """Lookup table of housing types (villa, apartment, ...) for tagging posts."""
    # Housing-type label, e.g. '아파트' ("apartment").
    type = models.CharField(
        '주거 환경',
        max_length=20,
    )
    @staticmethod
    def make_housing_type():
        """Insert the default housing-type rows.

        NOTE(review): inserts unconditionally — calling twice creates
        duplicate rows.
        """
        # Iterate labels directly instead of indexing via range(len(...)).
        for label in ('빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층'):
            HousingTypes.objects.create(type=label)
    def __str__(self):
        return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
    """Lookup table of interior-design styles for tagging posts."""
    # Style label, e.g. '모던' ("modern").
    type = models.CharField(
        '디자인 스타일',
        max_length=10,
    )
    @staticmethod
    def make_style():
        """Insert the default design-style rows.

        NOTE(review): inserts unconditionally — calling twice creates
        duplicate rows.
        """
        # Iterate labels directly instead of indexing via range(len(...)).
        for label in ('모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱'):
            Styles.objects.create(type=label)
    def __str__(self):
        return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
    """Lookup table of color labels for tagging posts."""
    # Color label, e.g. '빨강' ("red").
    type = models.CharField(
        '색상',
        max_length=10
    )
    @staticmethod
    def make_color():
        """Insert the default color rows.

        NOTE(review): inserts unconditionally — calling twice creates
        duplicate rows.
        """
        # Iterate labels directly instead of indexing via range(len(...)).
        for label in ('빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색'):
            Colors.objects.create(type=label)
    def __str__(self):
        return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
    """Additional (gallery) image attached to a post, with an optional caption."""
    # Parent post; deleting the post deletes its images.
    post = models.ForeignKey(
        Posts,
        on_delete=models.CASCADE,
    )
    # Image file; path built by get_image_filename from the post title.
    image = models.ImageField(
        upload_to=get_image_filename,
        verbose_name='다중 이미지',
    )
    # Optional caption for the image.
    image_comment = models.TextField(
        '사진 설명', max_length=200, blank=True, null=True,
    )
    # Pattern for uploading multiple images per post taken from:
    # https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django
|
[
"from django.conf import settings\nfrom django.db import models\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\ndef get_main_image_filename(instance, filename):\n a = f'post_images/{instance.title}_main.svg'\n return a\n\n\n# Create your models here.\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = (\n ('1-7', '1-7평'),\n ('8-15', '8-15평'),\n ('16-25', '16-25평'),\n ('26-', '그 이상'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n title = models.TextField(\n '제목', max_length=50\n )\n content = models.TextField(\n '작성 글', max_length=500\n )\n main_image = models.ImageField(\n upload_to=get_main_image_filename,\n blank=True,\n null=True,\n verbose_name='메인 이미지',\n )\n pyeong = models.ManyToManyField(\n 'Pyeong',\n blank=True,\n )\n created_at = models.DateTimeField(\n '생성 날짜', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n verbose_name='수정 날짜', auto_now=True, null=True, blank=True\n )\n\n like_users = models.ManyToManyField(\n 'members.Users',\n through='PostLike',\n related_name='like_posts',\n related_query_name='like_post',\n blank=True,\n )\n\n colors = models.ManyToManyField(\n 'posts.Colors',\n blank=True,\n )\n housingtype = models.ManyToManyField(\n 'HousingTypes',\n blank=True,\n )\n style = models.ManyToManyField(\n 'Styles',\n blank=True,\n )\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n verbose_name='포스트',\n related_name='comment_set',\n related_query_name='comments',\n )\n author = models.ForeignKey(\n 
settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n content = models.TextField(\n '댓글 내용', max_length=500\n )\n # 글쓴이\n created_at = models.DateTimeField(\n '작성 날', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n '수정 날짜', auto_now=True,\n )\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n # 여기서 이미지 처리를 하게 될 듯\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n user = models.ForeignKey(\n 'members.Users',\n on_delete=models.CASCADE,\n )\n created_at = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(\n post_pk=self.post.pk,\n username=self.user.username,\n )\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n # 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.\n unique_together = (\n ('post', 'user'),\n )\n\n\nclass Pyeong(models.Model):\n type = models.CharField(\n '평 수',\n max_length=20,\n )\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range((len(index_list))):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField(\n '주거 환경',\n max_length=20,\n )\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField(\n '디자인 스타일',\n max_length=10,\n )\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % 
(self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField(\n '색상',\n max_length=10\n )\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n image = models.ImageField(\n upload_to=get_image_filename,\n verbose_name='다중 이미지',\n )\n image_comment = models.TextField(\n '사진 설명', max_length=200, blank=True, null=True,\n )\n # 이미지 추가 스택오버플로우 정보\n # https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django\n",
"from django.conf import settings\nfrom django.db import models\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\ndef get_main_image_filename(instance, filename):\n a = f'post_images/{instance.title}_main.svg'\n return a\n\n\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = ('1-7', '1-7평'), ('8-15', '8-15평'), ('16-25',\n '16-25평'), ('26-', '그 이상')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.TextField('제목', max_length=50)\n content = models.TextField('작성 글', max_length=500)\n main_image = models.ImageField(upload_to=get_main_image_filename, blank\n =True, null=True, verbose_name='메인 이미지')\n pyeong = models.ManyToManyField('Pyeong', blank=True)\n created_at = models.DateTimeField('생성 날짜', auto_now_add=True)\n updated_at = models.DateTimeField(verbose_name='수정 날짜', auto_now=True,\n null=True, blank=True)\n like_users = models.ManyToManyField('members.Users', through='PostLike',\n related_name='like_posts', related_query_name='like_post', blank=True)\n colors = models.ManyToManyField('posts.Colors', blank=True)\n housingtype = models.ManyToManyField('HousingTypes', blank=True)\n style = models.ManyToManyField('Styles', blank=True)\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', 
auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def 
__str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\ndef get_main_image_filename(instance, filename):\n a = f'post_images/{instance.title}_main.svg'\n return a\n\n\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = ('1-7', '1-7평'), ('8-15', '8-15평'), ('16-25',\n '16-25평'), ('26-', '그 이상')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.TextField('제목', max_length=50)\n content = models.TextField('작성 글', max_length=500)\n main_image = models.ImageField(upload_to=get_main_image_filename, blank\n =True, null=True, verbose_name='메인 이미지')\n pyeong = models.ManyToManyField('Pyeong', blank=True)\n created_at = models.DateTimeField('생성 날짜', auto_now_add=True)\n updated_at = models.DateTimeField(verbose_name='수정 날짜', auto_now=True,\n null=True, blank=True)\n like_users = models.ManyToManyField('members.Users', through='PostLike',\n related_name='like_posts', related_query_name='like_post', blank=True)\n colors = models.ManyToManyField('posts.Colors', blank=True)\n housingtype = models.ManyToManyField('HousingTypes', blank=True)\n style = models.ManyToManyField('Styles', blank=True)\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', 
auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass 
PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\n<function token>\n\n\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = ('1-7', '1-7평'), ('8-15', '8-15평'), ('16-25',\n '16-25평'), ('26-', '그 이상')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.TextField('제목', max_length=50)\n content = models.TextField('작성 글', max_length=500)\n main_image = models.ImageField(upload_to=get_main_image_filename, blank\n =True, null=True, verbose_name='메인 이미지')\n pyeong = models.ManyToManyField('Pyeong', blank=True)\n created_at = models.DateTimeField('생성 날짜', auto_now_add=True)\n updated_at = models.DateTimeField(verbose_name='수정 날짜', auto_now=True,\n null=True, blank=True)\n like_users = models.ManyToManyField('members.Users', through='PostLike',\n related_name='like_posts', related_query_name='like_post', blank=True)\n colors = models.ManyToManyField('posts.Colors', blank=True)\n housingtype = models.ManyToManyField('HousingTypes', blank=True)\n style = models.ManyToManyField('Styles', blank=True)\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n 
def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image 
= models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n\n\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = ('1-7', '1-7평'), ('8-15', '8-15평'), ('16-25',\n '16-25평'), ('26-', '그 이상')\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n )\n title = models.TextField('제목', max_length=50)\n content = models.TextField('작성 글', max_length=500)\n main_image = models.ImageField(upload_to=get_main_image_filename, blank\n =True, null=True, verbose_name='메인 이미지')\n pyeong = models.ManyToManyField('Pyeong', blank=True)\n created_at = models.DateTimeField('생성 날짜', auto_now_add=True)\n updated_at = models.DateTimeField(verbose_name='수정 날짜', auto_now=True,\n null=True, blank=True)\n like_users = models.ManyToManyField('members.Users', through='PostLike',\n related_name='like_posts', related_query_name='like_post', blank=True)\n colors = models.ManyToManyField('posts.Colors', blank=True)\n housingtype = models.ManyToManyField('HousingTypes', blank=True)\n style = models.ManyToManyField('Styles', blank=True)\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass 
PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 
이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n\n\nclass Posts(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % 
(self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n\n\nclass Posts(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n <function token>\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass 
HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n\n\nclass Posts(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n <function token>\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', 
'아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i 
in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass Comments(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', 
'파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n\n\nclass Comments(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n <function token>\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in 
range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, 
on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass PostLike(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = 
models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass PostLike(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Pyeong(models.Model):\n <assignment token>\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Pyeong(models.Model):\n <assignment token>\n <function token>\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Pyeong(models.Model):\n <assignment token>\n <function token>\n <function token>\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass HousingTypes(models.Model):\n <assignment token>\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass HousingTypes(models.Model):\n <assignment token>\n <function token>\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass HousingTypes(models.Model):\n <assignment token>\n <function token>\n <function token>\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Styles(models.Model):\n <assignment token>\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Styles(models.Model):\n <assignment token>\n <function token>\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Styles(models.Model):\n <assignment token>\n <function token>\n <function token>\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Colors(models.Model):\n <assignment token>\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Colors(models.Model):\n <assignment token>\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n <function token>\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Colors(models.Model):\n <assignment token>\n <function token>\n <function token>\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass PostImages(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
1,070 |
1ea71f7b17809189eeacf19a6b7c4c7d88a5022c
|
from dataloaders.datasets import caltech, embedding
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'caltech101':
train_set = caltech.caltech101Classification(args, split='train')
val_set = caltech.caltech101Classification(args, split='val')
test_set = caltech.caltech101Classification(args, split='test')
num_classes = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_classes
elif args.dataset == 'embedding':
dataset = embedding.Embedding(args)
num_classes = dataset.NUM_CLASSES
loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, **kwargs)
return loader, loader, loader, num_classes
else:
print("Dataloader for {} is not implemented".format(args.dataset))
raise NotImplementedError
def make_id2class(args):
if args.dataset == 'caltech101':
return caltech.id2class
|
[
"from dataloaders.datasets import caltech, embedding\nfrom torch.utils.data import DataLoader\n\ndef make_data_loader(args, **kwargs):\n\n\tif args.dataset == 'caltech101':\n\t\ttrain_set = caltech.caltech101Classification(args, split='train')\n\t\tval_set = caltech.caltech101Classification(args, split='val')\n\t\ttest_set = caltech.caltech101Classification(args, split='test')\n\t\tnum_classes = train_set.NUM_CLASSES\n\n\t\ttrain_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)\n\t\tval_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n\t\ttest_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)\n\n\t\treturn train_loader, val_loader, test_loader, num_classes\n\telif args.dataset == 'embedding':\n\t\tdataset = embedding.Embedding(args)\n\t\tnum_classes = dataset.NUM_CLASSES\n\t\tloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, **kwargs)\n\t\treturn loader, loader, loader, num_classes\n\telse:\n\t\tprint(\"Dataloader for {} is not implemented\".format(args.dataset))\n\t\traise NotImplementedError\n\ndef make_id2class(args):\n\tif args.dataset == 'caltech101':\n\t\treturn caltech.id2class\n",
"from dataloaders.datasets import caltech, embedding\nfrom torch.utils.data import DataLoader\n\n\ndef make_data_loader(args, **kwargs):\n if args.dataset == 'caltech101':\n train_set = caltech.caltech101Classification(args, split='train')\n val_set = caltech.caltech101Classification(args, split='val')\n test_set = caltech.caltech101Classification(args, split='test')\n num_classes = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size,\n shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n test_loader = DataLoader(test_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n return train_loader, val_loader, test_loader, num_classes\n elif args.dataset == 'embedding':\n dataset = embedding.Embedding(args)\n num_classes = dataset.NUM_CLASSES\n loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=\n False, **kwargs)\n return loader, loader, loader, num_classes\n else:\n print('Dataloader for {} is not implemented'.format(args.dataset))\n raise NotImplementedError\n\n\ndef make_id2class(args):\n if args.dataset == 'caltech101':\n return caltech.id2class\n",
"<import token>\n\n\ndef make_data_loader(args, **kwargs):\n if args.dataset == 'caltech101':\n train_set = caltech.caltech101Classification(args, split='train')\n val_set = caltech.caltech101Classification(args, split='val')\n test_set = caltech.caltech101Classification(args, split='test')\n num_classes = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size,\n shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n test_loader = DataLoader(test_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n return train_loader, val_loader, test_loader, num_classes\n elif args.dataset == 'embedding':\n dataset = embedding.Embedding(args)\n num_classes = dataset.NUM_CLASSES\n loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=\n False, **kwargs)\n return loader, loader, loader, num_classes\n else:\n print('Dataloader for {} is not implemented'.format(args.dataset))\n raise NotImplementedError\n\n\ndef make_id2class(args):\n if args.dataset == 'caltech101':\n return caltech.id2class\n",
"<import token>\n\n\ndef make_data_loader(args, **kwargs):\n if args.dataset == 'caltech101':\n train_set = caltech.caltech101Classification(args, split='train')\n val_set = caltech.caltech101Classification(args, split='val')\n test_set = caltech.caltech101Classification(args, split='test')\n num_classes = train_set.NUM_CLASSES\n train_loader = DataLoader(train_set, batch_size=args.batch_size,\n shuffle=True, **kwargs)\n val_loader = DataLoader(val_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n test_loader = DataLoader(test_set, batch_size=args.batch_size,\n shuffle=False, **kwargs)\n return train_loader, val_loader, test_loader, num_classes\n elif args.dataset == 'embedding':\n dataset = embedding.Embedding(args)\n num_classes = dataset.NUM_CLASSES\n loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=\n False, **kwargs)\n return loader, loader, loader, num_classes\n else:\n print('Dataloader for {} is not implemented'.format(args.dataset))\n raise NotImplementedError\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
1,071 |
2ca1b603b18316bc1d970b5e32389e10e4b532e2
|
import configure
import connectify
import userlog
import dirlog
import time
def getUser(sock):
try:
userinfo = userlog.getInfo()
except:
userinfo = configure.init(sock)
userinfo = userinfo.split('^')[0]
# print userinfo
return userinfo
if __name__=="__main__":
sock = connectify.createCon()
userinfo = getUser(sock)
while 1:
dirs, flag = dirlog.getDirs()
if flag:
sock.send('2'+userinfo+'^'+dirs)
print sock.recv(1024)
sock.send('3'+userinfo)
update_count = sock.recv(1024)
update = []
for x in range(0,int(update_count)):
sock.send('4'+userinfo)
update.append(sock.recv(1024))
print update
time.sleep(2)
connectify.closeCon(sock)
|
[
"import configure\nimport connectify\nimport userlog\nimport dirlog\nimport time\n\n\ndef getUser(sock):\n\ttry:\n\t\tuserinfo = userlog.getInfo()\n\texcept:\t\n\t\tuserinfo = configure.init(sock)\n\tuserinfo = userinfo.split('^')[0]\n#\tprint userinfo\n\treturn userinfo\n\nif __name__==\"__main__\":\t\n\tsock = connectify.createCon()\n\tuserinfo = getUser(sock)\n\twhile 1:\n\t\tdirs, flag = dirlog.getDirs()\n\t\tif flag:\n\t\t\tsock.send('2'+userinfo+'^'+dirs)\n\t\t\tprint sock.recv(1024)\n\t\tsock.send('3'+userinfo)\n\t\tupdate_count = sock.recv(1024)\n\t\tupdate = []\n\t\tfor x in range(0,int(update_count)):\n\t\t\tsock.send('4'+userinfo)\n\t\t\tupdate.append(sock.recv(1024))\n\t\tprint update\n\t\ttime.sleep(2)\n\tconnectify.closeCon(sock)\n"
] | true |
1,072 |
07544d1eb039da0081716aa489fc1a0a5a200145
|
from peewee import *
db = PostgresqlDatabase('contacts', user='postgres', password='',
host='localhost', port=5432)
intro_question = input("What would you like to do with Contacts? Create? Read? Find? Delete? Update? ")
def read_contact():
contacts = Contact.select()
for contact in contacts:
print(contact)
print(contact.firstname + " " + contact.lastname + " " + contact.phone + " " + contact.email + " " + contact.address)
def create_contact():
contact_firstname = input("Enter First Name: ")
contact_lastname = input("Enter Last Name: ")
contact_phone = input("Enter Phone Number: ")
contact_email = input("Enter Email: ")
contact_address = input("Enter Address: ")
newcontact = Contact(firstname = contact_firstname, lastname = contact_lastname, phone = contact_phone, email = contact_email, address = contact_address)
newcontact.save()
print(newcontact.firstname + " " + newcontact.lastname + " " + newcontact.phone + " " + newcontact.email + " " + newcontact.address)
def update_contact():
update_find_by_firstname = input("Enter the First Name of the contact you want to update: ")
updated_info = Contact.get(Contact.firstname == update_find_by_firstname)
new_phone = input("Enter the new number: ")
updated_info.phone = new_phone
new_email = input("Enter new Email: ")
updated_info.email = new_email
new_address = input("Enter new Address: ")
updated_info.address = new_address
updated_info.save()
def find_contact():
find_contact_by_firstname = input("Enter First Name of the contact you want to find: ")
find_by_firstname = Contact.get(Contact.firstname == find_contact_by_firstname)
print(find_by_firstname.firstname + " " + find_by_firstname.lastname + " " + find_by_firstname.phone + " " + find_by_firstname.email + " " + find_by_firstname.address)
def delete_contact():
contact_name_delete = input("Enter First Name of the contact you want to delete: ")
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)
contact_firstname.delete_instance()
class BaseModel(Model):
class Meta:
database = db
class Contact(BaseModel):
firstname = CharField()
lastname = CharField()
phone = CharField()
email = CharField()
address = CharField()
db.connect()
db.create_tables([Contact])
if intro_question == "Create":
create_contact()
elif intro_question == "Read":
read_contact()
elif intro_question == "Delete":
delete_contact()
elif intro_question == "Find":
find_contact()
elif intro_question == "Update":
update_contact()
|
[
"from peewee import *\n\ndb = PostgresqlDatabase('contacts', user='postgres', password='',\n host='localhost', port=5432)\n\nintro_question = input(\"What would you like to do with Contacts? Create? Read? Find? Delete? Update? \")\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + \" \" + contact.lastname + \" \" + contact.phone + \" \" + contact.email + \" \" + contact.address)\n\n\ndef create_contact():\n contact_firstname = input(\"Enter First Name: \")\n contact_lastname = input(\"Enter Last Name: \")\n contact_phone = input(\"Enter Phone Number: \")\n contact_email = input(\"Enter Email: \")\n contact_address = input(\"Enter Address: \")\n newcontact = Contact(firstname = contact_firstname, lastname = contact_lastname, phone = contact_phone, email = contact_email, address = contact_address)\n newcontact.save()\n print(newcontact.firstname + \" \" + newcontact.lastname + \" \" + newcontact.phone + \" \" + newcontact.email + \" \" + newcontact.address)\n\ndef update_contact():\n update_find_by_firstname = input(\"Enter the First Name of the contact you want to update: \")\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input(\"Enter the new number: \")\n updated_info.phone = new_phone\n new_email = input(\"Enter new Email: \")\n updated_info.email = new_email\n new_address = input(\"Enter new Address: \")\n updated_info.address = new_address\n updated_info.save() \n\n\ndef find_contact():\n find_contact_by_firstname = input(\"Enter First Name of the contact you want to find: \")\n find_by_firstname = Contact.get(Contact.firstname == find_contact_by_firstname)\n print(find_by_firstname.firstname + \" \" + find_by_firstname.lastname + \" \" + find_by_firstname.phone + \" \" + find_by_firstname.email + \" \" + find_by_firstname.address)\n\ndef delete_contact():\n contact_name_delete = input(\"Enter First Name of the contact you want to delete: \")\n 
contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\ndb.connect()\ndb.create_tables([Contact])\n\nif intro_question == \"Create\":\n create_contact()\n\n\nelif intro_question == \"Read\":\n read_contact()\n\n\nelif intro_question == \"Delete\":\n delete_contact()\n\nelif intro_question == \"Find\":\n find_contact()\n\nelif intro_question == \"Update\":\n update_contact()",
"from peewee import *\ndb = PostgresqlDatabase('contacts', user='postgres', password='', host=\n 'localhost', port=5432)\nintro_question = input(\n 'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '\n )\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = 
Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"<import token>\ndb = PostgresqlDatabase('contacts', user='postgres', password='', host=\n 'localhost', port=5432)\nintro_question = input(\n 'What would you like to do with Contacts? Create? Read? Find? Delete? Update? '\n )\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = 
Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"<import token>\n<assignment token>\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = 
CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\ndb.connect()\ndb.create_tables([Contact])\nif intro_question == 'Create':\n create_contact()\nelif intro_question == 'Read':\n read_contact()\nelif intro_question == 'Delete':\n delete_contact()\nelif intro_question == 'Find':\n find_contact()\nelif intro_question == 'Update':\n update_contact()\n",
"<import token>\n<assignment token>\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\ndef delete_contact():\n contact_name_delete = input(\n 'Enter First Name of the contact you want to delete: ')\n contact_firstname = Contact.get(Contact.firstname == contact_name_delete)\n contact_firstname.delete_instance()\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = 
CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef read_contact():\n contacts = Contact.select()\n for contact in contacts:\n print(contact)\n print(contact.firstname + ' ' + contact.lastname + ' ' + contact.\n phone + ' ' + contact.email + ' ' + contact.address)\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\n<function token>\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef create_contact():\n contact_firstname = input('Enter First Name: ')\n contact_lastname = input('Enter Last Name: ')\n contact_phone = input('Enter Phone Number: ')\n contact_email = input('Enter Email: ')\n contact_address = input('Enter Address: ')\n newcontact = Contact(firstname=contact_firstname, lastname=\n contact_lastname, phone=contact_phone, email=contact_email, address\n =contact_address)\n newcontact.save()\n print(newcontact.firstname + ' ' + newcontact.lastname + ' ' +\n newcontact.phone + ' ' + newcontact.email + ' ' + newcontact.address)\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\n<function token>\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\ndef find_contact():\n find_contact_by_firstname = input(\n 'Enter First Name of the contact you want to find: ')\n find_by_firstname = Contact.get(Contact.firstname ==\n find_contact_by_firstname)\n print(find_by_firstname.firstname + ' ' + find_by_firstname.lastname +\n ' ' + find_by_firstname.phone + ' ' + find_by_firstname.email + ' ' +\n find_by_firstname.address)\n\n\n<function token>\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef update_contact():\n update_find_by_firstname = input(\n 'Enter the First Name of the contact you want to update: ')\n updated_info = Contact.get(Contact.firstname == update_find_by_firstname)\n new_phone = input('Enter the new number: ')\n updated_info.phone = new_phone\n new_email = input('Enter new Email: ')\n updated_info.email = new_email\n new_address = input('Enter new Address: ')\n updated_info.address = new_address\n updated_info.save()\n\n\n<function token>\n<function token>\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass BaseModel(Model):\n\n\n class Meta:\n database = db\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass Contact(BaseModel):\n firstname = CharField()\n lastname = CharField()\n phone = CharField()\n email = CharField()\n address = CharField()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass Contact(BaseModel):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<code token>\n"
] | false |
1,073 |
289aa48b4433be533c3916dd039136df45e0ac0b
|
# Generated by Django 2.2.5 on 2019-10-24 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.5, 2019-10-24).

    Adds human-readable (Chinese) ``verbose_name`` options to the student
    models and relaxes six date columns to optional ``CharField``s.
    Generated code: do not hand-edit the operations list.
    """

    dependencies = [
        ('student', '0008_studentbasic_stu_class_num'),
    ]

    operations = [
        # --- Model-level verbose_name updates (admin display names) ---
        migrations.AlterModelOptions(
            name='onduty',
            options={'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'},
        ),
        migrations.AlterModelOptions(
            name='studentbasic',
            options={'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'},
        ),
        migrations.AlterModelOptions(
            name='studentcertification',
            options={'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'},
        ),
        migrations.AlterModelOptions(
            name='studentclass',
            options={'verbose_name': '学员班级', 'verbose_name_plural': '学员班级'},
        ),
        migrations.AlterModelOptions(
            name='studentexam',
            options={'verbose_name': '学员考试信息', 'verbose_name_plural': '学员考试信息'},
        ),
        migrations.AlterModelOptions(
            name='studentexamextra',
            options={'verbose_name': '学员补考情况', 'verbose_name_plural': '学员补考情况'},
        ),
        migrations.AlterModelOptions(
            name='studenttextbook',
            options={'verbose_name': '学员教材', 'verbose_name_plural': '学员教材'},
        ),
        migrations.AlterModelOptions(
            name='studentwechat',
            options={'verbose_name': '学员365开通情况', 'verbose_name_plural': '学员365开通情况'},
        ),
        migrations.AlterModelOptions(
            name='tuition',
            options={'verbose_name': '学员交费信息', 'verbose_name_plural': '学员交费信息'},
        ),
        # --- Date columns become nullable/blank free-text CharFields ---
        migrations.AlterField(
            model_name='studentbasic',
            name='stu_signup_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报名日期'),
        ),
        migrations.AlterField(
            model_name='studentcertification',
            name='cert_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='发证日期'),
        ),
        migrations.AlterField(
            model_name='studentexam',
            name='exam_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
        ),
        migrations.AlterField(
            model_name='studentexamextra',
            name='exam_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),
        ),
        migrations.AlterField(
            model_name='studentwechat',
            name='wechat_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='开通日期'),
        ),
        migrations.AlterField(
            model_name='tuition',
            name='fee_date',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'),
        ),
    ]
|
[
"# Generated by Django 2.2.5 on 2019-10-24 05:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('student', '0008_studentbasic_stu_class_num'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='onduty',\n options={'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'},\n ),\n migrations.AlterModelOptions(\n name='studentbasic',\n options={'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'},\n ),\n migrations.AlterModelOptions(\n name='studentcertification',\n options={'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'},\n ),\n migrations.AlterModelOptions(\n name='studentclass',\n options={'verbose_name': '学员班级', 'verbose_name_plural': '学员班级'},\n ),\n migrations.AlterModelOptions(\n name='studentexam',\n options={'verbose_name': '学员考试信息', 'verbose_name_plural': '学员考试信息'},\n ),\n migrations.AlterModelOptions(\n name='studentexamextra',\n options={'verbose_name': '学员补考情况', 'verbose_name_plural': '学员补考情况'},\n ),\n migrations.AlterModelOptions(\n name='studenttextbook',\n options={'verbose_name': '学员教材', 'verbose_name_plural': '学员教材'},\n ),\n migrations.AlterModelOptions(\n name='studentwechat',\n options={'verbose_name': '学员365开通情况', 'verbose_name_plural': '学员365开通情况'},\n ),\n migrations.AlterModelOptions(\n name='tuition',\n options={'verbose_name': '学员交费信息', 'verbose_name_plural': '学员交费信息'},\n ),\n migrations.AlterField(\n model_name='studentbasic',\n name='stu_signup_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报名日期'),\n ),\n migrations.AlterField(\n model_name='studentcertification',\n name='cert_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='发证日期'),\n ),\n migrations.AlterField(\n model_name='studentexam',\n name='exam_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),\n ),\n migrations.AlterField(\n model_name='studentexamextra',\n name='exam_date',\n 
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='报考日期'),\n ),\n migrations.AlterField(\n model_name='studentwechat',\n name='wechat_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='开通日期'),\n ),\n migrations.AlterField(\n model_name='tuition',\n name='fee_date',\n field=models.CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('student', '0008_studentbasic_stu_class_num')]\n operations = [migrations.AlterModelOptions(name='onduty', options={\n 'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),\n migrations.AlterModelOptions(name='studentbasic', options={\n 'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),\n migrations.AlterModelOptions(name='studentcertification', options={\n 'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations\n .AlterModelOptions(name='studentclass', options={'verbose_name':\n '学员班级', 'verbose_name_plural': '学员班级'}), migrations.\n AlterModelOptions(name='studentexam', options={'verbose_name':\n '学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.\n AlterModelOptions(name='studentexamextra', options={'verbose_name':\n '学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.\n AlterModelOptions(name='studenttextbook', options={'verbose_name':\n '学员教材', 'verbose_name_plural': '学员教材'}), migrations.\n AlterModelOptions(name='studentwechat', options={'verbose_name':\n '学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.\n AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',\n 'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name\n ='studentbasic', name='stu_signup_date', field=models.CharField(\n blank=True, max_length=128, null=True, verbose_name='报名日期')),\n migrations.AlterField(model_name='studentcertification', name=\n 'cert_date', field=models.CharField(blank=True, max_length=128,\n null=True, verbose_name='发证日期')), migrations.AlterField(model_name=\n 'studentexam', name='exam_date', field=models.CharField(blank=True,\n max_length=128, null=True, verbose_name='报考日期')), migrations.\n AlterField(model_name='studentexamextra', name='exam_date', field=\n models.CharField(blank=True, max_length=128, null=True,\n verbose_name='报考日期')), migrations.AlterField(model_name=\n 'studentwechat', 
name='wechat_date', field=models.CharField(blank=\n True, max_length=128, null=True, verbose_name='开通日期')), migrations.\n AlterField(model_name='tuition', name='fee_date', field=models.\n CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('student', '0008_studentbasic_stu_class_num')]\n operations = [migrations.AlterModelOptions(name='onduty', options={\n 'verbose_name': '学员考勤信息', 'verbose_name_plural': '学员考勤信息'}),\n migrations.AlterModelOptions(name='studentbasic', options={\n 'verbose_name': '学员招生信息', 'verbose_name_plural': '学员招生信息'}),\n migrations.AlterModelOptions(name='studentcertification', options={\n 'verbose_name': '学员证书', 'verbose_name_plural': '学员证书'}), migrations\n .AlterModelOptions(name='studentclass', options={'verbose_name':\n '学员班级', 'verbose_name_plural': '学员班级'}), migrations.\n AlterModelOptions(name='studentexam', options={'verbose_name':\n '学员考试信息', 'verbose_name_plural': '学员考试信息'}), migrations.\n AlterModelOptions(name='studentexamextra', options={'verbose_name':\n '学员补考情况', 'verbose_name_plural': '学员补考情况'}), migrations.\n AlterModelOptions(name='studenttextbook', options={'verbose_name':\n '学员教材', 'verbose_name_plural': '学员教材'}), migrations.\n AlterModelOptions(name='studentwechat', options={'verbose_name':\n '学员365开通情况', 'verbose_name_plural': '学员365开通情况'}), migrations.\n AlterModelOptions(name='tuition', options={'verbose_name': '学员交费信息',\n 'verbose_name_plural': '学员交费信息'}), migrations.AlterField(model_name\n ='studentbasic', name='stu_signup_date', field=models.CharField(\n blank=True, max_length=128, null=True, verbose_name='报名日期')),\n migrations.AlterField(model_name='studentcertification', name=\n 'cert_date', field=models.CharField(blank=True, max_length=128,\n null=True, verbose_name='发证日期')), migrations.AlterField(model_name=\n 'studentexam', name='exam_date', field=models.CharField(blank=True,\n max_length=128, null=True, verbose_name='报考日期')), migrations.\n AlterField(model_name='studentexamextra', name='exam_date', field=\n models.CharField(blank=True, max_length=128, null=True,\n verbose_name='报考日期')), migrations.AlterField(model_name=\n 'studentwechat', name='wechat_date', 
field=models.CharField(blank=\n True, max_length=128, null=True, verbose_name='开通日期')), migrations.\n AlterField(model_name='tuition', name='fee_date', field=models.\n CharField(blank=True, max_length=128, null=True, verbose_name='缴费日期'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
1,074 |
03062ea08bd6ad88376f7c2aa2c89d2194ed8b2e
|
'''
fibonacci(6) => [1, 1, 2, 3, 5, 8]
fibonacci(7) => [1, 1, 2, 3, 5, 8, 13]
'''
def fibonacci(n):
    """Return the first *n* Fibonacci numbers as a list.

    Iterative implementation: the original recursed once per element and
    raised RecursionError for n near the interpreter recursion limit.

    >>> fibonacci(6)
    [1, 1, 2, 3, 5, 8]
    >>> fibonacci(0)
    []
    """
    seq = []
    a, b = 1, 1
    for _ in range(n):
        seq.append(a)
        a, b = b, a + b
    return seq
def main():
    """Demo entry point: print the first six Fibonacci numbers."""
    print(fibonacci(6))
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
[
"'''\nfibonacci(6) => [1, 1, 2, 3, 5, 8]\nfibonacci(7) => [1, 1, 2, 3, 5, 8, 13]\n'''\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n-1)\n suma = lista[len(lista)-1] + lista[len(lista)-2]\n lista.append(suma)\n return lista\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\n<code token>\n",
"<docstring token>\n<function token>\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\n<code token>\n",
"<docstring token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,075 |
af668751074df6f182c7121821587270734ea5af
|
# -*- coding: utf-8 -*-
import scrapy
import os
from topdb.items import BiqugeItem
class NovelsSpider(scrapy.Spider):
    """Crawl the full novel catalogue of xbiquge.la, grouped by category.

    Pipeline: ``parse`` (category index page) -> ``url_parse`` (one novel's
    chapter list) -> ``detail_parse`` (one chapter's text, yielded as a
    ``BiqugeItem``).

    NOTE(review): scrapy already downloads asynchronously; if throughput is
    low, tune CONCURRENT_REQUESTS / DOWNLOAD_DELAY in settings instead of
    adding threads.
    """
    name = 'novels'
    allowed_domains = ['xbiquge.la']
    start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']

    # Local root directory where downloaded chapters are stored.
    SAVE_ROOT = '/Users/qx/Documents/小说/new/'

    # Site category headings -> short ASCII directory names.
    CATEGORY_DIRS = {
        '奇幻小说、玄幻小说大全列表': 'xuanhuan',
        '历史小说、军事小说、穿越小说大全列表': 'chuanyue',
        '武侠小说、仙侠小说、修真小说大全列表': 'xiuzhen',
        '言情小说、都市小说大全列表': 'dushi',
        '异灵小说、科幻小说大全列表': 'kehuan',
        '游戏小说、竞技小说、网游小说大全列表': 'wangyou',
    }

    def parse(self, response):
        """Parse the catalogue page; yield one request per listed novel."""
        for section in response.xpath(".//div[@class='novellist']"):
            heading = section.xpath('./h2/text()').extract_first()
            # Unknown/new headings pass through unchanged rather than being
            # silently mis-filed (same net effect as the original if-chain).
            classname = self.CATEGORY_DIRS.get(heading, heading)
            urls = section.xpath('./ul/li/a/@href').extract()
            names = section.xpath('./ul/li/a/text()').extract()
            for url, novel_name in zip(urls, names):
                yield scrapy.Request(
                    url,
                    meta={'name': novel_name, 'classname': classname},
                    callback=self.url_parse,
                )

    def url_parse(self, response):
        """Parse a novel's page; schedule every not-yet-downloaded chapter."""
        print('小说章节')
        name = response.meta['name']
        classname = response.meta['classname']
        author = response.xpath("//div[@id ='info']/p/text()").extract_first()
        if author:
            author = author.split(':', 1)[1]
        else:
            # Bug fix: extract_first() may return None, which previously
            # crashed the string concatenations below with a TypeError.
            author = ''
        print(name + '-' + author)
        listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
        chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()").extract()
        # Directory bookkeeping hoisted out of the chapter loop (the original
        # repeated the rename/makedirs checks once per chapter).
        oldname = self.SAVE_ROOT + classname + '/' + name + '-作者:' + author
        newname = self.SAVE_ROOT + classname + '/' + name
        if os.path.exists(oldname):
            # Migrate the legacy "<name>-作者:<author>" directory layout.
            os.rename(oldname, newname)
        if not os.path.exists(newname):
            os.makedirs(newname)
        for i in range(len(listurls)):
            # Skip chapters already saved as "<index>.txt".
            if os.path.exists(newname + '/' + str(i) + '.txt'):
                continue
            url = 'http://www.xbiquge.la' + listurls[i]
            yield scrapy.Request(
                url,
                meta={'chaptername': chapternames[i], 'tag': classname,
                      'name': name, 'author': author, 'index': i},
                callback=self.detail_parse,
            )

    def detail_parse(self, response):
        """Parse one chapter's body text into a BiqugeItem."""
        item = BiqugeItem()
        novel = response.xpath("//div[@id='content']/text()").extract()
        # Normalise the site's full-width padding spaces to ASCII spaces.
        item['novel'] = '\n'.join(novel).replace(' ', ' ')
        item['name'] = response.meta['name']
        item['tag'] = response.meta['tag']
        item['author'] = response.meta['author']
        item['chapter'] = response.meta['chaptername']
        item['index'] = response.meta['index']
        yield item
|
[
"# -*- coding: utf-8 -*-\nimport scrapy\n\nimport os\nfrom topdb.items import BiqugeItem\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n # 小说分类\n path = '/Users/qx/Documents/小说/new/'\n\n all=response.xpath(\".//div[@class='novellist']\")\n\n for oneitem in all:\n\n classname=oneitem.xpath('./h2/text()').extract_first()\n if classname=='奇幻小说、玄幻小说大全列表':\n classname='xuanhuan'\n if classname=='历史小说、军事小说、穿越小说大全列表':\n classname='chuanyue'\n if classname=='武侠小说、仙侠小说、修真小说大全列表':\n classname='xiuzhen'\n if classname=='言情小说、都市小说大全列表':\n classname='dushi'\n if classname=='异灵小说、科幻小说大全列表':\n classname='kehuan'\n if classname=='游戏小说、竞技小说、网游小说大全列表':\n classname='wangyou'\n\n urls=oneitem.xpath('./ul/li/a/@href').extract()\n\n names=oneitem.xpath('./ul/li/a/text()').extract()\n\n for i in range(len(urls)):\n url=urls[i]\n name=names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)\n\n\n def url_parse(self, response):\n # 小说章节列表\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n\n name = response.meta['name']\n classname = response.meta['classname']\n\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n\n if author:\n author=author.split(':',1)[1]\n\n print(name+'-'+author)\n\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\").extract()\n\n for i in range(len(listurls)):\n url = \"http://www.xbiquge.la\" + listurls[i]\n chaptername=chapternames[i]\n\n oldname=path+ classname+'/'+name+ '-作者:' + author\n newname=path+ classname+'/'+name\n\n if (os.path.exists(oldname)):\n os.rename(oldname,newname)\n\n if (not os.path.exists(newname)):\n os.makedirs(newname)\n\n if(not os.path.exists(newname+'/'+ str(i) + \".txt\")):\n yield scrapy.Request(url, 
meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n # 章节详细内容\n\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n\n item = BiqugeItem()\n\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = \"\\n\".join(novel).replace(\" \", \" \")\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n\n # print(item['classname'])\n # print(item['name'])\n # print(item['title'])\n # print('\\n')\n yield item\n\n # 这里是爬取整个网站且按照分类进行爬取 但是重点是 爬取太慢scrapy 是异步操作 还需要了解一下多线程的问题 这样速度能更快些\n",
"import scrapy\nimport os\nfrom topdb.items import BiqugeItem\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 
'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"<import token>\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, 
callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"<import token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def 
detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"<import token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"<import token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n <function token>\n",
"<import token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
1,076 |
06f961c07695d1c312cb943afbfa64508a709c7e
|
from alive_progress import alive_bar
from time import sleep

# Example 1: default bar — call bar() once per consumed item.
with alive_bar(100) as bar:
    for i in range(100):
        sleep(0.03)
        bar()

# Example 2: bubble bar with the "notes" spinner.
# (Fix: this was accidentally indented inside the first `with`, which kept
# the already-finished default bar's context open while this one ran.)
with alive_bar(200, bar='bubbles', spinner='notes2') as bar:
    for i in range(200):
        sleep(0.03)
        bar()
|
[
"from alive_progress import alive_bar\nfrom time import sleep\n\nwith alive_bar(100) as bar: # default setting\n for i in range(100):\n sleep(0.03)\n bar() # call after consuming one item\n\n # using bubble bar and notes spinner\n with alive_bar(200, bar='bubbles', spinner='notes2') as bar:\n for i in range(200):\n sleep(0.03)\n bar() # call after consuming one item\n",
"from alive_progress import alive_bar\nfrom time import sleep\nwith alive_bar(100) as bar:\n for i in range(100):\n sleep(0.03)\n bar()\n with alive_bar(200, bar='bubbles', spinner='notes2') as bar:\n for i in range(200):\n sleep(0.03)\n bar()\n",
"<import token>\nwith alive_bar(100) as bar:\n for i in range(100):\n sleep(0.03)\n bar()\n with alive_bar(200, bar='bubbles', spinner='notes2') as bar:\n for i in range(200):\n sleep(0.03)\n bar()\n",
"<import token>\n<code token>\n"
] | false |
1,077 |
1fafbc1e415b5089afcd2976d4f0dc2aa1c5a144
|
def maxProduct(self, A):
size= len(A)
if size==1:
return A[0]
Max=[A[0]]
Min=[A[0]]
for i in range(1,size):
Max.append(max(max(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))
Min.append(min(min(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))
tmax=Max[0]
for i in range(0,size):
if Max[i]>tmax:
tmax=Max[i]
return tmax
|
[
" def maxProduct(self, A):\n size= len(A)\n if size==1:\n return A[0]\n Max=[A[0]]\n Min=[A[0]]\n for i in range(1,size):\n Max.append(max(max(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))\n Min.append(min(min(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))\n tmax=Max[0]\n for i in range(0,size):\n if Max[i]>tmax:\n tmax=Max[i]\n return tmax\n"
] | true |
1,078 |
9a6ceeb286bb6c3d5923fe3b53be90a097e16ef5
|
'''
    Create a dictionary of fastText embeddings, stored locally for
    fastText import. This will hopefully make it easier to load
    and train data.

    This module also records the steps used to clean scripts (codify):
    1) copy directly from the website (space-delimited text)
    2) remove actions in brackets
    3) change words not in the fastText dictionary like "heeeey" to the closest approximation like "heeey", and convert made-up conjunctions like "overdie" to "over-die"
    4) concatenate the speaker name into one string, without spaces
    5) create a space between punctuation and words [.,?;!]
    6) delete apostrophes from shortened words like "it's"

'''
import fastText as ft
import pickle as pk
import os
import re
import pdb
def createLinePairs(corpus):
    '''
    Read an episode transcript and build (line, reply) training pairs.

    Input file format (one utterance per line, speaker first):
        ELAINE Hi Mr . Seinfeld !
        JERRY Hey , theres the old man !
    Output: list of consecutive-line pairs as token arrays, e.g.
        [[["hi", "mr", ".", "seinfeld", "!"],
          ["hey", ",", "theres", "the", "old", "man", "!"]], ...]
    '''
    print("Reading lines...")
    with open(corpus) as f:
        content = f.readlines()
    # (Fix: removed leftover debug prints that dumped the entire corpus
    # to stdout on every call.)

    # Strip \n and \t, and skip the speaker; empty arrays mark scene breaks.
    lines = convert_lines_to_arrays(content)

    # Pair each line with the line that follows it.  An empty entry marks a
    # new scene, so any pair touching one is discarded.
    pairs = []
    for cur, nxt in zip(lines, lines[1:]):
        if cur and nxt:
            pairs.append([cur, nxt])
    return pairs
def convert_lines_to_arrays(content):
    '''
    Convert each raw transcript line into a token array, dropping
    formatting that is not relevant.  A scene-marker line becomes an
    empty array so callers can detect scene boundaries.
    '''
    lines = []
    for raw in content:
        stripped = raw.strip()
        if not stripped:
            # Blank lines carry no dialogue; skip them entirely.
            continue
        # NOTE(review): the marker test is case-sensitive and runs on the
        # raw line — confirm transcripts always spell it 'scene:'.
        if 'scene:' in raw:
            lines.append([])
            continue
        tokens = format_line(stripped)
        if tokens:
            lines.append(tokens)
    return lines
def format_line(line):
    '''
    Tokenize one script line into an array of lower-case words and
    punctuation marks.

    Rules (matching the cleaning steps in the module docstring):
    - everything before the first ':' is the speaker tag and is skipped;
    - text inside [brackets] is a stage direction and is skipped;
    - apostrophes are dropped ("it's" -> "its");
    - punctuation [.,?;!"] becomes its own token.

    Bug fix: the original only flushed the word buffer on a space or a
    punctuation character, so the final word of any line that did not end
    in punctuation was silently dropped.  The buffer is now flushed after
    the loop as well.
    '''
    line = line.lower()  # normalize case before tokenizing
    line_arr = []
    open_brack = []      # stack of currently-open '[' markers
    is_dialogue = False  # becomes True after the speaker's ':'
    word = ''
    for s in line:
        if s == "'":  # don't store apostrophe, so it's is stored as its
            continue
        if s == ':':  # after first speaker identified
            is_dialogue = True
            continue
        if s == '[':  # non-empty stack means we are inside a direction
            open_brack.append(s)
            continue
        if s == ']':  # close the most recent bracket
            open_brack.pop()
            continue
        if is_dialogue and not open_brack:
            if s == ' ':
                if len(word) > 0:
                    line_arr.append(word)
                word = ''  # reset word to blank
            elif re.match("[.,?;!\"]", s):
                # punctuation ends the current word and is its own token
                if len(word) > 0:
                    line_arr.append(word)
                line_arr.append(s)
                word = ''
            elif re.match("[A-Za-z\-]", s):
                # accumulate alphabetic / hyphen characters
                word = word + s
    if word:
        # flush the trailing word (fix for lines not ending in punctuation)
        line_arr.append(word)
    return line_arr
def line2TrainVector(pairs, word_dict):
    '''
    Convert a [input_line, target_line] pair of token arrays into their
    fastText embedding sequences (dim 300 per token).

    Example: [["Hi", "Mr", ".", "Seinfeld", "!"],
              ["Hey", ",", "theres", "the", "old", "man", "!"]]
    yields a 5 x 300 input matrix and a 7 x 300 target matrix.
    word_dict is the embedding hash built by fastDict.processDict().
    Target lengths are not needed while training is unbatched.
    '''
    source_tokens, target_tokens = pairs[0], pairs[1]
    input_v = createWordVector(source_tokens, word_dict)
    target_v = createWordVector(target_tokens, word_dict)
    return input_v, target_v
def createWordVector(word_array, word_dict):
    '''
    Map a tokenized line to the list of its fastText embedding vectors.

    word_array: tokens produced by format_line().
    word_dict:  word -> embedding-vector hash.

    Out-of-vocabulary handling:
    - hyphenated words missing from the dict are embedded piecewise;
    - semicolons (absent from fastText) are treated as periods;
    - other unknown words are repaired with editWord() by trimming
      repeated letters until a dictionary hit is found.

    Bug fix: after editWord() repaired a word, the original appended the
    plain *string* into the list of vectors; it now appends the repaired
    word's embedding, word_dict[editted_word].
    '''
    vect = []
    for word in word_array:
        if '-' in word and word not in word_dict:
            # NOTE(review): createHypenEmbed reads the module-level
            # word_dict global — confirm it matches this word_dict.
            vect.extend(createHypenEmbed(word))
            continue
        # semi-colons not in fasttext
        if word == ';':
            word = '.'
        if word == '':
            continue
        if word in word_dict:
            vect.append(word_dict[word])
        else:
            print('NOT IN DICT')
            print(word)
            editted_word = editWord(word, word_dict)
            # append the embedding of the repaired word, not the string
            vect.append(word_dict[editted_word])
            print(editted_word)
            print(word_array)
    return vect
def editWord(weird_word, word_dict):
    '''
    Repair a misspelled word with stretched letters (e.g. "heeey") by
    repeatedly deleting one character from its longest run of repeated
    letters until the word appears in word_dict.  At least one character
    is always removed before the first dictionary check.
    '''
    while True:
        # Build [start_index, letter, run_length] for each run of equal letters.
        runs = []
        prev = ''
        for idx, ch in enumerate(weird_word):
            if ch != prev:
                runs.append([idx, ch, 1])
            else:
                runs[-1][2] += 1
            prev = ch
        # Stable sort: among equally long runs the latest one wins,
        # matching the original recursive implementation.
        start, _, length = sorted(runs, key=lambda r: r[2])[-1]
        # Drop the last character of the chosen run.
        cut = start + length
        weird_word = weird_word[:cut - 1] + weird_word[cut:]
        if weird_word in word_dict:
            return weird_word
def createHypenEmbed(word, embeddings=None):
    '''
    Embed a hyphenated word that is missing from the vocabulary.

    First tries the word with hyphens removed; otherwise returns the
    embeddings of each hyphen-separated piece (raises KeyError when a
    piece is unknown, as before).

    embeddings: optional word -> vector mapping.  Defaults to the
    module-level `word_dict` global for backward compatibility — the
    original depended on that global unconditionally, which raised
    NameError whenever this module was imported rather than run as a
    script.
    '''
    lookup = word_dict if embeddings is None else embeddings
    word_whole = re.sub('-', '', word)
    if word_whole in lookup:
        return [lookup[word_whole]]
    else:
        # [TODO] should the hyphenated word be split into two words or
        # kept as an average embedding?  Currently the piece vectors are
        # concatenated into one list.
        subwords = word.split('-')
        return [lookup[w] for w in subwords]
class fastDict():
    """Build or load a pickled word -> fastText-embedding dictionary.

    method='store' converts the raw fastText .bin model into a pickle
    first; method='import' only reads the existing pickle.
    """

    def __init__(self, read_filename, method):
        # [TODO] allow dynamically init
        self.method = method
        print(method)
        if method == 'store':
            # NOTE(review): the read_filename argument is overridden by a
            # hard-coded path here — confirm this is intentional.
            read_filename = '~/FastData/wiki.en/wiki.en.bin'
            print(read_filename)
            self.fast = ft.load_model(
                os.path.expanduser(read_filename))
        pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'
        self.pickle_path = os.path.expanduser(pickle_filename)
        print(pickle_filename)

    def processDict(self):
        """Return the word->vector dict, building the pickle first when storing."""
        if self.method == 'store':
            self.writeWordDict()
        return self.loadWordDict()

    def loadWordDict(self):
        """Load and return the pickled dictionary from self.pickle_path."""
        # with-statement closes the handle (the original leaked it)
        with open(self.pickle_path, 'rb') as pickle_reader:
            word_vec = pk.load(pickle_reader)
        return word_vec

    def writeWordDict(self):
        """Dump every model word's embedding to the pickle file."""
        all_words = self.getAllWords()
        self.createWordDict(all_words)

    def getAllWords(self):
        """Return the full vocabulary of the loaded fastText model."""
        return self.fast.get_words()

    def createWordDict(self, all_words):
        """Pickle {word: embedding} for all_words to self.pickle_path."""
        # context manager guarantees the pickle is flushed and closed
        # (the original writer handle was never closed)
        with open(self.pickle_path, 'wb') as pickle_writer:
            word_dict = {word: self.fast.get_word_vector(word)
                         for word in all_words}
            pk.dump(word_dict, pickle_writer)
if __name__ == '__main__':
    # Load (method='import') the cached fastText word-embedding dictionary,
    # then parse a sample episode file into consecutive dialogue-line pairs.
    read_filename = '~/FastData/wiki.en/wiki.en.bin'
    method = 'import'
    fast = fastDict(read_filename, method)
    # `word_dict` is read as a module-level global by createHypenEmbed(),
    # so it must stay bound under exactly this name.
    word_dict = fast.processDict()
    # [TODO] clean-up do not need to call these functions in main
    test_filename = '~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy'
    pairs = createLinePairs(os.path.expanduser(test_filename))
    # [TODO] transfer this into 
    # for pair in pairs: input, output = line2TrainVector(pair, word_dict)
|
[
"'''\n Create a dictionary of fasttext embedding, stored locally\n fasttext import. This will hopefully make it easier to load\n and train data.\n\n This will also be used to store the\n Steps to clean scripts (codify): \n 1) copy direct from website (space-delimited text) \n 2) remove actions in brackets \n 3) change words not in fasttext dictionary like \"heeeey\" to closest approximation like \"heeey\", and convert made-up conjuction like \"overdie\" to \"over-die\" \n 4) concate the speaker into one string, without space \n 5) create a space between punctuation and words [.,?;!] \n 6) delete apostrophes for shorten words like \"it's\"\n\n'''\nimport fastText as ft\nimport pickle as pk\nimport os\nimport re\nimport pdb\n\n\n\ndef createLinePairs(corpus):\n '''\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n '''\n print(\"Reading lines...\")\n # combine every two lines into pairs of vectors\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n # strip \\n and \\t, and skip the speaker\n lines = convert_lines_to_arrays(content)\n\n pairs = []\n for i,x in enumerate(lines[:-1]):\n # create pairs of lines to feed as input and output\n # for model, empty lines represent new scene\n # so any pair wiht an empty line is discarded\n if lines[i] and lines[i+1]: #if neither lines are empty\n pairs.append([lines[i], lines[i+1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n '''\n convert each line in scene to an array of text\n formating when not relevant\n '''\n lines = []\n for x in content:\n line = x.strip()\n if len(line)>0: #skip empty lines\n if 'scene:' in x: # store empty arrays for new scene\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr: # if line not empty\n 
lines.append(line_arr)\n return lines\n\ndef format_line(line):\n '''\n format the line before storing as an array\n '''\n line = line.lower() # set line to lower case\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s==\"'\": # don't store apostrophe, so it's stored as its\n continue\n if s==':': # after first speaker identified\n is_dialogue = True\n continue\n if s=='[': #if open_brack is not null, string is not dialogue\n open_brack.append(s)\n continue\n if s==']': #remove open brack, if closed one found\n open_brack.pop()\n continue\n if is_dialogue and not open_brack: \n # if not inside bracket and some word to store\n if s == ' ': # if space\n if len(word)>0:\n line_arr.append(word)\n word = '' # reset word to blank\n elif re.match(\"[.,?;!\\\"]\", s):\n # start new word if character\n if len(word)>0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match(\"[A-Za-z\\-]\", s):\n # store if alpha character\n word = word+s\n return line_arr\n\n\n\ndef line2TrainVector(pairs, word_dict):\n # [TODO] convert each line into vectors\n # don't need to use target lens when not batched\n '''\n Input: Read pairs of lines:\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n\n word_dict is embedding hash formed with processDict() below\n Output: convert into fasttext embedding vectors (dim 300)\n above example returns \n matrix size 4 x 300 for input\n matrix size 7 x 300 for target\n '''\n input_v = createWordVector(pairs[0], word_dict)\n target_v = createWordVector(pairs[1], word_dict)\n return input_v, target_v\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n # a hyphenated word may be tricky\n # if cannot find, then may need to split up\n # as 2 word\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n # semi-colons not in fasttext\n if word == ';': word = '.'\n if 
word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\ndef editWord(weird_word, word_dict):\n # edit weird string, remove extra letters\n # until word in dict\n last_s = ''\n weird_stack = []\n for i, s in enumerate(weird_word):\n ## create ([index, letter, num]) for each different letter\n if s!=last_s:\n weird_stack.append([i, s, 1])\n else:\n weird_stack[-1][2]+=1 # add 1 to the weird word\n last_s = s\n # sort the stack to find most common group of letters and index\n sorted_stack = sorted(weird_stack, key = lambda x: x[2])\n most_common = sorted_stack[-1]\n # remove most common letter in the weird word\n # i.e. in heeeeey, remove e\n common_idx = most_common[0]+most_common[2]\n weird_word = weird_word[:(common_idx-1)]+weird_word[common_idx:]\n if weird_word in word_dict:\n return weird_word\n else:\n weird_word = editWord(weird_word, word_dict)\n return weird_word\n\n\n\n\n\n\ndef createHypenEmbed(word):\n '''\n Handle outlier language with hyphen\n '''\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n # [TODO] should the hyphenated word be\n # split into two words or kept as an\n # average embedding?\n # currently adding the two word into one vect\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\n\nclass fastDict():\n\n def __init__(self, read_filename, method):\n # [TODO] allow dynamically init\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(\n os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n 
print(pickle_filename)\n\n\n def processDict(self):\n # method = store or import\n # read pickle dictionary\n # if method = store, convert fastText data to pickle format first\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n\nif __name__ == '__main__':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n method = 'import'\n fast = fastDict(read_filename, method)\n word_dict = fast.processDict()\n # [TODO] clean-up do not need to call these functions in main\n test_filename = '~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy'\n pairs = createLinePairs(os.path.expanduser(test_filename))\n # [TODO] transfer this into \n # for pair in pairs: input, output = line2TrainVector(pair, word_dict)\n",
"<docstring token>\nimport fastText as ft\nimport pickle as pk\nimport os\nimport re\nimport pdb\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\ndef format_line(line):\n \"\"\"\n format the line before storing as an array\n \"\"\"\n line = line.lower()\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s == \"'\":\n continue\n if s == ':':\n is_dialogue = True\n continue\n if s == '[':\n open_brack.append(s)\n continue\n if s == ']':\n open_brack.pop()\n continue\n if is_dialogue and not open_brack:\n if s == ' ':\n if len(word) > 0:\n line_arr.append(word)\n word = ''\n elif re.match('[.,?;!\"]', s):\n if len(word) > 0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match('[A-Za-z\\\\-]', s):\n word = word + s\n return line_arr\n\n\ndef line2TrainVector(pairs, word_dict):\n \"\"\"\n Input: Read pairs of lines:\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n\n word_dict is embedding hash formed with processDict() below\n Output: convert into fasttext 
embedding vectors (dim 300)\n above example returns \n matrix size 4 x 300 for input\n matrix size 7 x 300 for target\n \"\"\"\n input_v = createWordVector(pairs[0], word_dict)\n target_v = createWordVector(pairs[1], word_dict)\n return input_v, target_v\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\ndef editWord(weird_word, word_dict):\n last_s = ''\n weird_stack = []\n for i, s in enumerate(weird_word):\n if s != last_s:\n weird_stack.append([i, s, 1])\n else:\n weird_stack[-1][2] += 1\n last_s = s\n sorted_stack = sorted(weird_stack, key=lambda x: x[2])\n most_common = sorted_stack[-1]\n common_idx = most_common[0] + most_common[2]\n weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]\n if weird_word in word_dict:\n return weird_word\n else:\n weird_word = editWord(weird_word, word_dict)\n return weird_word\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n 
print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\nif __name__ == '__main__':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n method = 'import'\n fast = fastDict(read_filename, method)\n word_dict = fast.processDict()\n test_filename = (\n '~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy')\n pairs = createLinePairs(os.path.expanduser(test_filename))\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\ndef format_line(line):\n \"\"\"\n format the line before storing as an array\n \"\"\"\n line = line.lower()\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s == \"'\":\n continue\n if s == ':':\n is_dialogue = True\n continue\n if s == '[':\n open_brack.append(s)\n continue\n if s == ']':\n open_brack.pop()\n continue\n if is_dialogue and not open_brack:\n if s == ' ':\n if len(word) > 0:\n line_arr.append(word)\n word = ''\n elif re.match('[.,?;!\"]', s):\n if len(word) > 0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match('[A-Za-z\\\\-]', s):\n word = word + s\n return line_arr\n\n\ndef line2TrainVector(pairs, word_dict):\n \"\"\"\n Input: Read pairs of lines:\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n\n word_dict is embedding hash formed with processDict() below\n Output: convert into fasttext embedding vectors (dim 300)\n above example returns \n matrix 
size 4 x 300 for input\n matrix size 7 x 300 for target\n \"\"\"\n input_v = createWordVector(pairs[0], word_dict)\n target_v = createWordVector(pairs[1], word_dict)\n return input_v, target_v\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\ndef editWord(weird_word, word_dict):\n last_s = ''\n weird_stack = []\n for i, s in enumerate(weird_word):\n if s != last_s:\n weird_stack.append([i, s, 1])\n else:\n weird_stack[-1][2] += 1\n last_s = s\n sorted_stack = sorted(weird_stack, key=lambda x: x[2])\n most_common = sorted_stack[-1]\n common_idx = most_common[0] + most_common[2]\n weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]\n if weird_word in word_dict:\n return weird_word\n else:\n weird_word = editWord(weird_word, word_dict)\n return weird_word\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 
'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\nif __name__ == '__main__':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n method = 'import'\n fast = fastDict(read_filename, method)\n word_dict = fast.processDict()\n test_filename = (\n '~/Documents/seinfeld/episodes/episode_TheSeinfeldChronicles_copy')\n pairs = createLinePairs(os.path.expanduser(test_filename))\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\ndef format_line(line):\n \"\"\"\n format the line before storing as an array\n \"\"\"\n line = line.lower()\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s == \"'\":\n continue\n if s == ':':\n is_dialogue = True\n continue\n if s == '[':\n open_brack.append(s)\n continue\n if s == ']':\n open_brack.pop()\n continue\n if is_dialogue and not open_brack:\n if s == ' ':\n if len(word) > 0:\n line_arr.append(word)\n word = ''\n elif re.match('[.,?;!\"]', s):\n if len(word) > 0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match('[A-Za-z\\\\-]', s):\n word = word + s\n return line_arr\n\n\ndef line2TrainVector(pairs, word_dict):\n \"\"\"\n Input: Read pairs of lines:\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n\n word_dict is embedding hash formed with processDict() below\n Output: convert into fasttext embedding vectors (dim 300)\n above example returns \n matrix 
size 4 x 300 for input\n matrix size 7 x 300 for target\n \"\"\"\n input_v = createWordVector(pairs[0], word_dict)\n target_v = createWordVector(pairs[1], word_dict)\n return input_v, target_v\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\ndef editWord(weird_word, word_dict):\n last_s = ''\n weird_stack = []\n for i, s in enumerate(weird_word):\n if s != last_s:\n weird_stack.append([i, s, 1])\n else:\n weird_stack[-1][2] += 1\n last_s = s\n sorted_stack = sorted(weird_stack, key=lambda x: x[2])\n most_common = sorted_stack[-1]\n common_idx = most_common[0] + most_common[2]\n weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]\n if weird_word in word_dict:\n return weird_word\n else:\n weird_word = editWord(weird_word, word_dict)\n return weird_word\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 
'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\ndef format_line(line):\n \"\"\"\n format the line before storing as an array\n \"\"\"\n line = line.lower()\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s == \"'\":\n continue\n if s == ':':\n is_dialogue = True\n continue\n if s == '[':\n open_brack.append(s)\n continue\n if s == ']':\n open_brack.pop()\n continue\n if is_dialogue and not open_brack:\n if s == ' ':\n if len(word) > 0:\n line_arr.append(word)\n word = ''\n elif re.match('[.,?;!\"]', s):\n if len(word) > 0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match('[A-Za-z\\\\-]', s):\n word = word + s\n return line_arr\n\n\n<function token>\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n 
print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\ndef editWord(weird_word, word_dict):\n last_s = ''\n weird_stack = []\n for i, s in enumerate(weird_word):\n if s != last_s:\n weird_stack.append([i, s, 1])\n else:\n weird_stack[-1][2] += 1\n last_s = s\n sorted_stack = sorted(weird_stack, key=lambda x: x[2])\n most_common = sorted_stack[-1]\n common_idx = most_common[0] + most_common[2]\n weird_word = weird_word[:common_idx - 1] + weird_word[common_idx:]\n if weird_word in word_dict:\n return weird_word\n else:\n weird_word = editWord(weird_word, word_dict)\n return weird_word\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = 
self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\ndef format_line(line):\n \"\"\"\n format the line before storing as an array\n \"\"\"\n line = line.lower()\n line_arr = []\n open_brack = []\n is_dialogue = False\n word = ''\n for s in line:\n if s == \"'\":\n continue\n if s == ':':\n is_dialogue = True\n continue\n if s == '[':\n open_brack.append(s)\n continue\n if s == ']':\n open_brack.pop()\n continue\n if is_dialogue and not open_brack:\n if s == ' ':\n if len(word) > 0:\n line_arr.append(word)\n word = ''\n elif re.match('[.,?;!\"]', s):\n if len(word) > 0:\n line_arr.append(word)\n line_arr.append(s)\n word = ''\n elif re.match('[A-Za-z\\\\-]', s):\n word = word + s\n return line_arr\n\n\n<function token>\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n 
print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\n<function token>\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\ndef convert_lines_to_arrays(content):\n \"\"\"\n convert each line in scene to an array of text\n formating when not relevant\n \"\"\"\n lines = []\n for x in content:\n line = x.strip()\n if len(line) > 0:\n if 'scene:' in x:\n lines.append([])\n else:\n line_arr = format_line(line)\n if line_arr:\n lines.append(line_arr)\n return lines\n\n\n<function token>\n<function token>\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\n<function token>\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n 
read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\n<function token>\n\n\ndef createHypenEmbed(word):\n \"\"\"\n Handle outlier language with hyphen\n \"\"\"\n word_whole = re.sub('-', '', word)\n if word_whole in word_dict:\n return [word_dict[word_whole]]\n else:\n subwords = word.split('-')\n word_vect = [word_dict[subwords[0]]]\n for w in subwords[1:]:\n word_vect.append(word_dict[w])\n return word_vect\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n 
self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef createWordVector(word_array, word_dict):\n vect = []\n for word in word_array:\n if '-' in word and word not in word_dict:\n vect.extend(createHypenEmbed(word))\n continue\n if word == ';':\n word = '.'\n if word == '':\n continue\n if word in word_dict:\n vect.append(word_dict[word])\n else:\n print('NOT IN DICT')\n print(word)\n editted_word = editWord(word, word_dict)\n vect.append(editted_word)\n print(editted_word)\n print(word_array)\n return vect\n\n\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = 
self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef createLinePairs(corpus):\n \"\"\"\n Input: Read episode linse with format:\n ELAINE Hi Mr . Seinfeld !\n JERRY Hey , theres the old man !\n Output: convert those pairs into array\n [[\"Hi\", \"Mr\", \".\", \"Seinfeld\", \"!\"],\n [\"Hey\", \",\", \"theres\", \"the\", \"old\",\"man\",\"!\"]\n \"\"\"\n print('Reading lines...')\n with open(corpus) as f:\n content = f.readlines()\n print('CONTENT')\n print(content)\n lines = convert_lines_to_arrays(content)\n pairs = []\n for i, x in enumerate(lines[:-1]):\n if lines[i] and lines[i + 1]:\n pairs.append([lines[i], lines[i + 1]])\n return pairs\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n\n def writeWordDict(self):\n all_words = self.getAllWords()\n self.createWordDict(all_words)\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n <function token>\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n\n def createWordDict(self, all_words):\n pickle_writer = open(self.pickle_path, 'wb')\n word_dict = {}\n for word in all_words:\n word_dict[word] = self.fast.get_word_vector(word)\n pk.dump(word_dict, pickle_writer)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n\n def loadWordDict(self):\n pickle_reader = open(self.pickle_path, 'rb')\n word_vec = pk.load(pickle_reader)\n return word_vec\n <function token>\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n <function token>\n <function token>\n\n def getAllWords(self):\n all_the_words = self.fast.get_words()\n return all_the_words\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n\n def processDict(self):\n if self.method == 'store':\n self.writeWordDict()\n return self.loadWordDict()\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n\n def __init__(self, read_filename, method):\n self.method = method\n print(method)\n if method == 'store':\n read_filename = '~/FastData/wiki.en/wiki.en.bin'\n print(read_filename)\n self.fast = ft.load_model(os.path.expanduser(read_filename))\n pickle_filename = '~/FastData/wiki.en/wiki.en.pkl'\n self.pickle_path = os.path.expanduser(pickle_filename)\n print(pickle_filename)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass fastDict:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<code token>\n"
] | false |
1,079 |
618aa64c08ebf8d9a0bc9662195ece2bbd485c17
|
dic = {}
try:
print(dic[55])
except Exception as err:
print('Mensagem: ',err)
|
[
"dic = {}\n\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ',err)\n",
"dic = {}\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"<assignment token>\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"<assignment token>\n<code token>\n"
] | false |
1,080 |
8f5d9918260e2f50fb229a7067f820a186101b99
|
import numpy as np
from scipy import stats
from scipy import interpolate
from math import factorial
from scipy import signal
"""
A continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,
it should work for most datasets.
Parameters
----------
lowerBound: The lowest value of the scale factor to use in the wavelet transform
upperBound: The highest value of the scale factor to use in the wavelet transform
steps: The number of scale factors we want between the highest and lowest bounds
rowWindow: The maximum number of rows that a ridge line can be discontinuous before it is
terminated. I.e. the maximum number of scale factors it can deviate.
colWindow: The maximum number of columns that a ridge line can wander before it is terminated.
I.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.
"""
# CWT Transform parameters
lowerBound = 1
upperBound = 70
steps = 90
# Ridge line filtering parameters
rowWindow = 2
columnWindow = 5
class _spectra:
def __init__(self,x,y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
"""
Simple helper function for finding all of the maxima in the 2D array returned by the wavelet
transform. Works on the basis of a simple comparison between neighbouring elements. These
values form the initial basis for the ridge lines.
"""
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size,dtype=(float,3))
# Populate the maxima array with a tuple of the coordinates and the values of the maxima
count = 0
for j,row in enumerate(CWTArray):
for i,element in enumerate(row):
try:
if element > row[i-1] and element > row[i+1]:
maximas[count]= ((steps-j,i,element))
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
"""
Filter the ridge lines found from the maxima of the CWT coefficient array based on a set
parameters, namely the maximum deviations in wavenumber and scale space. Any lines which are
found from this criteria are considered to be peaks and further evaluated in the following
steps.
"""
def _filterRidgeLines(maximaArray,rowMax,colMax):
# Helper to prevent duplicating ridge lines
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
# Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)
for i,row in enumerate(maximaArray):
ridge = [] # For each maxima start a ridge line
colPos = row[1] # Get the column position of the current maxima
rowPos = row[0] # Get the row position of the current maxima
# If this value is already part of another ridge line, move to the next value
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima
if nextRows[0] == rowPos: # If the scale factors are the same, skip
continue
if np.abs(colPos - nextRows[1]) <= colMax and \
np.abs(rowPos - nextRows[0]) <= rowMax:
ridge.append((rowPos,colPos,nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
# If the ridge lines run all the way to the lowest scale factors, add them to the list
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
"""
For each of the ridge lines found from the filtered CWT array, determine the other
characteristics of the peaks.
The position of the peak is determined from the position of the maxima in the ridge
line.
"""
def getPeakInfo(ridgeLines,data,waveletCoeff):
# For each of the ridge lines we have found, locate the positions of the maxima. These
# correspond to the peak centers.
peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\
('cwtCoeff','f'),('SNR','f'),('length','uint8'),\
('intensity','f'),('wavenumber','f')])
# For each of the ridge lines, add the position of the peak center and the length of the
# line. These are useful for filtering peaks later.
for i,lines in enumerate(ridgeLines):
# Find the index of the maximum CWT coefficient. This is the peak center.
maximum = np.argmax(zip(*lines)[2])
peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\
data.x[lines[maximum][1]],data.y[lines[maximum][1]]
# Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is
# defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
"""
Processes spectral data and returns a structured array of peak information. Peak can then be
filtered based on ridge line length, signal to noise ratio and scale values.
"""
def getPeaks(waveNumbers,intensities):
data = _spectra(waveNumbers,intensities)
# Take the CWT of the spectra. Trim the result to remove padding.
waveletCoeff = signal.cwt(intensities, signal.ricker, \
np.linspace(lowerBound,upperBound,steps))
# Flip the matrix so the highest wavelet coefficient is the top row
waveletCoeff = np.flipud(waveletCoeff)
# Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
# takes a (scaleFactor,3) array of positions and values of maxima.
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)
# Populate a structured array with peak information
peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)
return peakInfo
|
[
"import numpy as np\nfrom scipy import stats\nfrom scipy import interpolate\nfrom math import factorial\nfrom scipy import signal\n\n\"\"\"\n\nA continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,\nit should work for most datasets.\n\nParameters\n----------\n\nlowerBound: The lowest value of the scale factor to use in the wavelet transform\nupperBound: The highest value of the scale factor to use in the wavelet transform\nsteps: The number of scale factors we want between the highest and lowest bounds\n\nrowWindow: The maximum number of rows that a ridge line can be discontinuous before it is\nterminated. I.e. the maximum number of scale factors it can deviate.\n\ncolWindow: The maximum number of columns that a ridge line can wander before it is terminated.\nI.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.\n\n\"\"\"\n\n# CWT Transform parameters\nlowerBound = 1\nupperBound = 70\nsteps = 90\n\n# Ridge line filtering parameters\nrowWindow = 2\ncolumnWindow = 5\n\nclass _spectra:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\"\"\"\n\nSimple helper function for finding all of the maxima in the 2D array returned by the wavelet\ntransform. Works on the basis of a simple comparison between neighbouring elements. 
These\nvalues form the initial basis for the ridge lines.\n\n\"\"\"\ndef _findMaxima1D(CWTArray):\n\n maximas = np.zeros(CWTArray.size,dtype=(float,3))\n\n # Populate the maxima array with a tuple of the coordinates and the values of the maxima\n count = 0\n for j,row in enumerate(CWTArray):\n for i,element in enumerate(row):\n try:\n if element > row[i-1] and element > row[i+1]:\n maximas[count]= ((steps-j,i,element))\n count += 1\n except IndexError:\n pass\n\n return np.vstack(maximas[:count])\n\n\"\"\"\n\nFilter the ridge lines found from the maxima of the CWT coefficient array based on a set\nparameters, namely the maximum deviations in wavenumber and scale space. Any lines which are\nfound from this criteria are considered to be peaks and further evaluated in the following\nsteps.\n\n\"\"\"\ndef _filterRidgeLines(maximaArray,rowMax,colMax):\n\n # Helper to prevent duplicating ridge lines\n def checkValues(value, ridgeLines):\n\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n\n ridgeLines = []\n\n # Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)\n for i,row in enumerate(maximaArray):\n ridge = [] # For each maxima start a ridge line\n colPos = row[1] # Get the column position of the current maxima\n rowPos = row[0] # Get the row position of the current maxima\n # If this value is already part of another ridge line, move to the next value\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima\n if nextRows[0] == rowPos: # If the scale factors are the same, skip\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and \\\n np.abs(rowPos - nextRows[0]) <= rowMax:\n ridge.append((rowPos,colPos,nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n\n # If the ridge lines run all the way to the lowest scale factors, add them to the list\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n 
ridgeLines.append(ridge)\n\n return ridgeLines\n\n\"\"\"\n\nFor each of the ridge lines found from the filtered CWT array, determine the other\ncharacteristics of the peaks.\n\nThe position of the peak is determined from the position of the maxima in the ridge\nline.\n\n\"\"\"\ndef getPeakInfo(ridgeLines,data,waveletCoeff):\n\n # For each of the ridge lines we have found, locate the positions of the maxima. These\n # correspond to the peak centers.\n peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\\\n ('cwtCoeff','f'),('SNR','f'),('length','uint8'),\\\n ('intensity','f'),('wavenumber','f')])\n\n # For each of the ridge lines, add the position of the peak center and the length of the\n # line. These are useful for filtering peaks later.\n for i,lines in enumerate(ridgeLines):\n # Find the index of the maximum CWT coefficient. This is the peak center.\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\\\n data.x[lines[maximum][1]],data.y[lines[maximum][1]]\n\n # Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is\n # defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n\n return peakInfo\n\n\"\"\"\n\nProcesses spectral data and returns a structured array of peak information. Peak can then be\nfiltered based on ridge line length, signal to noise ratio and scale values.\n\n\"\"\"\ndef getPeaks(waveNumbers,intensities):\n\n data = _spectra(waveNumbers,intensities)\n\n # Take the CWT of the spectra. 
Trim the result to remove padding.\n waveletCoeff = signal.cwt(intensities, signal.ricker, \\\n np.linspace(lowerBound,upperBound,steps))\n\n # Flip the matrix so the highest wavelet coefficient is the top row\n waveletCoeff = np.flipud(waveletCoeff)\n\n # Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines\n # takes a (scaleFactor,3) array of positions and values of maxima.\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)\n\n # Populate a structured array with peak information\n peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)\n\n return peakInfo\n",
"import numpy as np\nfrom scipy import stats\nfrom scipy import interpolate\nfrom math import factorial\nfrom scipy import signal\n<docstring token>\nlowerBound = 1\nupperBound = 70\nsteps = 90\nrowWindow = 2\ncolumnWindow = 5\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<docstring token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, 
peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<docstring token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"<import token>\n<docstring token>\nlowerBound = 1\nupperBound = 70\nsteps = 90\nrowWindow = 2\ncolumnWindow = 5\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<docstring token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n 
peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<docstring token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<docstring token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = 
stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<docstring token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<docstring token>\n<function token>\n<docstring token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n <function token>\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n <function token>\n <function token>\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n",
"<import token>\n<docstring token>\n<assignment token>\n\n\nclass _spectra:\n <function token>\n <function token>\n <function token>\n\n\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n",
"<import token>\n<docstring token>\n<assignment token>\n<class token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n<docstring token>\n<function token>\n"
] | false |
1,081 |
e51c0d8c6430603d989d55a64fdf77f9e1a2397b
|
"""
Tests of neo.io.exampleio
"""
import pathlib
import unittest
from neo.io.exampleio import ExampleIO # , HAVE_SCIPY
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
from neo.io.proxyobjects import (AnalogSignalProxy,
SpikeTrainProxy, EventProxy, EpochProxy)
from neo import (AnalogSignal, SpikeTrain)
import quantities as pq
import numpy as np
# This run standart tests, this is mandatory for all IO
class TestExampleIO(BaseTestIO, unittest.TestCase):
    """Run the common, mandatory IO test suite against ExampleIO.

    ExampleIO is a fake reader, so the entity files only need to exist on
    disk; setUp creates them empty and tearDown removes them again.
    """
    ioclass = ExampleIO
    entities_to_download = []
    entities_to_test = [
        'fake1.fake',
        'fake2.fake',
    ]

    def _entity_paths(self):
        """Yield the full path of each fake entity file as a pathlib.Path."""
        for entity in self.entities_to_test:
            yield pathlib.Path(get_test_file_full_path(
                self.ioclass, filename=entity,
                directory=self.local_test_dir))

    def setUp(self):
        super().setUp()
        # ensure fake test files exist before running common tests
        for path in self._entity_paths():
            path.touch()

    def tearDown(self) -> None:
        super().tearDown()
        # clean up the fake files so repeated runs start from a fresh state
        for path in self._entity_paths():
            path.unlink(missing_ok=True)
# This is the minimal variables that are required
# to run the common IO tests. IO specific tests
# can be added here and will be run automatically
# in addition to the common tests.
class Specific_TestExampleIO(unittest.TestCase):
    """IO-specific tests for ExampleIO, run in addition to the common tests."""

    def test_read_segment_lazy(self):
        """Lazy reads yield proxies that load into full neo objects;
        eager reads yield non-empty objects with names and annotations set."""
        r = ExampleIO(filename=None)
        seg = r.read_segment(lazy=True)
        for ana in seg.analogsignals:
            assert isinstance(ana, AnalogSignalProxy)
            ana = ana.load()
            assert isinstance(ana, AnalogSignal)
        for st in seg.spiketrains:
            assert isinstance(st, SpikeTrainProxy)
            st = st.load()
            assert isinstance(st, SpikeTrain)

        seg = r.read_segment(lazy=False)
        for anasig in seg.analogsignals:
            # bug fix: the original asserted on the stale `ana` variable left
            # over from the lazy loop above, so eagerly-read signals were
            # never actually type-checked.
            assert isinstance(anasig, AnalogSignal)
            self.assertNotEqual(anasig.size, 0)
        for st in seg.spiketrains:
            assert isinstance(st, SpikeTrain)
            self.assertNotEqual(st.size, 0)

        # annotations and names must be populated on every container/object
        assert 'seg_extra_info' in seg.annotations
        assert seg.name == 'Seg #0 Block #0'
        for anasig in seg.analogsignals:
            assert anasig.name is not None
        for st in seg.spiketrains:
            assert st.name is not None
        for ev in seg.events:
            assert ev.name is not None
        for ep in seg.epochs:
            assert ep.name is not None

    def test_read_block(self):
        """A lazy block read must succeed without raising."""
        r = ExampleIO(filename=None)
        r.read_block(lazy=True)

    def test_read_segment_with_time_slice(self):
        """Slicing a segment in time must shrink signals, spikes and events,
        and clip spike/event times to the requested window."""
        r = ExampleIO(filename=None)
        seg = r.read_segment(time_slice=None)
        shape_full = seg.analogsignals[0].shape
        spikes_full = seg.spiketrains[0]
        event_full = seg.events[0]

        t_start, t_stop = 260 * pq.ms, 1.854 * pq.s
        seg = r.read_segment(time_slice=(t_start, t_stop))
        shape_slice = seg.analogsignals[0].shape
        spikes_slice = seg.spiketrains[0]
        event_slice = seg.events[0]

        # the sliced signal must be strictly shorter along the time axis
        assert shape_full[0] > shape_slice[0]

        assert spikes_full.size > spikes_slice.size
        assert np.all(spikes_slice >= t_start)
        assert np.all(spikes_slice <= t_stop)
        assert spikes_slice.t_start == t_start
        assert spikes_slice.t_stop == t_stop

        assert event_full.size > event_slice.size
        assert np.all(event_slice.times >= t_start)
        assert np.all(event_slice.times <= t_stop)
# Allow running this test module directly (e.g. `python test_exampleio.py`).
if __name__ == "__main__":
    unittest.main()
|
[
"\"\"\"\nTests of neo.io.exampleio\n\"\"\"\n\nimport pathlib\nimport unittest\n\nfrom neo.io.exampleio import ExampleIO # , HAVE_SCIPY\nfrom neo.test.iotest.common_io_test import BaseTestIO\nfrom neo.test.iotest.tools import get_test_file_full_path\nfrom neo.io.proxyobjects import (AnalogSignalProxy,\n SpikeTrainProxy, EventProxy, EpochProxy)\nfrom neo import (AnalogSignal, SpikeTrain)\n\nimport quantities as pq\nimport numpy as np\n\n\n# This run standart tests, this is mandatory for all IO\nclass TestExampleIO(BaseTestIO, unittest.TestCase, ):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = [\n 'fake1.fake',\n 'fake2.fake',\n ]\n\n def setUp(self):\n super().setUp()\n # ensure fake test files exist before running common tests\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=entity,\n directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) -> None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=entity,\n directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n# This is the minimal variables that are required\n# to run the common IO tests. 
IO specific tests\n# can be added here and will be run automatically\n# in addition to the common tests.\nclass Specific_TestExampleIO(unittest.TestCase):\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n\n # annotations\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n #assert len(bl.list_units) == 3\n #assert len(bl.channel_indexes) == 1 + 1 # signals grouped + units grouped\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n\n assert shape_full[0] > shape_slice[0]\n\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n\n assert 
event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"<docstring token>\nimport pathlib\nimport unittest\nfrom neo.io.exampleio import ExampleIO\nfrom neo.test.iotest.common_io_test import BaseTestIO\nfrom neo.test.iotest.tools import get_test_file_full_path\nfrom neo.io.proxyobjects import AnalogSignalProxy, SpikeTrainProxy, EventProxy, EpochProxy\nfrom neo import AnalogSignal, SpikeTrain\nimport quantities as pq\nimport numpy as np\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = ['fake1.fake', 'fake2.fake']\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def 
test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = ['fake1.fake', 'fake2.fake']\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = 
r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n ioclass = ExampleIO\n entities_to_download = []\n entities_to_test = ['fake1.fake', 'fake2.fake']\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = 
r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def setUp(self):\n super().setUp()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).touch()\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, 
t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def tearDown(self) ->None:\n super().tearDown()\n for entity in self.entities_to_test:\n full_path = get_test_file_full_path(self.ioclass, filename=\n entity, directory=self.local_test_dir)\n pathlib.Path(full_path).unlink(missing_ok=True)\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > 
spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestExampleIO(BaseTestIO, unittest.TestCase):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert 
np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n\n def test_read_block(self):\n r = ExampleIO(filename=None)\n bl = r.read_block(lazy=True)\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n\n def test_read_segment_lazy(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(lazy=True)\n for ana in seg.analogsignals:\n assert isinstance(ana, AnalogSignalProxy)\n ana = ana.load()\n assert isinstance(ana, AnalogSignal)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrainProxy)\n st = st.load()\n assert isinstance(st, SpikeTrain)\n seg = r.read_segment(lazy=False)\n for anasig in seg.analogsignals:\n assert isinstance(ana, AnalogSignal)\n self.assertNotEqual(anasig.size, 0)\n for st in seg.spiketrains:\n assert isinstance(st, SpikeTrain)\n self.assertNotEqual(st.size, 0)\n assert 'seg_extra_info' in seg.annotations\n assert seg.name == 'Seg #0 Block #0'\n for anasig in seg.analogsignals:\n assert anasig.name is not None\n for st in seg.spiketrains:\n assert st.name is not None\n for ev in seg.events:\n assert ev.name is not None\n for ep in seg.epochs:\n assert ep.name is not None\n <function token>\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n <function token>\n <function token>\n\n def test_read_segment_with_time_slice(self):\n r = ExampleIO(filename=None)\n seg = r.read_segment(time_slice=None)\n shape_full = seg.analogsignals[0].shape\n spikes_full = seg.spiketrains[0]\n event_full = seg.events[0]\n t_start, t_stop = 260 * pq.ms, 1.854 * pq.s\n seg = r.read_segment(time_slice=(t_start, t_stop))\n shape_slice = seg.analogsignals[0].shape\n spikes_slice = seg.spiketrains[0]\n event_slice = seg.events[0]\n assert shape_full[0] > shape_slice[0]\n assert spikes_full.size > spikes_slice.size\n assert np.all(spikes_slice >= t_start)\n assert np.all(spikes_slice <= t_stop)\n assert spikes_slice.t_start == t_start\n assert spikes_slice.t_stop == t_stop\n assert event_full.size > event_slice.size\n assert np.all(event_slice.times >= t_start)\n assert np.all(event_slice.times <= t_stop)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass Specific_TestExampleIO(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n<code token>\n"
] | false |
1,082 |
57b51ea36e9e2a095cf7e9646db2cc400cc72b83
|
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import ChartModule
from mesa.batchrunner import BatchRunner
from agentPortrayal import agent_portrayal
import metrics
from matplotlib import pyplot as plt
from ArchitectureModel import MASArchitecture
import os
import random
import sys
# --- Run configuration -------------------------------------------------------
runBatch = True            # True: headless batch parameter sweep; False: interactive web visualisation
architecture = 'Inter-Firm'
saveResults = True         # batch mode only: pickle the collected dataframes under results/test_<n>/

if __name__ == '__main__':
    dir_path = os.path.dirname(os.path.realpath(__file__))

    if runBatch:
        # Model parameters held constant for every run of the sweep.
        fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,
                        'distributed': True, 'verbose': False,
                        'searchSize': 1, 'batchRun': True}

        # Swept parameters: every combination is simulated `iterations` times.
        variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],
                           'ordersPerWeek': [1, 5, 20, 40, 80, 120]}

        batch_run = BatchRunner(
            MASArchitecture,
            variable_params,
            fixed_params,
            iterations=10,
            max_steps=800,
            # Model-level reporters: each is sampled once at the end of a run.
            model_reporters={
                'Utilisation': metrics.machineUtilisation,
                'CompleteOrders': metrics.ordersComplete,
                'AverageOrderWaitTime': metrics.averageOrderWaitTime,
                'TotalMessagesSent': metrics.totalMessagesSent,
                'AverageMessagesSent': metrics.averageMessagesSent,
                'SuccessfulOrders': metrics.successfulOrders,
                'noProposalOrders': metrics.noProposalOrders,
                'OutsourcedOrders': metrics.outsourcedOrders,
                'LateOrders': metrics.lateOrders,
                'WIPBacklog': metrics.totalWIPSize,
                'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,
                'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,
                'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,
                'MaxMessagesReceivedFactory': metrics.maxMessagesReceivedByFactory,
                'AverageSatisfactionScore': metrics.averageSatisfactionScore,
                'AverageSuccessfulSatisfactionScore': metrics.averageSuccessfulSatisfactionScore,
                'CheapOrdersWithCheapMachines': metrics.cheapOrdersWithCheapMachines,
                'AsapOrdersWithFastMachines': metrics.asapOrdersWithFastMachines,
                'AverageSuccessfulPrice': metrics.averageSuccessfulOrderPrice,
                'AverageSuccessfulOrderPriceCheap': metrics.averageSuccessfulOrderPriceCheap,
                'AverageSuccessfulOrderPriceNeutral': metrics.averageSuccessfulOrderPriceNeutral,
                'AverageSuccessfulOrderPriceAsap': metrics.averageSuccessfulOrderPriceAsap,
                'AverageSuccessfulMakespan': metrics.averageSuccessfulOrderMakeSpan,
                'AverageSuccessfulOrderMakespanCheap': metrics.averageSuccessfulOrderMakespanCheap,
                'AverageSuccessfulOrderMakespanNeutral': metrics.averageSuccessfulOrderMakespanNeutral,
                'AverageSuccessfulOrderMakespanAsap': metrics.averageSuccessfulOrderMakespanAsap,
                'SuccessfulAsapOrders': metrics.percentageOfSuccessfulASAPOrders,
                'SuccessfulCheapOrders': metrics.percentageOfSuccessfulCheapOrders,
                'SuccessfulNeutralOrders': metrics.percentageOfSuccessfulNeutralOrders,
            },
            agent_reporters={
                'id': 'unique_id',
                # TODO: add in other agent reports that you would like to use
            },
        )

        batch_run.run_all()

        model_data = batch_run.get_model_vars_dataframe()
        agent_data = batch_run.get_agent_vars_dataframe()

        if saveResults:
            # Pick the first unused results/test_<n> directory so earlier
            # runs are never overwritten.
            # CHANGE PATH HERE IF YOU WANT RESULTS SAVED ELSEWHERE.
            number = 0
            while os.path.exists('{}/results/test_{}'.format(dir_path, number)):
                number += 1
            os.makedirs('{}/results/test_{}'.format(dir_path, number))
            model_data.to_pickle(
                '{}/results/test_{}/model_data.pkl'.format(dir_path, number))
            agent_data.to_pickle(
                '{}/results/test_{}/agent_data.pkl'.format(dir_path, number))

    else:
        def make_chart(label, colour):
            # One line chart per metric; all charts read the model's
            # `datacollector` attribute.
            return ChartModule([{'Label': label, 'Color': colour}],
                               data_collector_name='datacollector')

        grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)

        # Charts are listed in the order they should appear on the page.
        charts = [make_chart(label, colour) for label, colour in [
            ('Utilisation', 'Black'),
            ('Complete Orders', 'Black'),
            ('Average Order Wait Time', 'Red'),
            ('Total Messages Sent', 'Red'),
            ('Average Messages Sent', 'Red'),
            ('Successful Orders', 'Green'),
            ('Orders that received no proposals', 'Red'),
            ('Outsourced Orders', 'Blue'),
            ('Late Orders', 'Red'),
            ('WIP Backlog', 'Blue'),
            ('Max Messages Sent - Order', 'Blue'),
            ('Max Messages Received - Order', 'Blue'),
            ('Max Messages Sent - Factory', 'Red'),
            ('Max Messages Received - Factory', 'Red'),
            ('Average satisfaction score', 'Blue'),
            ('Average successful satisfaction score', 'Blue'),
            ('% Cheap orders with cheap machines', 'Green'),
            ('% Asap orders with fast machines', 'Green'),
            ('Average successful price', 'Blue'),
            ('Average successful price Cheap', 'Blue'),
            ('Average successful price Neutral', 'Blue'),
            ('Average successful price Asap', 'Blue'),
            ('Average successful makespan', 'Green'),
            ('Average successful makespan Cheap', 'Green'),
            ('Average successful makespan Neutral', 'Green'),
            ('Average successful makespan Asap', 'Green'),
            ('Successful Cheap Orders', 'Red'),
            ('Successful Neutral Orders', 'Red'),
            ('Successful Asap Orders', 'Red'),
        ]]

        server = ModularServer(
            MASArchitecture,
            [grid] + charts,
            'Festo-Fetch.ai',
            {'width': 60, 'height': 60, 'distributed': True, 'quantity': 10,
             'splitSize': 1, 'newOrderProbability': 5, 'verbose': True,
             'ordersPerWeek': 40,
             'model_reporters_dict': {
                 'Utilisation': metrics.machineUtilisation,
                 'Complete Orders': metrics.ordersComplete,
                 'Average Order Wait Time': metrics.averageOrderWaitTime,
                 'Successful Orders': metrics.successfulOrders,
                 'Total Messages Sent': metrics.totalMessagesSent,
                 'Average Messages Sent': metrics.averageMessagesSent,
                 'Late Orders': metrics.lateOrders,
                 'WIP Backlog': metrics.totalWIPSize,
                 'Max Messages Sent - Order': metrics.maxMessagesSentFromOrder,
                 'Max Messages Received - Order': metrics.maxMessagesReceivedByOrder,
                 'Max Messages Sent - Factory': metrics.maxMessagesSentFromFactory,
                 'Max Messages Received - Factory': metrics.maxMessagesReceivedByFactory,
                 'Outsourced Orders': metrics.outsourcedOrders,
                 'Orders that received no proposals': metrics.noProposalOrders,
                 'Average successful satisfaction score': metrics.averageSuccessfulSatisfactionScore,
                 'Average satisfaction score': metrics.averageSatisfactionScore,
                 '% Cheap orders with cheap machines': metrics.cheapOrdersWithCheapMachines,
                 '% Asap orders with fast machines': metrics.asapOrdersWithFastMachines,
                 'Average successful price': metrics.averageSuccessfulOrderPrice,
                 'Average successful price Cheap': metrics.averageSuccessfulOrderPriceCheap,
                 'Average successful price Neutral': metrics.averageSuccessfulOrderPriceNeutral,
                 'Average successful price Asap': metrics.averageSuccessfulOrderPriceAsap,
                 'Average successful makespan': metrics.averageSuccessfulOrderMakeSpan,
                 'Average successful makespan Cheap': metrics.averageSuccessfulOrderMakespanCheap,
                 'Average successful makespan Neutral': metrics.averageSuccessfulOrderMakespanNeutral,
                 'Average successful makespan Asap': metrics.averageSuccessfulOrderMakespanAsap,
                 # BUG FIX: these three labels were previously wired to the
                 # wrong metrics (Cheap->ASAP, Neutral->Cheap, Asap->Neutral).
                 # They now match the batch-mode reporters above.
                 'Successful Cheap Orders': metrics.percentageOfSuccessfulCheapOrders,
                 'Successful Neutral Orders': metrics.percentageOfSuccessfulNeutralOrders,
                 'Successful Asap Orders': metrics.percentageOfSuccessfulASAPOrders,
             }})

        server.port = 8521
        server.launch()
|
[
"from mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.batchrunner import BatchRunner\nfrom agentPortrayal import agent_portrayal\nimport metrics\nfrom matplotlib import pyplot as plt\nfrom ArchitectureModel import MASArchitecture\nimport os\nimport random\nimport sys\n\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\n\n\n\nif __name__ == '__main__':\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n \n\n if(runBatch):\n fixed_params = {'width': 60, 'height': 60,'splitSize':1,'distributed':True,'verbose':False,'searchSize':1,'batchRun':True}\n\n variable_params = {'quantity':[10,20,50,80,100,120,150],'ordersPerWeek':[1,5,20,40,80,120]}\n\n batch_run = BatchRunner(\n MASArchitecture,\n variable_params,\n fixed_params,\n iterations=10,\n max_steps=800,\n model_reporters={\n \"Utilisation\": metrics.machineUtilisation,\n \"CompleteOrders\": metrics.ordersComplete,\n 'AverageOrderWaitTime': metrics.averageOrderWaitTime,\n 'TotalMessagesSent': metrics.totalMessagesSent, \n 'AverageMessagesSent': metrics.averageMessagesSent, \n \"SuccessfulOrders\":metrics.successfulOrders,\n \"noProposalOrders\":metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders,\n 'LateOrders':metrics.lateOrders,\n 'WIPBacklog':metrics.totalWIPSize, \n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder, \n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory, \n 'MaxMessagesReceivedFactory': metrics.maxMessagesReceivedByFactory,\n \n 'AverageSatisfactionScore':metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore':metrics.averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines':metrics.cheapOrdersWithCheapMachines,\n 'AsapOrdersWithFastMachines':metrics.asapOrdersWithFastMachines,\n \n 'AverageSuccessfulPrice': 
metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap':metrics.averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral':metrics.averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap':metrics.averageSuccessfulOrderPriceAsap,\n \n 'AverageSuccessfulMakespan': metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap':metrics.averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral':metrics.averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap':metrics.averageSuccessfulOrderMakespanAsap,\n\n 'SuccessfulAsapOrders':metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders':metrics.percentageOfSuccessfulCheapOrders,\n 'SuccessfulNeutralOrders':metrics.percentageOfSuccessfulNeutralOrders\n },\n agent_reporters={\n 'id':'unique_id',\n # # TODO: add in other agent reports that you would like to use\n }\n )\n\n batch_run.run_all()\n\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n\n \n # Save results\n if(saveResults):\n number = 0\n ### CHANGE PATH TO WHERE YOU WANT RESULTS TO BE SAVED\n while (os.path.exists('{}/results/test_{}'.format(dir_path,number)) == True):\n number += 1\n\n # TODO: maybe make a text file that describes the test that has been run\n os.makedirs(\n '{}/results/test_{}'.format(dir_path,number))\n\n model_data.to_pickle(\n '{}/results/test_{}/model_data.pkl'.format(dir_path,number))\n agent_data.to_pickle(\n '{}/results/test_{}/agent_data.pkl'.format(dir_path,number))\n\n \n \n else:\n # TODO: rename all of these\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', \"Color\": 'Black'}],data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'}], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time','Color': 'Red'}], 
data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent','Color': 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label': 'Average Messages Sent','Color': 'Red'}], data_collector_name='datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders','Color': 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders','Color': 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog','Color': 'Blue'}], data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order','Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order','Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory','Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory','Color': 'Red'}], data_collector_name='datacollector')\n \n \n\n chart13 = ChartModule([{'Label': 'Average satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label': 'Average successful satisfaction score','Color': 'Blue'}], data_collector_name='datacollector')\n chart15 = ChartModule([{'Label': '% Cheap orders with cheap machines','Color': 'Green'}], data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines','Color': 'Green'}], data_collector_name='datacollector')\n\n chart17 = ChartModule([{'Label': 'Average successful price','Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap','Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average 
successful price Neutral','Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap','Color': 'Blue'}], data_collector_name='datacollector')\n\n chart21 = ChartModule([{'Label': 'Average successful makespan','Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label': 'Average successful makespan Cheap','Color': 'Green'}], data_collector_name='datacollector')\n chart23 = ChartModule([{'Label': 'Average successful makespan Neutral','Color': 'Green'}], data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap','Color': 'Green'}], data_collector_name='datacollector')\n\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders','Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders','Color': 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label': 'Orders that received no proposals','Color': 'Red'}], data_collector_name='datacollector')\n\n\n\n \n\n \n server = ModularServer(MASArchitecture,\n [grid,\n chart,\n chart2,\n chart3,\n chart4,\n averageMessagesSentChart,\n chart5, \n noProposalOrdersChart,\n chart6,\n chart7, \n chart8, chart9, chart10,chart11, chart12,\n chart13,chart14,\n chart15,\n chart16,chart17,\n chart18, chart19, chart20,chart21,chart22,chart23,chart24,chart25,chart26,chart27\n ],\n 'Festo-Fetch.ai',\n\n {'width': 60, 'height': 60, 'distributed':True,'quantity':10,'splitSize':1,'newOrderProbability':5,'verbose':True,'ordersPerWeek':40,\n 'model_reporters_dict': {\n \"Utilisation\": metrics.machineUtilisation,\n \"Complete Orders\": metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime, \n \"Successful Orders\":metrics.successfulOrders,\n 'Total Messages Sent': 
metrics.totalMessagesSent, \n 'Average Messages Sent': metrics.averageMessagesSent, \n 'Late Orders':metrics.lateOrders,\n 'WIP Backlog':metrics.totalWIPSize, \n 'Max Messages Sent - Order': metrics.maxMessagesSentFromOrder, \n 'Max Messages Received - Order': metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.maxMessagesSentFromFactory, \n 'Max Messages Received - Factory': metrics.maxMessagesReceivedByFactory,\n 'Outsourced Orders': metrics.outsourcedOrders,\n 'Orders that received no proposals':metrics.noProposalOrders,\n \n 'Average successful satisfaction score':metrics.averageSuccessfulSatisfactionScore,\n 'Average satisfaction score':metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines':metrics.cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines':metrics.asapOrdersWithFastMachines,\n\n 'Average successful price': metrics.averageSuccessfulOrderPrice,\n\n 'Average successful price Cheap':metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral':metrics.averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap':metrics.averageSuccessfulOrderPriceAsap,\n \n 'Average successful makespan': metrics.averageSuccessfulOrderMakeSpan,\n\n 'Average successful makespan Cheap':metrics.averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral':metrics.averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap':metrics.averageSuccessfulOrderMakespanAsap,\n \n 'Successful Cheap Orders':metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders':metrics.percentageOfSuccessfulCheapOrders,\n 'Successful Asap Orders':metrics.percentageOfSuccessfulNeutralOrders\n\n }})\n\n server.port = 8521\n server.launch()",
"from mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import ChartModule\nfrom mesa.batchrunner import BatchRunner\nfrom agentPortrayal import agent_portrayal\nimport metrics\nfrom matplotlib import pyplot as plt\nfrom ArchitectureModel import MASArchitecture\nimport os\nimport random\nimport sys\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n 
metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], 
data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], 
data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages 
Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average successful satisfaction score': metrics.\n averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"<import token>\nrunBatch = True\narchitecture = 'Inter-Firm'\nsaveResults = True\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 
'AverageSuccessfulOrderMakespanCheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n 
chart8 = ChartModule([{'Label': 'WIP Backlog', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n 
data_collector_name='datacollector')\n chart24 = ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average 
successful satisfaction score': metrics.\n averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"<import token>\n<assignment token>\nif __name__ == '__main__':\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if runBatch:\n fixed_params = {'width': 60, 'height': 60, 'splitSize': 1,\n 'distributed': True, 'verbose': False, 'searchSize': 1,\n 'batchRun': True}\n variable_params = {'quantity': [10, 20, 50, 80, 100, 120, 150],\n 'ordersPerWeek': [1, 5, 20, 40, 80, 120]}\n batch_run = BatchRunner(MASArchitecture, variable_params,\n fixed_params, iterations=10, max_steps=800, model_reporters={\n 'Utilisation': metrics.machineUtilisation, 'CompleteOrders':\n metrics.ordersComplete, 'AverageOrderWaitTime': metrics.\n averageOrderWaitTime, 'TotalMessagesSent': metrics.\n totalMessagesSent, 'AverageMessagesSent': metrics.\n averageMessagesSent, 'SuccessfulOrders': metrics.\n successfulOrders, 'noProposalOrders': metrics.noProposalOrders,\n 'OutsourcedOrders': metrics.outsourcedOrders, 'LateOrders':\n metrics.lateOrders, 'WIPBacklog': metrics.totalWIPSize,\n 'MaxMessagesSentOrder': metrics.maxMessagesSentFromOrder,\n 'MaxMessagesReceivedOrder': metrics.maxMessagesReceivedByOrder,\n 'MaxMessagesSentFactory': metrics.maxMessagesSentFromFactory,\n 'MaxMessagesReceivedFactory': metrics.\n maxMessagesReceivedByFactory, 'AverageSatisfactionScore':\n metrics.averageSatisfactionScore,\n 'AverageSuccessfulSatisfactionScore': metrics.\n averageSuccessfulSatisfactionScore,\n 'CheapOrdersWithCheapMachines': metrics.\n cheapOrdersWithCheapMachines, 'AsapOrdersWithFastMachines':\n metrics.asapOrdersWithFastMachines, 'AverageSuccessfulPrice':\n metrics.averageSuccessfulOrderPrice,\n 'AverageSuccessfulOrderPriceCheap': metrics.\n averageSuccessfulOrderPriceCheap,\n 'AverageSuccessfulOrderPriceNeutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'AverageSuccessfulOrderPriceAsap': metrics.\n averageSuccessfulOrderPriceAsap, 'AverageSuccessfulMakespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'AverageSuccessfulOrderMakespanCheap': metrics.\n 
averageSuccessfulOrderMakespanCheap,\n 'AverageSuccessfulOrderMakespanNeutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'AverageSuccessfulOrderMakespanAsap': metrics.\n averageSuccessfulOrderMakespanAsap, 'SuccessfulAsapOrders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'SuccessfulCheapOrders': metrics.\n percentageOfSuccessfulCheapOrders, 'SuccessfulNeutralOrders':\n metrics.percentageOfSuccessfulNeutralOrders}, agent_reporters={\n 'id': 'unique_id'})\n batch_run.run_all()\n model_data = batch_run.get_model_vars_dataframe()\n agent_data = batch_run.get_agent_vars_dataframe()\n if saveResults:\n number = 0\n while os.path.exists('{}/results/test_{}'.format(dir_path, number)\n ) == True:\n number += 1\n os.makedirs('{}/results/test_{}'.format(dir_path, number))\n model_data.to_pickle('{}/results/test_{}/model_data.pkl'.format\n (dir_path, number))\n agent_data.to_pickle('{}/results/test_{}/agent_data.pkl'.format\n (dir_path, number))\n else:\n grid = CanvasGrid(agent_portrayal, 60, 60, 600, 600)\n chart = ChartModule([{'Label': 'Utilisation', 'Color': 'Black'}],\n data_collector_name='datacollector')\n chart2 = ChartModule([{'Label': 'Complete Orders', 'Color': 'Black'\n }], data_collector_name='datacollector')\n chart3 = ChartModule([{'Label': 'Average Order Wait Time', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart4 = ChartModule([{'Label': 'Total Messages Sent', 'Color':\n 'Red'}], data_collector_name='datacollector')\n averageMessagesSentChart = ChartModule([{'Label':\n 'Average Messages Sent', 'Color': 'Red'}], data_collector_name=\n 'datacollector')\n chart5 = ChartModule([{'Label': 'Successful Orders', 'Color':\n 'Green'}], data_collector_name='datacollector')\n chart6 = ChartModule([{'Label': 'Outsourced Orders', 'Color':\n 'Blue'}], data_collector_name='datacollector')\n chart7 = ChartModule([{'Label': 'Late Orders', 'Color': 'Red'}],\n data_collector_name='datacollector')\n chart8 = ChartModule([{'Label': 'WIP Backlog', 
'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart9 = ChartModule([{'Label': 'Max Messages Sent - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart10 = ChartModule([{'Label': 'Max Messages Received - Order',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart11 = ChartModule([{'Label': 'Max Messages Sent - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart12 = ChartModule([{'Label': 'Max Messages Received - Factory',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart13 = ChartModule([{'Label': 'Average satisfaction score',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart14 = ChartModule([{'Label':\n 'Average successful satisfaction score', 'Color': 'Blue'}],\n data_collector_name='datacollector')\n chart15 = ChartModule([{'Label':\n '% Cheap orders with cheap machines', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart16 = ChartModule([{'Label': '% Asap orders with fast machines',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart17 = ChartModule([{'Label': 'Average successful price',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart18 = ChartModule([{'Label': 'Average successful price Cheap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart19 = ChartModule([{'Label': 'Average successful price Neutral',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart20 = ChartModule([{'Label': 'Average successful price Asap',\n 'Color': 'Blue'}], data_collector_name='datacollector')\n chart21 = ChartModule([{'Label': 'Average successful makespan',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart22 = ChartModule([{'Label':\n 'Average successful makespan Cheap', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart23 = ChartModule([{'Label':\n 'Average successful makespan Neutral', 'Color': 'Green'}],\n data_collector_name='datacollector')\n chart24 = 
ChartModule([{'Label': 'Average successful makespan Asap',\n 'Color': 'Green'}], data_collector_name='datacollector')\n chart25 = ChartModule([{'Label': 'Successful Cheap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n chart26 = ChartModule([{'Label': 'Successful Neutral Orders',\n 'Color': 'Red'}], data_collector_name='datacollector')\n chart27 = ChartModule([{'Label': 'Successful Asap Orders', 'Color':\n 'Red'}], data_collector_name='datacollector')\n noProposalOrdersChart = ChartModule([{'Label':\n 'Orders that received no proposals', 'Color': 'Red'}],\n data_collector_name='datacollector')\n server = ModularServer(MASArchitecture, [grid, chart, chart2,\n chart3, chart4, averageMessagesSentChart, chart5,\n noProposalOrdersChart, chart6, chart7, chart8, chart9, chart10,\n chart11, chart12, chart13, chart14, chart15, chart16, chart17,\n chart18, chart19, chart20, chart21, chart22, chart23, chart24,\n chart25, chart26, chart27], 'Festo-Fetch.ai', {'width': 60,\n 'height': 60, 'distributed': True, 'quantity': 10, 'splitSize':\n 1, 'newOrderProbability': 5, 'verbose': True, 'ordersPerWeek': \n 40, 'model_reporters_dict': {'Utilisation': metrics.\n machineUtilisation, 'Complete Orders': metrics.ordersComplete,\n 'Average Order Wait Time': metrics.averageOrderWaitTime,\n 'Successful Orders': metrics.successfulOrders,\n 'Total Messages Sent': metrics.totalMessagesSent,\n 'Average Messages Sent': metrics.averageMessagesSent,\n 'Late Orders': metrics.lateOrders, 'WIP Backlog': metrics.\n totalWIPSize, 'Max Messages Sent - Order': metrics.\n maxMessagesSentFromOrder, 'Max Messages Received - Order':\n metrics.maxMessagesReceivedByOrder,\n 'Max Messages Sent - Factory': metrics.\n maxMessagesSentFromFactory, 'Max Messages Received - Factory':\n metrics.maxMessagesReceivedByFactory, 'Outsourced Orders':\n metrics.outsourcedOrders, 'Orders that received no proposals':\n metrics.noProposalOrders,\n 'Average successful satisfaction score': metrics.\n 
averageSuccessfulSatisfactionScore,\n 'Average satisfaction score': metrics.averageSatisfactionScore,\n '% Cheap orders with cheap machines': metrics.\n cheapOrdersWithCheapMachines,\n '% Asap orders with fast machines': metrics.\n asapOrdersWithFastMachines, 'Average successful price': metrics\n .averageSuccessfulOrderPrice, 'Average successful price Cheap':\n metrics.averageSuccessfulOrderPriceCheap,\n 'Average successful price Neutral': metrics.\n averageSuccessfulOrderPriceNeutral,\n 'Average successful price Asap': metrics.\n averageSuccessfulOrderPriceAsap, 'Average successful makespan':\n metrics.averageSuccessfulOrderMakeSpan,\n 'Average successful makespan Cheap': metrics.\n averageSuccessfulOrderMakespanCheap,\n 'Average successful makespan Neutral': metrics.\n averageSuccessfulOrderMakespanNeutral,\n 'Average successful makespan Asap': metrics.\n averageSuccessfulOrderMakespanAsap, 'Successful Cheap Orders':\n metrics.percentageOfSuccessfulASAPOrders,\n 'Successful Neutral Orders': metrics.\n percentageOfSuccessfulCheapOrders, 'Successful Asap Orders':\n metrics.percentageOfSuccessfulNeutralOrders}})\n server.port = 8521\n server.launch()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
1,083 |
f5dffa3c22bb35ed07cb5ca28f2ba02ea3c07dda
|
import math
import random
import pygame
# --- pygame bootstrap and window setup ---
pygame.init()
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption('space invaders')
background = pygame.image.load('background.png')
# --- score display state ---
# NOTE(review): "testY" looks like a typo for "textY" — kept as-is because
# show_score() reads it by this name.
score = 0
previous_score = 0
score_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)
textX = 10
testY = 10
# --- intro screen state (two identical fonts: one per banner layer) ---
intro = True
intro_text = "SpaceInvaders"
intro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
intro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)
# --- play button (the replay button reuses the same position) ---
play_button = pygame.image.load('play-button.png')
play_button_X = (SCREEN_WIDTH / 2) - play_button.get_width()
play_button_Y = (SCREEN_HEIGHT / (4 / 3)) - play_button.get_height()
# --- game-over screen state ---
gameover = False
gameover_text = "Game Over"
replay_button = pygame.image.load('replay.png')
# --- player sprite and horizontal movement (px per ms, scaled by dt) ---
player_image = pygame.image.load('spaceship.png')
player_X = 370
player_Y = 480
player_movement = 0
# --- bullet pool: parallel lists indexed by bullet number ---
bullet_image = pygame.image.load('hot.png')
bullet_X = []
bullet_Y = []
bullet_movement = 0.7
bullet_fired = []
num_bullet = 1
for i in range(num_bullet):
    bullet_X.append(0)
    bullet_Y.append(player_Y)
    bullet_fired.append(False)
# --- enemy pool: parallel lists indexed by enemy number ---
enemy_image = pygame.image.load('ufo.png')
enemy_X = []
enemy_Y = []
enemy_X_movement = []
enemy_Y_movement = 40
num_enemies = 2
# --- difficulty ramp: gamespeed grows with score (see main loop) ---
gamespeed = 0
gamespeed_increment = 0.05
for i in range(num_enemies):
    enemy_X.append(random.randint(0, 736))
    enemy_Y.append(random.randint(50, 150))
    enemy_X_movement.append(0.2)
def player(x, y):
    """Draw the player's spaceship sprite with its top-left corner at (x, y)."""
    position = (x, y)
    screen.blit(player_image, position)
def fire_bullet(x, y, n):
    """Flag bullet *n* as in flight and draw it near (x, y).

    The small (+16, +10) offset centres the shot on the spaceship sprite.
    """
    global bullet_fired
    draw_at = (x + 16, y + 10)
    bullet_fired[n] = True
    screen.blit(bullet_image, draw_at)
def add_bullet():
    """Grow the bullet pool by one idle bullet parked at the player's row."""
    global num_bullet
    num_bullet = num_bullet + 1
    bullet_X.append(0)
    bullet_Y.append(player_Y)
    bullet_fired.append(False)
def spawn_enemy(x, y):
    """Draw the UFO sprite with its top-left corner at (x, y)."""
    position = (x, y)
    screen.blit(enemy_image, position)
def add_enemy():
    """Append one freshly randomised enemy to the pool and bump the count."""
    global num_enemies
    enemy_X.append(random.randint(0, 736))
    enemy_Y.append(random.randint(50, 150))
    enemy_X_movement.append(0.2)
    num_enemies = num_enemies + 1
def reset_enemy(index):
    """Respawn enemy *index* at a fresh random position near the top of the screen."""
    # Draw both random values in the original order so the RNG stream is unchanged.
    new_x = random.randint(0, 736)
    new_y = random.randint(50, 150)
    enemy_X[index] = new_x
    enemy_Y[index] = new_y
    enemy_X_movement[index] = 0.2
def reset_bullet(n):
    """Return bullet *n* to its idle state, parked at the player's row."""
    global bullet_fired, bullet_Y
    bullet_Y[n] = player_Y
    bullet_fired[n] = False
def isCollion(eX, eY, bX, bY):
    """Return True when the enemy at (eX, eY) and the bullet at (bX, bY)
    are closer than 27 pixels (the hit radius), else False.

    NOTE(review): the misspelt name ("isCollision" was presumably intended)
    is kept because callers in the main loop depend on it.
    """
    # math.hypot is the idiomatic, numerically safer Euclidean distance.
    return math.hypot(eX - bX, eY - bY) < 27
def show_score():
    """Blit the current score in the top-left corner of the screen."""
    rendered = score_font.render("Score: " + str(score), True, (255, 255, 255))
    screen.blit(rendered, (textX, testY))
def show_intro():
    """Draw the intro screen: the title banner plus the play button."""
    show_big_text(intro_text)
    show_play_button()
def show_big_text(s):
    """Render *s* centred on screen twice — a cyan layer, then a pink layer
    offset by 3 px — producing a simple drop-shadow banner effect."""
    text = intro_font.render(s, True, (89, 203, 255))
    text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))
    screen.blit(text, text_rect)
    text2 = intro_font2.render(s, True, (250, 50, 183))
    # BUG FIX: the offset rect was previously taken from `text` instead of
    # `text2`; harmless while both fonts are identical, wrong if they diverge.
    text_rect2 = text2.get_rect(center=((SCREEN_WIDTH / 2) + 3, (SCREEN_HEIGHT / 2) + 3))
    screen.blit(text2, text_rect2)
def show_play_button():
    """Blit the play button at its fixed on-screen position."""
    location = (play_button_X, play_button_Y)
    screen.blit(play_button, location)
def show_replay_button():
    """Blit the replay button; it shares the play button's position."""
    location = (play_button_X, play_button_Y)
    screen.blit(replay_button, location)
def play_button_clicked():
    """Return True when the left mouse button is held down inside the
    play/replay button's bounding box, else False."""
    if pygame.mouse.get_pressed()[0] != 1:
        return False
    mouse_x, mouse_y = pygame.mouse.get_pos()
    inside_x = play_button_X < mouse_x < play_button_X + play_button.get_width()
    inside_y = play_button_Y < mouse_y < play_button_Y + play_button.get_height()
    return inside_x and inside_y
def game_over_screen():
    """Draw the game-over screen: banner, final score, and replay button."""
    show_big_text(gameover_text)
    show_score()
    show_replay_button()
def reset():
    """Restore all mutable game state to its initial configuration so a new
    game can start after "Game Over".

    Rebinds the module-level pools (enemies, bullets), the player position,
    the score, and the difficulty ramp.
    """
    global num_enemies, enemy_X, enemy_Y, enemy_X_movement, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y
    num_enemies = 2
    enemy_X = []
    enemy_Y = []
    # BUG FIX: enemy_X_movement was previously only appended to (never
    # cleared), so it grew by one stale entry per replay — and the appended
    # speed was 2, ten times the 0.2 used at startup. Rebuild it from scratch
    # with the startup speed instead.
    enemy_X_movement = []
    for i in range(num_enemies):
        enemy_X.append(random.randint(0, 736))
        enemy_Y.append(random.randint(50, 150))
        enemy_X_movement.append(0.2)
    player_X = 370
    player_Y = 480
    score = 0
    gamespeed = 0
    num_bullet = 1
    bullet_X = [0]
    bullet_Y = [player_Y]
    bullet_fired = [False]
# ---------------------------------------------------------------------------
# Main game loop: one iteration per frame, capped at 60 FPS by clock.tick.
# ---------------------------------------------------------------------------
running = True
while running:
    screen.fill((0, 0, 0))
    screen.blit(background, (0, 0))
    # dt = milliseconds since the previous frame; all movement below is
    # scaled by it so speeds are frame-rate independent.
    dt = clock.tick(60)
    # Intro screen: stays in this inner loop until the play button is clicked.
    while intro:
        show_intro()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        if play_button_clicked():
            intro = False
        pygame.display.update()
    # Game-over screen: stays here until replay is clicked, which resets state.
    while gameover:
        game_over_screen()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        if play_button_clicked():
            reset()
            gameover = False
        pygame.display.update()
    # Input handling: arrow keys steer, space fires, releasing an arrow stops.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                player_movement = -0.2 - gamespeed
            if event.key == pygame.K_RIGHT:
                player_movement = 0.2 + gamespeed
            if event.key == pygame.K_SPACE:
                # Fire the first bullet in the pool that is not already in flight.
                for i in range(num_bullet):
                    if not bullet_fired[i]:
                        bullet_X[i] = player_X
                        fire_bullet(bullet_X[i], bullet_Y[i], i)
                        break
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
                player_movement = 0
    # Player movement, clamped to the horizontal screen bounds.
    player_X += player_movement * dt
    if player_X <= 1:
        player_X = 1
    elif player_X >= 735:
        player_X = 735
    # Bullet movement: bullets travel straight up; off-screen bullets go idle.
    for i in range(num_bullet):
        if bullet_Y[i] <= 1:
            reset_bullet(i)
        if bullet_fired[i]:
            bullet_Y[i] -= bullet_movement * dt
            fire_bullet(bullet_X[i], bullet_Y[i], i)
    # Enemy movement, bullet collisions, and the score-driven difficulty ramp.
    for i in range(num_enemies):
        # Any enemy reaching the player's row ends the game.
        if enemy_Y[i] >= 440:
            gameover = True
        for j in range(num_bullet):
            if bullet_fired[j]:
                collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j], bullet_Y[j])
                if collision:
                    reset_enemy(i)
                    reset_bullet(j)
                    score += 1
                    # Difficulty ramp, applied at most once per distinct score:
                    # every 3 points adds an enemy, every 10 speeds the game up,
                    # every 20 adds a bullet to the pool.
                    if score != 0 and previous_score != score:
                        if score % 3 == 0:
                            add_enemy()
                            print("added enemy")
                        if score % 10 == 0:
                            gamespeed += gamespeed_increment
                            print("increased gamespeed")
                        if score % 20 == 0:
                            add_bullet()
                            print("added bullet")
                        previous_score = score
        # Horizontal drift; gamespeed always increases the speed's magnitude.
        if enemy_X_movement[i] < 0:
            enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt
        else:
            enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt
        # Bounce off either screen edge and step down one row.
        if enemy_X[i] <= 1:
            enemy_X[i] = 2
            enemy_X_movement[i] = -enemy_X_movement[i]
            enemy_Y[i] += (enemy_Y_movement + gamespeed)
        elif enemy_X[i] >= 735:
            enemy_X[i] = 734
            enemy_X_movement[i] = -enemy_X_movement[i]
            enemy_Y[i] += (enemy_Y_movement + gamespeed)
        spawn_enemy(enemy_X[i], enemy_Y[i])
    # Draw the player and HUD, then flip the frame.
    player(player_X, player_Y)
    show_score()
    pygame.display.update()
|
[
"import math\nimport random\n\nimport pygame\n\npygame.init()\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\nclock = pygame.time.Clock()\n\npygame.display.set_caption('space invaders')\n\nbackground = pygame.image.load('background.png')\n\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\n\n# intro\nintro = True\nintro_text = \"SpaceInvaders\"\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\n\n# PlayButton\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = (SCREEN_WIDTH / 2) - play_button.get_width()\nplay_button_Y = (SCREEN_HEIGHT / (4 / 3)) - play_button.get_height()\n\n# GameOver\ngameover = False\ngameover_text = \"Game Over\"\nreplay_button = pygame.image.load('replay.png')\n\n# player\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\n\n# bullet\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n# enemy\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\n\n# gamespeedincrement\ngamespeed = 0\ngamespeed_increment = 0.05\n\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n 
screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + (math.pow(eY - bY, 2)))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render(\"Score: \" + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=((SCREEN_WIDTH / 2) + 3, (SCREEN_HEIGHT / 2) + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height():\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n 
enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if play_button_clicked():\n intro = False\n\n pygame.display.update()\n\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if play_button_clicked():\n reset()\n gameover = False\n\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n\n # playermovement\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n\n # bulletmovement\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n\n # enemy_movement\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j], bullet_Y[j])\n if collision:\n reset_enemy(i)\n 
reset_bullet(j)\n score += 1\n\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print(\"added enemy\")\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print(\"increased gamespeed\")\n if score % 20 == 0:\n add_bullet()\n print(\"added bullet\")\n previous_score = score\n\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += (enemy_Y_movement + gamespeed)\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += (enemy_Y_movement + gamespeed)\n\n spawn_enemy(enemy_X[i], enemy_Y[i])\n\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"import math\nimport random\nimport pygame\npygame.init()\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption('space invaders')\nbackground = pygame.image.load('background.png')\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\nintro = True\nintro_text = 'SpaceInvaders'\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = SCREEN_WIDTH / 2 - play_button.get_width()\nplay_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()\ngameover = False\ngameover_text = 'Game Over'\nreplay_button = pygame.image.load('replay.png')\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\ngamespeed = 0\ngamespeed_increment = 0.05\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n 
enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y 
= 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n intro = False\n pygame.display.update()\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n reset()\n gameover = False\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],\n bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print('added enemy')\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print('increased gamespeed')\n if 
score % 20 == 0:\n add_bullet()\n print('added bullet')\n previous_score = score\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n spawn_enemy(enemy_X[i], enemy_Y[i])\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"<import token>\npygame.init()\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption('space invaders')\nbackground = pygame.image.load('background.png')\nscore = 0\nprevious_score = 0\nscore_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 32)\ntextX = 10\ntestY = 10\nintro = True\nintro_text = 'SpaceInvaders'\nintro_font = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nintro_font2 = pygame.font.Font('arcade_weknow/ARCADE.otf', 64)\nplay_button = pygame.image.load('play-button.png')\nplay_button_X = SCREEN_WIDTH / 2 - play_button.get_width()\nplay_button_Y = SCREEN_HEIGHT / (4 / 3) - play_button.get_height()\ngameover = False\ngameover_text = 'Game Over'\nreplay_button = pygame.image.load('replay.png')\nplayer_image = pygame.image.load('spaceship.png')\nplayer_X = 370\nplayer_Y = 480\nplayer_movement = 0\nbullet_image = pygame.image.load('hot.png')\nbullet_X = []\nbullet_Y = []\nbullet_movement = 0.7\nbullet_fired = []\nnum_bullet = 1\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\nenemy_image = pygame.image.load('ufo.png')\nenemy_X = []\nenemy_Y = []\nenemy_X_movement = []\nenemy_Y_movement = 40\nnum_enemies = 2\ngamespeed = 0\ngamespeed_increment = 0.05\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 
150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired 
= []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\nrunning = True\nwhile running:\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n intro = False\n pygame.display.update()\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n reset()\n gameover = False\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],\n bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print('added enemy')\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print('increased gamespeed')\n if score % 20 == 0:\n add_bullet()\n 
print('added bullet')\n previous_score = score\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n spawn_enemy(enemy_X[i], enemy_Y[i])\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"<import token>\npygame.init()\n<assignment token>\npygame.display.set_caption('space invaders')\n<assignment token>\nfor i in range(num_bullet):\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n<assignment token>\nfor i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, 
play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\nwhile running:\n screen.fill((0, 0, 0))\n screen.blit(background, (0, 0))\n dt = clock.tick(60)\n while intro:\n show_intro()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n intro = False\n pygame.display.update()\n while gameover:\n game_over_screen()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if play_button_clicked():\n reset()\n gameover = False\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n player_movement = -0.2 - gamespeed\n if event.key == pygame.K_RIGHT:\n player_movement = 0.2 + gamespeed\n if event.key == pygame.K_SPACE:\n for i in range(num_bullet):\n if not bullet_fired[i]:\n bullet_X[i] = player_X\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n break\n if 
event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:\n player_movement = 0\n player_X += player_movement * dt\n if player_X <= 1:\n player_X = 1\n elif player_X >= 735:\n player_X = 735\n for i in range(num_bullet):\n if bullet_Y[i] <= 1:\n reset_bullet(i)\n if bullet_fired[i]:\n bullet_Y[i] -= bullet_movement * dt\n fire_bullet(bullet_X[i], bullet_Y[i], i)\n for i in range(num_enemies):\n if enemy_Y[i] >= 440:\n gameover = True\n for j in range(num_bullet):\n if bullet_fired[j]:\n collision = isCollion(enemy_X[i], enemy_Y[i], bullet_X[j],\n bullet_Y[j])\n if collision:\n reset_enemy(i)\n reset_bullet(j)\n score += 1\n if score != 0 and previous_score != score:\n if score % 3 == 0:\n add_enemy()\n print('added enemy')\n if score % 10 == 0:\n gamespeed += gamespeed_increment\n print('increased gamespeed')\n if score % 20 == 0:\n add_bullet()\n print('added bullet')\n previous_score = score\n if enemy_X_movement[i] < 0:\n enemy_X[i] += (enemy_X_movement[i] - gamespeed) * dt\n else:\n enemy_X[i] += (enemy_X_movement[i] + gamespeed) * dt\n if enemy_X[i] <= 1:\n enemy_X[i] = 2\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n elif enemy_X[i] >= 735:\n enemy_X[i] = 734\n enemy_X_movement[i] = -enemy_X_movement[i]\n enemy_Y[i] += enemy_Y_movement + gamespeed\n spawn_enemy(enemy_X[i], enemy_Y[i])\n player(player_X, player_Y)\n show_score()\n pygame.display.update()\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\ndef add_bullet():\n global num_bullet\n num_bullet += 1\n bullet_X.append(0)\n bullet_Y.append(player_Y)\n bullet_fired.append(False)\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < 
play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n\n\ndef spawn_enemy(x, y):\n screen.blit(enemy_image, (x, y))\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n 
):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\ndef isCollion(eX, eY, bX, bY):\n distance = math.sqrt(math.pow(eX - bX, 2) + math.pow(eY - bY, 2))\n if distance < 27:\n return True\n else:\n return False\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef 
game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef player(x, y):\n screen.blit(player_image, (x, y))\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, 
player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\ndef show_intro():\n show_big_text(intro_text)\n show_play_button()\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, 
gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\ndef show_replay_button():\n screen.blit(replay_button, (play_button_X, play_button_Y))\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n 
num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\ndef reset():\n global num_enemies, enemy_X, enemy_Y, player_X, player_Y, score, bullet_fired, gamespeed, num_bullet, bullet_X, bullet_Y\n num_enemies = 2\n enemy_X = []\n enemy_Y = []\n for i in 
range(num_enemies):\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(2)\n player_X = 370\n player_Y = 480\n score = 0\n bullet_fired = []\n bullet_fired.append(False)\n gamespeed = 0\n num_bullet = 1\n bullet_X = []\n bullet_X.append(0)\n bullet_Y = []\n bullet_Y.append(player_Y)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n\n\ndef play_button_clicked():\n click = pygame.mouse.get_pressed()\n if click[0] == 1:\n pos = pygame.mouse.get_pos()\n if play_button_X < pos[0] < play_button_X + play_button.get_width():\n if play_button_Y < pos[1] < play_button_Y + play_button.get_height(\n ):\n return True\n return False\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n\n\ndef show_big_text(s):\n text = intro_font.render(s, True, (89, 203, 255))\n text_rect = text.get_rect(center=(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\n screen.blit(text, text_rect)\n text2 = intro_font2.render(s, True, (250, 50, 183))\n text_rect2 = text.get_rect(center=(SCREEN_WIDTH / 2 + 3, SCREEN_HEIGHT /\n 2 + 3))\n screen.blit(text2, text_rect2)\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\ndef reset_enemy(index):\n enemy_X[index] = random.randint(0, 736)\n enemy_Y[index] = random.randint(50, 150)\n enemy_X_movement[index] = 0.2\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n\n\ndef add_enemy():\n global num_enemies\n enemy_X.append(random.randint(0, 736))\n enemy_Y.append(random.randint(50, 150))\n enemy_X_movement.append(0.2)\n num_enemies += 1\n\n\n<function token>\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n\n\ndef show_play_button():\n screen.blit(play_button, (play_button_X, play_button_Y))\n\n\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef fire_bullet(x, y, n):\n global bullet_fired\n bullet_fired[n] = True\n screen.blit(bullet_image, (x + 16, y + 10))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef game_over_screen():\n show_big_text(gameover_text)\n show_score()\n show_replay_button()\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef reset_bullet(n):\n global bullet_fired, bullet_Y\n bullet_fired[n] = False\n bullet_Y[n] = player_Y\n\n\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef show_score():\n text = score_font.render('Score: ' + str(score), True, (255, 255, 255))\n screen.blit(text, (textX, testY))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
1,084 |
e2e4adaa8f7f62662e0c2915faff1bed72986351
|
from django.contrib import admin
from .models import Hash
admin.site.register(Hash)
|
[
"from django.contrib import admin\nfrom .models import Hash\nadmin.site.register(Hash)\n",
"<import token>\nadmin.site.register(Hash)\n",
"<import token>\n<code token>\n"
] | false |
1,085 |
d8af43d24a2f2b99bc8b5098f251e017852d6d86
|
import subprocess
class BaseExecution:
def __init__(self, flag, parser):
self.flag = flag
self.parser = parser
def execute(self):
process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = process.communicate()
return_code = process.returncode
parser = self.parser(output, err, return_code)
result = parser.parse()
return result
|
[
"import subprocess\n\n\nclass BaseExecution:\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"import subprocess\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"<import token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n",
"<import token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n <function token>\n",
"<import token>\n\n\nclass BaseExecution:\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
1,086 |
2f2030107f3a23c0d2f404a838eaccc8b35ac410
|
fahrenheit = float(input("Enter a fahrenheit degree: "))
celcius = ((fahrenheit - 32) * 5) / 9
print("From fahrenheit to celcius", celcius)
|
[
"fahrenheit = float(input(\"Enter a fahrenheit degree: \"))\ncelcius = ((fahrenheit - 32) * 5) / 9\nprint(\"From fahrenheit to celcius\", celcius)",
"fahrenheit = float(input('Enter a fahrenheit degree: '))\ncelcius = (fahrenheit - 32) * 5 / 9\nprint('From fahrenheit to celcius', celcius)\n",
"<assignment token>\nprint('From fahrenheit to celcius', celcius)\n",
"<assignment token>\n<code token>\n"
] | false |
1,087 |
c4fbf206482a04f3e2d2aa98a0dbf525a176c4e7
|
__author__ = 'Joe'
import sys
sys.path.insert(0,'../src/')
import grocery_functions
import unittest
class TestGroceryFuncs(unittest.TestCase):
def test_getRecipeNames(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
self.assertTrue(recipe_names[0] == "Cajun Chicken & Rice")
self.assertTrue(recipe_names[1] == "Chicken Curry in a Hurry")
self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')
self.assertTrue(recipe_names[3] == 'Healthy Roasted Chicken and Veggies (one pan)')
self.assertTrue(recipe_names[4] == 'Kielbasa, Pepper, Onion and Potato Hash')
def test_getIngredientsFromFile(self):
list=grocery_functions.get_ingredients_from_recipe_file("test-recipes\Kielbasa, Pepper, Onion and Potato Hash.txt")
self.assertTrue(list[0].name == 'turkey kielbasa')
self.assertTrue(list[0].unit == 'ounce')
self.assertTrue(list[0].number == '14')
self.assertTrue(list[2].name == 'non-green bell pepper')
self.assertTrue(list[2].unit == '')
self.assertTrue(list[2].number == '1')
self.assertTrue(list[6].name == 'salt')
self.assertTrue(list[6].unit == '')
self.assertTrue(list[6].number == '1')
def test_getTagsFromFile(self):
list=grocery_functions.get_tags_from_recipe_file("test-recipes\Chicken Curry in a Hurry.txt")
self.assertTrue(list[0] == 'chicken')
self.assertTrue(list[1] == 'easy')
self.assertTrue(list[2] == 'stove')
def test_getRecipeFromFile(self):
list=grocery_functions.get_recipe_from_recipe_file("test-recipes\Healthy Roasted Chicken and Veggies (one pan).txt")
self.assertTrue(list[2]=="1 cup bell pepper, chopped (any colors you like)")
self.assertTrue(list[10]=="1 teaspoon italian seasoning")
self.assertTrue(list[15]=="Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, ")
def test_condenseList(self):
recipe_names = grocery_functions.get_recipe_names("test-recipes")
grocery_list=[]
for recipe in recipe_names:
grocery_list += grocery_functions.get_ingredients_from_recipe_file("test-recipes\\"+recipe+".txt")
grocery_list=grocery_functions.condense_grocery_list(grocery_list)
# grocery_functions.print_grocery_list(grocery_list)
# grocery_functions.sort_and_print_grocery_List(grocery_list, "Smiths-Eu-JT-ItemDepartments.txt")
def test_makeAllIngredientsFile(self):
grocery_functions.make_all_ingredients_file()
def test_getItemDeptDicts(self):
grocery_functions.get_item_dept_dicts("Smiths-Eu-JT-ItemDepartments.txt")
def test_checkRecipeFormat(self):
errors=grocery_functions.check_recipe_format("test-recipes", False)
self.assertTrue(errors == [])
errors=grocery_functions.check_recipe_format("broken-test-recipes", False)
self.assertTrue('invalid format, "1 lb, chicken breasts" in: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('invalid heading, "wrong_header" in file: broken-test-recipes//broken_recipe.txt' in errors)
self.assertTrue('Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors)
def test_update_default_ing_dept_file(self):
grocery_functions.update_default_ing_dept_file(grocery_functions.get_all_ingredients("test-recipes"))
def suite(self):
return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"__author__ = 'Joe'\nimport sys\nsys.path.insert(0,'../src/')\n\nimport grocery_functions\nimport unittest\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names(\"test-recipes\")\n self.assertTrue(recipe_names[0] == \"Cajun Chicken & Rice\")\n self.assertTrue(recipe_names[1] == \"Chicken Curry in a Hurry\")\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] == 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] == 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list=grocery_functions.get_ingredients_from_recipe_file(\"test-recipes\\Kielbasa, Pepper, Onion and Potato Hash.txt\")\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list=grocery_functions.get_tags_from_recipe_file(\"test-recipes\\Chicken Curry in a Hurry.txt\")\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list=grocery_functions.get_recipe_from_recipe_file(\"test-recipes\\Healthy Roasted Chicken and Veggies (one pan).txt\")\n self.assertTrue(list[2]==\"1 cup bell pepper, chopped (any colors you like)\")\n self.assertTrue(list[10]==\"1 teaspoon italian seasoning\")\n self.assertTrue(list[15]==\"Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, \")\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names(\"test-recipes\")\n grocery_list=[]\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\"test-recipes\\\\\"+recipe+\".txt\")\n grocery_list=grocery_functions.condense_grocery_list(grocery_list)\n # grocery_functions.print_grocery_list(grocery_list)\n # grocery_functions.sort_and_print_grocery_List(grocery_list, \"Smiths-Eu-JT-ItemDepartments.txt\")\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\"Smiths-Eu-JT-ItemDepartments.txt\")\n\n def test_checkRecipeFormat(self):\n errors=grocery_functions.check_recipe_format(\"test-recipes\", False)\n self.assertTrue(errors == [])\n errors=grocery_functions.check_recipe_format(\"broken-test-recipes\", False)\n self.assertTrue('invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt' in errors)\n self.assertTrue('invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt' in errors)\n self.assertTrue('Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors)\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.get_all_ingredients(\"test-recipes\"))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)",
"__author__ = 'Joe'\nimport sys\nsys.path.insert(0, '../src/')\nimport grocery_functions\nimport unittest\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"__author__ = 'Joe'\n<import token>\nsys.path.insert(0, '../src/')\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"<assignment token>\n<import token>\nsys.path.insert(0, '../src/')\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n unittest.TextTestRunner(verbosity=2).run(suite)\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n\n def test_condenseList(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n grocery_list = []\n for recipe in recipe_names:\n grocery_list += grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\' + recipe + '.txt')\n grocery_list = grocery_functions.condense_grocery_list(grocery_list)\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n\n def test_getTagsFromFile(self):\n list = grocery_functions.get_tags_from_recipe_file(\n 'test-recipes\\\\Chicken Curry in a Hurry.txt')\n self.assertTrue(list[0] == 'chicken')\n self.assertTrue(list[1] == 'easy')\n self.assertTrue(list[2] == 'stove')\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n\n def test_getIngredientsFromFile(self):\n list = grocery_functions.get_ingredients_from_recipe_file(\n 'test-recipes\\\\Kielbasa, Pepper, Onion and Potato Hash.txt')\n self.assertTrue(list[0].name == 'turkey kielbasa')\n self.assertTrue(list[0].unit == 'ounce')\n self.assertTrue(list[0].number == '14')\n self.assertTrue(list[2].name == 'non-green bell pepper')\n self.assertTrue(list[2].unit == '')\n self.assertTrue(list[2].number == '1')\n self.assertTrue(list[6].name == 'salt')\n self.assertTrue(list[6].unit == '')\n self.assertTrue(list[6].number == '1')\n <function token>\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. 
Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n\n def test_update_default_ing_dept_file(self):\n grocery_functions.update_default_ing_dept_file(grocery_functions.\n get_all_ingredients('test-recipes'))\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n\n def test_getItemDeptDicts(self):\n grocery_functions.get_item_dept_dicts(\n 'Smiths-Eu-JT-ItemDepartments.txt')\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n <function token>\n\n def test_checkRecipeFormat(self):\n errors = grocery_functions.check_recipe_format('test-recipes', False)\n self.assertTrue(errors == [])\n errors = grocery_functions.check_recipe_format('broken-test-recipes',\n False)\n self.assertTrue(\n 'invalid format, \"1 lb, chicken breasts\" in: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'invalid heading, \"wrong_header\" in file: broken-test-recipes//broken_recipe.txt'\n in errors)\n self.assertTrue(\n 'Blank recipe in: broken-test-recipes//broken_recipe.txt' in errors\n )\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n\n def test_getRecipeFromFile(self):\n list = grocery_functions.get_recipe_from_recipe_file(\n 'test-recipes\\\\Healthy Roasted Chicken and Veggies (one pan).txt')\n self.assertTrue(list[2] ==\n '1 cup bell pepper, chopped (any colors you like)')\n self.assertTrue(list[10] == '1 teaspoon italian seasoning')\n self.assertTrue(list[15] ==\n 'Place the chicken and veggies in a medium roasting dish or sheet pan. Add the olive oil, '\n )\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n <function token>\n <function token>\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_makeAllIngredientsFile(self):\n grocery_functions.make_all_ingredients_file()\n <function token>\n <function token>\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n\n def test_getRecipeNames(self):\n recipe_names = grocery_functions.get_recipe_names('test-recipes')\n self.assertTrue(recipe_names[0] == 'Cajun Chicken & Rice')\n self.assertTrue(recipe_names[1] == 'Chicken Curry in a Hurry')\n self.assertTrue(recipe_names[2] == 'Chicken_Zucchini_and_Prosciutto')\n self.assertTrue(recipe_names[3] ==\n 'Healthy Roasted Chicken and Veggies (one pan)')\n self.assertTrue(recipe_names[4] ==\n 'Kielbasa, Pepper, Onion and Potato Hash')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def suite(self):\n return unittest.TestLoader().loadTestsFromTestCase(TestGroceryFuncs)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n\n\nclass TestGroceryFuncs(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<code token>\n<import token>\n<class token>\n<code token>\n"
] | false |
1,088 |
a7db627c49b53cd3a073d866a0373336a46b4053
|
from graphviz import Digraph
dot = Digraph()
dot.edge("BaseException", "SystemExit")
dot.edge("BaseException", "KeyboardInterrupt")
dot.edge("BaseException", "GeneratorExit")
dot.edge("BaseException", "Exception")
dot.edge("Exception", "StopIteration")
dot.edge("Exception", "StopAsyncIteration")
dot.edge("Exception", "ArithmeticError")
dot.edge("ArithmeticError", "FloatingPointError")
dot.edge("ArithmeticError", "OverflowError")
dot.edge("ArithmeticError", "ZeroDivisionError")
dot.edge("Exception", "AssertionError")
dot.edge("Exception", "AttributeError")
dot.edge("Exception", "BufferError")
dot.edge("Exception", "EOFError")
dot.edge("Exception", "ImportError")
dot.edge("ImportError", "ModuleNotFoundError")
dot.edge("Exception", "LookupError")
dot.edge("LookupError", "IndexError")
dot.edge("LookupError", "KeyError")
dot.edge("Exception", "MemoryError")
dot.edge("Exception", "NameError")
dot.edge("NameError", "UnboundLocalError")
dot.edge("Exception", "OSError")
dot.edge("OSError", "BlockingIOError")
dot.edge("OSError", "ChildProcessError")
dot.edge("OSError", "ConnectionError")
dot.edge("ConnectionError", "BrokenPipeError")
dot.edge("ConnectionError", "ConnectionAbortedError")
dot.edge("ConnectionError", "ConnectionRefusedError")
dot.edge("ConnectionError", "ConnectionResetError")
dot.edge("OSError", "FileExistsError")
dot.edge("OSError", "FileNotFoundError")
dot.edge("OSError", "InterruptedError")
dot.edge("OSError", "IsADirectoryError")
dot.edge("OSError", "NotADirectoryError")
dot.edge("OSError", "PermissionError")
dot.edge("OSError", "ProcessLookupError")
dot.edge("OSError", "TimeoutError")
dot.edge("Exception", "ReferenceError")
dot.edge("Exception", "RuntimeError")
dot.edge("RuntimeError", "NotImplementedError")
dot.edge("RuntimeError", "RecursionError")
dot.edge("Exception", "SyntaxError")
dot.edge("SyntaxError", "IndentationError")
dot.edge("SyntaxError", "TabError")
dot.edge("Exception", "SystemError")
dot.edge("Exception", "TypeError")
dot.edge("Exception", "ValueError")
dot.edge("ValueError", "UnicodeError")
dot.edge("UnicodeError", "UnicodeDecodeError")
dot.edge("UnicodeError", "UnicodeEncodeError")
dot.edge("UnicodeError", "UnicodeTranslateError")
dot_source = dot.source
with open("exceptions.dot", "w") as dot_file:
dot_file.write(dot_source)
|
[
"from graphviz import Digraph\n\ndot = Digraph()\n\ndot.edge(\"BaseException\", \"SystemExit\")\ndot.edge(\"BaseException\", \"KeyboardInterrupt\")\ndot.edge(\"BaseException\", \"GeneratorExit\")\ndot.edge(\"BaseException\", \"Exception\")\ndot.edge(\"Exception\", \"StopIteration\")\ndot.edge(\"Exception\", \"StopAsyncIteration\")\ndot.edge(\"Exception\", \"ArithmeticError\")\ndot.edge(\"ArithmeticError\", \"FloatingPointError\")\ndot.edge(\"ArithmeticError\", \"OverflowError\")\ndot.edge(\"ArithmeticError\", \"ZeroDivisionError\")\ndot.edge(\"Exception\", \"AssertionError\")\ndot.edge(\"Exception\", \"AttributeError\")\ndot.edge(\"Exception\", \"BufferError\")\ndot.edge(\"Exception\", \"EOFError\")\ndot.edge(\"Exception\", \"ImportError\")\ndot.edge(\"ImportError\", \"ModuleNotFoundError\")\ndot.edge(\"Exception\", \"LookupError\")\ndot.edge(\"LookupError\", \"IndexError\")\ndot.edge(\"LookupError\", \"KeyError\")\ndot.edge(\"Exception\", \"MemoryError\")\ndot.edge(\"Exception\", \"NameError\")\ndot.edge(\"NameError\", \"UnboundLocalError\")\ndot.edge(\"Exception\", \"OSError\")\ndot.edge(\"OSError\", \"BlockingIOError\")\ndot.edge(\"OSError\", \"ChildProcessError\")\ndot.edge(\"OSError\", \"ConnectionError\")\ndot.edge(\"ConnectionError\", \"BrokenPipeError\")\ndot.edge(\"ConnectionError\", \"ConnectionAbortedError\")\ndot.edge(\"ConnectionError\", \"ConnectionRefusedError\")\ndot.edge(\"ConnectionError\", \"ConnectionResetError\")\ndot.edge(\"OSError\", \"FileExistsError\")\ndot.edge(\"OSError\", \"FileNotFoundError\")\ndot.edge(\"OSError\", \"InterruptedError\")\ndot.edge(\"OSError\", \"IsADirectoryError\")\ndot.edge(\"OSError\", \"NotADirectoryError\")\ndot.edge(\"OSError\", \"PermissionError\")\ndot.edge(\"OSError\", \"ProcessLookupError\")\ndot.edge(\"OSError\", \"TimeoutError\")\ndot.edge(\"Exception\", \"ReferenceError\")\ndot.edge(\"Exception\", \"RuntimeError\")\ndot.edge(\"RuntimeError\", \"NotImplementedError\")\ndot.edge(\"RuntimeError\", 
\"RecursionError\")\ndot.edge(\"Exception\", \"SyntaxError\")\ndot.edge(\"SyntaxError\", \"IndentationError\")\ndot.edge(\"SyntaxError\", \"TabError\")\ndot.edge(\"Exception\", \"SystemError\")\ndot.edge(\"Exception\", \"TypeError\")\ndot.edge(\"Exception\", \"ValueError\")\ndot.edge(\"ValueError\", \"UnicodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeDecodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeEncodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeTranslateError\")\n\ndot_source = dot.source\n\nwith open(\"exceptions.dot\", \"w\") as dot_file:\n dot_file.write(dot_source)\n",
"from graphviz import Digraph\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"<import token>\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"<import token>\n<assignment token>\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\n<assignment token>\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
1,089 |
438efbaf35401a29ea5408fee3b49b85f237760e
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 20:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0010_auto_20170512_2248'),
]
operations = [
migrations.AlterField(
model_name='classroom',
name='subject5teacher',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),
),
]
|
[
"# -*- coding: utf-8 -*-\r\n# Generated by Django 1.11 on 2017-05-12 20:48\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('home', '0010_auto_20170512_2248'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='classroom',\r\n name='subject5teacher',\r\n field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='home.Teacher', verbose_name='Chemistry'),\r\n ),\r\n ]\r\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home', '0010_auto_20170512_2248')]\n operations = [migrations.AlterField(model_name='classroom', name=\n 'subject5teacher', field=models.ForeignKey(default=None, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to=\n 'home.Teacher', verbose_name='Chemistry'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
1,090 |
22523304c9e2ce1339a7527cdbd67a81c780d806
|
"""
We have created mash sketches of the GPDB database, the MGV database, and the SDSU phage, and
this will figure out the top hits and summarize their familes.
"""
import os
import sys
import argparse
def best_hits(distf, maxscore, verbose=False):
"""
Find the best hits
"""
bh = {}
allph = set()
with open(distf, 'r') as din:
for li in din:
p = li.strip().split("\t")
if float(p[3]) <= maxscore:
if p[0] not in bh:
bh[p[0]] = set()
bh[p[0]].add(p[1])
allph.add(p[0])
if verbose:
for p in allph:
if p not in bh:
sys.stderr.write(f"WARNING: With a score of {maxscore} did not find any hits to {p}\n")
return bh
def find_vc(mdf, genomecol, vccol, verbose=False):
"""
Read the metadata file and return a hash of genome->viral cluster
"""
vc = {}
with open(mdf, 'r') as fin:
for li in fin:
p = li.strip().split("\t")
vc[p[genomecol]] = p[vccol]
if verbose:
sys.stderr.write(f"Found {len(vc)} virus clusters in {mdf}\n")
return vc
def count_hits(bh, vc, verbose=False):
"""
Count the vc hits per genome
"""
hc = {}
for g in bh:
hc[g] = {}
for b in bh[g]:
hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1
besthit = None
bhc = 0
for h in hc[g]:
if hc[g][h] > bhc:
bhc = hc[g][h]
besthit = h
#print(f"{g}\t{besthit}\t{bhc}\t{len(bh[g])}")
print(f"{g}\t{besthit}")
return hc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-d', help='mash distance file', required=True)
parser.add_argument('-c', help='distance cutoff score, default = 0', default=0, type=float)
parser.add_argument('-m', help='metadata file', required=True)
parser.add_argument('-g', help='genome column, default = 0', default=0, type=int)
parser.add_argument('-l', help='virus cluster col in the metadata file', type=int, required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
bh = best_hits(args.d, args.c, args.v)
vc = find_vc(args.m, args.g, args.l, args.v)
count_hits(bh, vc,args.v)
|
[
"\"\"\"\nWe have created mash sketches of the GPDB database, the MGV database, and the SDSU phage, and\nthis will figure out the top hits and summarize their familes.\n\"\"\"\n\nimport os\nimport sys\nimport argparse\n\n\ndef best_hits(distf, maxscore, verbose=False):\n \"\"\"\n Find the best hits\n \"\"\"\n bh = {}\n allph = set()\n with open(distf, 'r') as din:\n for li in din:\n p = li.strip().split(\"\\t\")\n if float(p[3]) <= maxscore:\n if p[0] not in bh:\n bh[p[0]] = set()\n bh[p[0]].add(p[1])\n allph.add(p[0])\n\n if verbose:\n for p in allph:\n if p not in bh:\n sys.stderr.write(f\"WARNING: With a score of {maxscore} did not find any hits to {p}\\n\")\n return bh\n\ndef find_vc(mdf, genomecol, vccol, verbose=False):\n \"\"\"\n Read the metadata file and return a hash of genome->viral cluster\n \"\"\"\n vc = {}\n with open(mdf, 'r') as fin:\n for li in fin:\n p = li.strip().split(\"\\t\")\n vc[p[genomecol]] = p[vccol]\n if verbose:\n sys.stderr.write(f\"Found {len(vc)} virus clusters in {mdf}\\n\")\n return vc\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n #print(f\"{g}\\t{besthit}\\t{bhc}\\t{len(bh[g])}\")\n print(f\"{g}\\t{besthit}\")\n\n return hc\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=' ')\n parser.add_argument('-d', help='mash distance file', required=True)\n parser.add_argument('-c', help='distance cutoff score, default = 0', default=0, type=float)\n parser.add_argument('-m', help='metadata file', required=True)\n parser.add_argument('-g', help='genome column, default = 0', default=0, type=int)\n parser.add_argument('-l', help='virus cluster col in the metadata file', type=int, required=True)\n parser.add_argument('-v', help='verbose output', action='store_true')\n args 
= parser.parse_args()\n\n bh = best_hits(args.d, args.c, args.v)\n vc = find_vc(args.m, args.g, args.l, args.v)\n count_hits(bh, vc,args.v)\n",
"<docstring token>\nimport os\nimport sys\nimport argparse\n\n\ndef best_hits(distf, maxscore, verbose=False):\n \"\"\"\n Find the best hits\n \"\"\"\n bh = {}\n allph = set()\n with open(distf, 'r') as din:\n for li in din:\n p = li.strip().split('\\t')\n if float(p[3]) <= maxscore:\n if p[0] not in bh:\n bh[p[0]] = set()\n bh[p[0]].add(p[1])\n allph.add(p[0])\n if verbose:\n for p in allph:\n if p not in bh:\n sys.stderr.write(\n f\"\"\"WARNING: With a score of {maxscore} did not find any hits to {p}\n\"\"\"\n )\n return bh\n\n\ndef find_vc(mdf, genomecol, vccol, verbose=False):\n \"\"\"\n Read the metadata file and return a hash of genome->viral cluster\n \"\"\"\n vc = {}\n with open(mdf, 'r') as fin:\n for li in fin:\n p = li.strip().split('\\t')\n vc[p[genomecol]] = p[vccol]\n if verbose:\n sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\\n')\n return vc\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n print(f'{g}\\t{besthit}')\n return hc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=' ')\n parser.add_argument('-d', help='mash distance file', required=True)\n parser.add_argument('-c', help='distance cutoff score, default = 0',\n default=0, type=float)\n parser.add_argument('-m', help='metadata file', required=True)\n parser.add_argument('-g', help='genome column, default = 0', default=0,\n type=int)\n parser.add_argument('-l', help='virus cluster col in the metadata file',\n type=int, required=True)\n parser.add_argument('-v', help='verbose output', action='store_true')\n args = parser.parse_args()\n bh = best_hits(args.d, args.c, args.v)\n vc = find_vc(args.m, args.g, args.l, args.v)\n count_hits(bh, vc, args.v)\n",
"<docstring token>\n<import token>\n\n\ndef best_hits(distf, maxscore, verbose=False):\n \"\"\"\n Find the best hits\n \"\"\"\n bh = {}\n allph = set()\n with open(distf, 'r') as din:\n for li in din:\n p = li.strip().split('\\t')\n if float(p[3]) <= maxscore:\n if p[0] not in bh:\n bh[p[0]] = set()\n bh[p[0]].add(p[1])\n allph.add(p[0])\n if verbose:\n for p in allph:\n if p not in bh:\n sys.stderr.write(\n f\"\"\"WARNING: With a score of {maxscore} did not find any hits to {p}\n\"\"\"\n )\n return bh\n\n\ndef find_vc(mdf, genomecol, vccol, verbose=False):\n \"\"\"\n Read the metadata file and return a hash of genome->viral cluster\n \"\"\"\n vc = {}\n with open(mdf, 'r') as fin:\n for li in fin:\n p = li.strip().split('\\t')\n vc[p[genomecol]] = p[vccol]\n if verbose:\n sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\\n')\n return vc\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n print(f'{g}\\t{besthit}')\n return hc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=' ')\n parser.add_argument('-d', help='mash distance file', required=True)\n parser.add_argument('-c', help='distance cutoff score, default = 0',\n default=0, type=float)\n parser.add_argument('-m', help='metadata file', required=True)\n parser.add_argument('-g', help='genome column, default = 0', default=0,\n type=int)\n parser.add_argument('-l', help='virus cluster col in the metadata file',\n type=int, required=True)\n parser.add_argument('-v', help='verbose output', action='store_true')\n args = parser.parse_args()\n bh = best_hits(args.d, args.c, args.v)\n vc = find_vc(args.m, args.g, args.l, args.v)\n count_hits(bh, vc, args.v)\n",
"<docstring token>\n<import token>\n\n\ndef best_hits(distf, maxscore, verbose=False):\n \"\"\"\n Find the best hits\n \"\"\"\n bh = {}\n allph = set()\n with open(distf, 'r') as din:\n for li in din:\n p = li.strip().split('\\t')\n if float(p[3]) <= maxscore:\n if p[0] not in bh:\n bh[p[0]] = set()\n bh[p[0]].add(p[1])\n allph.add(p[0])\n if verbose:\n for p in allph:\n if p not in bh:\n sys.stderr.write(\n f\"\"\"WARNING: With a score of {maxscore} did not find any hits to {p}\n\"\"\"\n )\n return bh\n\n\ndef find_vc(mdf, genomecol, vccol, verbose=False):\n \"\"\"\n Read the metadata file and return a hash of genome->viral cluster\n \"\"\"\n vc = {}\n with open(mdf, 'r') as fin:\n for li in fin:\n p = li.strip().split('\\t')\n vc[p[genomecol]] = p[vccol]\n if verbose:\n sys.stderr.write(f'Found {len(vc)} virus clusters in {mdf}\\n')\n return vc\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n print(f'{g}\\t{besthit}')\n return hc\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef best_hits(distf, maxscore, verbose=False):\n \"\"\"\n Find the best hits\n \"\"\"\n bh = {}\n allph = set()\n with open(distf, 'r') as din:\n for li in din:\n p = li.strip().split('\\t')\n if float(p[3]) <= maxscore:\n if p[0] not in bh:\n bh[p[0]] = set()\n bh[p[0]].add(p[1])\n allph.add(p[0])\n if verbose:\n for p in allph:\n if p not in bh:\n sys.stderr.write(\n f\"\"\"WARNING: With a score of {maxscore} did not find any hits to {p}\n\"\"\"\n )\n return bh\n\n\n<function token>\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n print(f'{g}\\t{besthit}')\n return hc\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\ndef count_hits(bh, vc, verbose=False):\n \"\"\"\n Count the vc hits per genome\n \"\"\"\n hc = {}\n for g in bh:\n hc[g] = {}\n for b in bh[g]:\n hc[g][vc[b]] = hc[g].get(vc[b], 0) + 1\n besthit = None\n bhc = 0\n for h in hc[g]:\n if hc[g][h] > bhc:\n bhc = hc[g][h]\n besthit = h\n print(f'{g}\\t{besthit}')\n return hc\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
1,091 |
669eb2e898c3a127ae01e0ee3020a3674e5e340d
|
from yoloPydarknet import pydarknetYOLO
import cv2
import imutils
import time
yolo = pydarknetYOLO(obdata="../darknet/cfg/coco.data", weights="yolov3.weights",
cfg="../darknet/cfg/yolov3.cfg")
video_out = "yolo_output.avi"
start_time = time.time()
if __name__ == "__main__":
VIDEO_IN = cv2.VideoCapture(0)
if(video_out!=""):
width = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_WIDTH)) # float
height = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(video_out,fourcc, 30.0, (int(width),int(height)))
frameID = 0
while True:
hasFrame, frame = VIDEO_IN.read()
# Stop the program if reached end of video
if not hasFrame:
print("Done processing !!!")
print("--- %s seconds ---" % (time.time() - start_time))
break
yolo.getObject(frame, labelWant="", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))
print ("Object counts:", yolo.objCounts)
cv2.imshow("Frame", imutils.resize(frame, width=850))
if(video_out!=""):
out.write(frame)
k = cv2.waitKey(1)
if k == 0xFF & ord("q"):
out.release()
break
|
[
"from yoloPydarknet import pydarknetYOLO\nimport cv2\nimport imutils\nimport time\n\nyolo = pydarknetYOLO(obdata=\"../darknet/cfg/coco.data\", weights=\"yolov3.weights\", \n cfg=\"../darknet/cfg/yolov3.cfg\")\nvideo_out = \"yolo_output.avi\"\n\nstart_time = time.time()\n\nif __name__ == \"__main__\":\n\n VIDEO_IN = cv2.VideoCapture(0)\n if(video_out!=\"\"):\n width = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_WIDTH)) # float\n height = int(VIDEO_IN.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n out = cv2.VideoWriter(video_out,fourcc, 30.0, (int(width),int(height)))\n\n frameID = 0\n while True:\n hasFrame, frame = VIDEO_IN.read()\n # Stop the program if reached end of video\n if not hasFrame:\n print(\"Done processing !!!\")\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n break\n\n yolo.getObject(frame, labelWant=\"\", drawBox=True, bold=1, textsize=0.6, bcolor=(0,0,255), tcolor=(255,255,255))\n print (\"Object counts:\", yolo.objCounts)\n cv2.imshow(\"Frame\", imutils.resize(frame, width=850))\n if(video_out!=\"\"):\n out.write(frame)\n\n k = cv2.waitKey(1)\n if k == 0xFF & ord(\"q\"):\n out.release()\n break\n"
] | true |
1,092 |
6b55a9061bb118558e9077c77e18cfc81f3fa034
|
#
# @lc app=leetcode id=1121 lang=python3
#
# [1121] Divide Array Into Increasing Sequences
#
# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/
#
# algorithms
# Hard (53.30%)
# Likes: 32
# Dislikes: 11
# Total Accepted: 1.7K
# Total Submissions: 3.2K
# Testcase Example: '[1,2,2,3,3,4,4]\n3'
#
# Given a non-decreasing array of positive integers nums and an integer K, find
# out if this array can be divided into one or more disjoint increasing
# subsequences of length at least K.
#
#
#
# Example 1:
#
#
# Input: nums = [1,2,2,3,3,4,4], K = 3
# Output: true
# Explanation:
# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with
# lengths at least 3 each.
#
#
# Example 2:
#
#
# Input: nums = [5,6,6,7,8], K = 3
# Output: false
# Explanation:
# There is no way to divide the array using the conditions required.
#
#
#
#
# Note:
#
#
# 1 <= nums.length <= 10^5
# 1 <= K <= nums.length
# 1 <= nums[i] <= 10^5
#
#
#
# @lc code=start
from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:
return len(nums) >= K * max(Counter(nums).values())
# cur, groups = 1, 1
# for i in range(1, len(nums)):
# if nums[i] > nums[i - 1]:
# cur = 1
# else:
# cur += 1
# groups = max(groups, cur)
# return len(nums) >= K * groups
# @lc code=end
|
[
"#\n# @lc app=leetcode id=1121 lang=python3\n#\n# [1121] Divide Array Into Increasing Sequences\n#\n# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/\n#\n# algorithms\n# Hard (53.30%)\n# Likes: 32\n# Dislikes: 11\n# Total Accepted: 1.7K\n# Total Submissions: 3.2K\n# Testcase Example: '[1,2,2,3,3,4,4]\\n3'\n#\n# Given a non-decreasing array of positive integers nums and an integer K, find\n# out if this array can be divided into one or more disjoint increasing\n# subsequences of length at least K.\n# \n# \n# \n# Example 1:\n# \n# \n# Input: nums = [1,2,2,3,3,4,4], K = 3\n# Output: true\n# Explanation: \n# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with\n# lengths at least 3 each.\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [5,6,6,7,8], K = 3\n# Output: false\n# Explanation: \n# There is no way to divide the array using the conditions required.\n# \n# \n# \n# \n# Note:\n# \n# \n# 1 <= nums.length <= 10^5\n# 1 <= K <= nums.length\n# 1 <= nums[i] <= 10^5\n# \n# \n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution:\n def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:\n\n return len(nums) >= K * max(Counter(nums).values())\n\n # cur, groups = 1, 1\n # for i in range(1, len(nums)):\n # if nums[i] > nums[i - 1]:\n # cur = 1\n # else:\n # cur += 1\n # groups = max(groups, cur)\n # return len(nums) >= K * groups\n \n# @lc code=end\n\n",
"from collections import Counter\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"<import token>\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"<import token>\n\n\nclass Solution:\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
1,093 |
d7524a455e62594e321b67f0a32a5c3a7437c1d6
|
# 引入基础的工作表
from openpyxl import Workbook
# 引入增强的修改功能
from openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors
# import openpyxl
def make_example():
# 设定文件目录
addr = './example.xlsx'
# 初始化文件,切换到活动的工作表
work_book = Workbook()
# 读取文件采用
# work_book = openpyxl.load_workbook(addr)
work_sheet = work_book.active
# 直接对表格对象赋值
work_sheet['A1'] = 'Hello World!'
# 采用指定行列的方法赋值(第2行,第二列)
select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')
# 添加两行数据到表格
work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])
work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])
# 合并两个单元格作为示范
work_sheet.merge_cells('A3:B3')
work_sheet.merge_cells('A4:B4')
# 遍历表格,读取表格中的数据
# 初始化字体
SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)
# 初始化表格对齐模板
CENTER_ALIGN = Alignment(horizontal='center',vertical='center')
# 初始化表格边框样式
LE,RI,TO,BO = [Side(style='thin',color='000000')]*4
THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)
# 遍历表格,读取表格中的数据
for row in work_sheet['A1:D4']:
for cell in row:
# 把样式赋值给表格
cell.font = SIMSUN_20_BOLD
cell.alignment = CENTER_ALIGN
cell.border = THIN_BORDER
# print(cell.value)
# 设置行高
work_sheet.row_dimensions[1].height=15
work_sheet.row_dimensions[2].height=20
for row_letter in range(3,5,1):
work_sheet.row_dimensions[row_letter].height=17
# 设置列宽
for col_letter in ['A','B']:
work_sheet.column_dimensions[col_letter].width=20
work_sheet.column_dimensions['C'].width=17
work_sheet.column_dimensions['D'].width=25
# 设置颜色
COLOR_MAP = ['ff9900','000000']
COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])
BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])
work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD
work_sheet['A1'].fill = BG_FILL
# 保存到设定的addr
work_book.save(addr)
if __name__ == "__main__":
make_example()
|
[
"# 引入基础的工作表\r\nfrom openpyxl import Workbook \r\n# 引入增强的修改功能\r\nfrom openpyxl.styles import Font,Alignment,Border,Side,PatternFill,colors\r\n# import openpyxl\r\ndef make_example():\r\n # 设定文件目录\r\n addr = './example.xlsx'\r\n # 初始化文件,切换到活动的工作表\r\n work_book = Workbook()\r\n # 读取文件采用\r\n # work_book = openpyxl.load_workbook(addr)\r\n work_sheet = work_book.active\r\n # 直接对表格对象赋值\r\n work_sheet['A1'] = 'Hello World!'\r\n # 采用指定行列的方法赋值(第2行,第二列)\r\n select_cell = work_sheet.cell(row=2,column=2,value='I select this cell')\r\n # 添加两行数据到表格\r\n work_sheet.append(['The quick brown fox',' jumps over ','a lazy dog.'])\r\n work_sheet.append(['The quick brown fox',' ',' jumps over ','a lazy dog.'])\r\n # 合并两个单元格作为示范\r\n work_sheet.merge_cells('A3:B3')\r\n work_sheet.merge_cells('A4:B4')\r\n # 遍历表格,读取表格中的数据\r\n # 初始化字体\r\n SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True)\r\n # 初始化表格对齐模板\r\n CENTER_ALIGN = Alignment(horizontal='center',vertical='center')\r\n # 初始化表格边框样式\r\n LE,RI,TO,BO = [Side(style='thin',color='000000')]*4\r\n THIN_BORDER = Border(left=LE,right=RI,top=TO,bottom=BO)\r\n # 遍历表格,读取表格中的数据\r\n for row in work_sheet['A1:D4']:\r\n for cell in row:\r\n # 把样式赋值给表格\r\n cell.font = SIMSUN_20_BOLD\r\n cell.alignment = CENTER_ALIGN\r\n cell.border = THIN_BORDER\r\n # print(cell.value)\r\n # 设置行高\r\n work_sheet.row_dimensions[1].height=15\r\n work_sheet.row_dimensions[2].height=20\r\n for row_letter in range(3,5,1):\r\n work_sheet.row_dimensions[row_letter].height=17\r\n # 设置列宽\r\n for col_letter in ['A','B']:\r\n work_sheet.column_dimensions[col_letter].width=20\r\n work_sheet.column_dimensions['C'].width=17\r\n work_sheet.column_dimensions['D'].width=25\r\n # 设置颜色\r\n COLOR_MAP = ['ff9900','000000']\r\n COLOR_SIMSUN_20_BOLD = Font(name='宋体',size=12,bold=True,color=COLOR_MAP[0])\r\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1]) \r\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\r\n work_sheet['A1'].fill = BG_FILL\r\n # 保存到设定的addr\r\n work_book.save(addr)\r\n\r\nif 
__name__ == \"__main__\":\r\n make_example()",
"from openpyxl import Workbook\nfrom openpyxl.styles import Font, Alignment, Border, Side, PatternFill, colors\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"<import token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\nif __name__ == '__main__':\n make_example()\n",
"<import token>\n\n\ndef make_example():\n addr = './example.xlsx'\n work_book = Workbook()\n work_sheet = work_book.active\n work_sheet['A1'] = 'Hello World!'\n select_cell = work_sheet.cell(row=2, column=2, value='I select this cell')\n work_sheet.append(['The quick brown fox', ' jumps over ', 'a lazy dog.'])\n work_sheet.append(['The quick brown fox', ' ', ' jumps over ',\n 'a lazy dog.'])\n work_sheet.merge_cells('A3:B3')\n work_sheet.merge_cells('A4:B4')\n SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True)\n CENTER_ALIGN = Alignment(horizontal='center', vertical='center')\n LE, RI, TO, BO = [Side(style='thin', color='000000')] * 4\n THIN_BORDER = Border(left=LE, right=RI, top=TO, bottom=BO)\n for row in work_sheet['A1:D4']:\n for cell in row:\n cell.font = SIMSUN_20_BOLD\n cell.alignment = CENTER_ALIGN\n cell.border = THIN_BORDER\n work_sheet.row_dimensions[1].height = 15\n work_sheet.row_dimensions[2].height = 20\n for row_letter in range(3, 5, 1):\n work_sheet.row_dimensions[row_letter].height = 17\n for col_letter in ['A', 'B']:\n work_sheet.column_dimensions[col_letter].width = 20\n work_sheet.column_dimensions['C'].width = 17\n work_sheet.column_dimensions['D'].width = 25\n COLOR_MAP = ['ff9900', '000000']\n COLOR_SIMSUN_20_BOLD = Font(name='宋体', size=12, bold=True, color=\n COLOR_MAP[0])\n BG_FILL = PatternFill('solid', fgColor=COLOR_MAP[1])\n work_sheet['A1'].font = COLOR_SIMSUN_20_BOLD\n work_sheet['A1'].fill = BG_FILL\n work_book.save(addr)\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
1,094 |
9fc9d766915bcefde4f0ba5c24cb83e33fc66272
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
import dbindexer
dbindexer.autodiscover() #This needs to happen before anything else, hence strange import ordering
urlpatterns = patterns('harvester.views',
url(r'^$', 'home', name='home'),
url(r'^settings/', 'settings', name='settings'),
# Examples:
# url(r'^$', 'harvester.views.home', name='home'),
# url(r'^harvester/', include('harvester.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
[
"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\nimport dbindexer\ndbindexer.autodiscover() #This needs to happen before anything else, hence strange import ordering\n\nurlpatterns = patterns('harvester.views',\n url(r'^$', 'home', name='home'),\n url(r'^settings/', 'settings', name='settings'),\n # Examples:\n # url(r'^$', 'harvester.views.home', name='home'),\n # url(r'^harvester/', include('harvester.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n)\n",
"from django.conf.urls import patterns, include, url\nimport dbindexer\ndbindexer.autodiscover()\nurlpatterns = patterns('harvester.views', url('^$', 'home', name='home'),\n url('^settings/', 'settings', name='settings'))\n",
"<import token>\ndbindexer.autodiscover()\nurlpatterns = patterns('harvester.views', url('^$', 'home', name='home'),\n url('^settings/', 'settings', name='settings'))\n",
"<import token>\ndbindexer.autodiscover()\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n"
] | false |
1,095 |
c88e2336432f93d95b4e2285aa532b673a4a410b
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RDt(RPackage):
"""A Wrapper of the JavaScript Library 'DataTables'.
Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
cran = "DT"
version("0.23", sha256="360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70")
version("0.20", sha256="c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f")
version("0.17", sha256="e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56")
version("0.13", sha256="79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5")
version("0.8", sha256="90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21")
version("0.7", sha256="1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c")
version("0.6", sha256="2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916")
version("0.4", sha256="3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19")
version("0.3", sha256="ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb")
version("0.2", sha256="a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd")
version("0.1", sha256="129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756")
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"), when="@0.8:")
depends_on("r-magrittr", type=("build", "run"))
depends_on("r-crosstalk", type=("build", "run"))
depends_on("r-jquerylib", type=("build", "run"), when="@0.19:")
depends_on("r-promises", type=("build", "run"), when="@0.5:")
|
[
"# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n\n cran = \"DT\"\n\n version(\"0.23\", sha256=\"360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70\")\n version(\"0.20\", sha256=\"c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f\")\n version(\"0.17\", sha256=\"e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56\")\n version(\"0.13\", sha256=\"79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5\")\n version(\"0.8\", sha256=\"90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21\")\n version(\"0.7\", sha256=\"1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c\")\n version(\"0.6\", sha256=\"2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916\")\n version(\"0.4\", sha256=\"3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19\")\n version(\"0.3\", sha256=\"ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb\")\n version(\"0.2\", sha256=\"a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd\")\n version(\"0.1\", sha256=\"129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756\")\n\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"), when=\"@0.8:\")\n depends_on(\"r-magrittr\", type=(\"build\", \"run\"))\n depends_on(\"r-crosstalk\", type=(\"build\", \"run\"))\n 
depends_on(\"r-jquerylib\", type=(\"build\", \"run\"), when=\"@0.19:\")\n depends_on(\"r-promises\", type=(\"build\", \"run\"), when=\"@0.5:\")\n",
"from spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"<import token>\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"<import token>\n\n\nclass RDt(RPackage):\n <docstring token>\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"<import token>\n\n\nclass RDt(RPackage):\n <docstring token>\n <assignment token>\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"<import token>\n<class token>\n"
] | false |
1,096 |
6b597f1570c022d17e4476e2ab8817e724a166a7
|
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
def operation(op1,op2,op):
if op == "+":
return op1 + op2
if op == "-":
return op1 - op2
if op == "*":
return op1 * op2
if op == "/":
return int(op1/op2)
stack = []
for char in tokens:
if char in ["+", "-", "*", "/"]:
op2 = stack.pop()
op1 = stack.pop()
res = operation(op1,op2,char)
stack.append(int(res))
else:
stack.append(int(char))
return stack.pop()
|
[
"class Solution:\r\n def evalRPN(self, tokens: List[str]) -> int:\r\n def operation(op1,op2,op):\r\n if op == \"+\":\r\n return op1 + op2\r\n if op == \"-\":\r\n return op1 - op2\r\n if op == \"*\":\r\n return op1 * op2\r\n if op == \"/\":\r\n return int(op1/op2)\r\n \r\n stack = []\r\n for char in tokens:\r\n if char in [\"+\", \"-\", \"*\", \"/\"]:\r\n op2 = stack.pop()\r\n op1 = stack.pop()\r\n res = operation(op1,op2,char)\r\n stack.append(int(res))\r\n else:\r\n stack.append(int(char))\r\n return stack.pop()",
"class Solution:\n\n def evalRPN(self, tokens: List[str]) ->int:\n\n def operation(op1, op2, op):\n if op == '+':\n return op1 + op2\n if op == '-':\n return op1 - op2\n if op == '*':\n return op1 * op2\n if op == '/':\n return int(op1 / op2)\n stack = []\n for char in tokens:\n if char in ['+', '-', '*', '/']:\n op2 = stack.pop()\n op1 = stack.pop()\n res = operation(op1, op2, char)\n stack.append(int(res))\n else:\n stack.append(int(char))\n return stack.pop()\n",
"class Solution:\n <function token>\n",
"<class token>\n"
] | false |
1,097 |
09c3a10230e7d0b3b893ccf236c39fc2dc12b2c6
|
dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태
print(dic['name'])
|
[
"dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태\n\n\nprint(dic['name'])\n",
"dic = {'name': 'Eric', 'age': '25'}\nprint(dic['name'])\n",
"<assignment token>\nprint(dic['name'])\n",
"<assignment token>\n<code token>\n"
] | false |
1,098 |
ecdc8f5f76b92c3c9dcf2a12b3d9452166fcb706
|
"""
Config module for storage read only disks
"""
from rhevmtests.storage.config import * # flake8: noqa
TEST_NAME = "read_only"
VM_NAME = "{0}_vm_%s".format(TEST_NAME)
VM_COUNT = 2
DISK_NAMES = dict() # dictionary with storage type as key
DISK_TIMEOUT = 600
# allocation policies
SPARSE = True
DIRECT_LUNS = UNUSED_LUNS
DIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES
DIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS
|
[
"\"\"\"\nConfig module for storage read only disks\n\"\"\"\nfrom rhevmtests.storage.config import * # flake8: noqa\n\nTEST_NAME = \"read_only\"\nVM_NAME = \"{0}_vm_%s\".format(TEST_NAME)\n\nVM_COUNT = 2\n\nDISK_NAMES = dict() # dictionary with storage type as key\nDISK_TIMEOUT = 600\n\n# allocation policies\nSPARSE = True\nDIRECT_LUNS = UNUSED_LUNS\nDIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES\nDIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS\n",
"<docstring token>\nfrom rhevmtests.storage.config import *\nTEST_NAME = 'read_only'\nVM_NAME = '{0}_vm_%s'.format(TEST_NAME)\nVM_COUNT = 2\nDISK_NAMES = dict()\nDISK_TIMEOUT = 600\nSPARSE = True\nDIRECT_LUNS = UNUSED_LUNS\nDIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES\nDIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS\n",
"<docstring token>\n<import token>\nTEST_NAME = 'read_only'\nVM_NAME = '{0}_vm_%s'.format(TEST_NAME)\nVM_COUNT = 2\nDISK_NAMES = dict()\nDISK_TIMEOUT = 600\nSPARSE = True\nDIRECT_LUNS = UNUSED_LUNS\nDIRECT_LUN_ADDRESSES = UNUSED_LUN_ADDRESSES\nDIRECT_LUN_TARGETS = UNUSED_LUN_TARGETS\n",
"<docstring token>\n<import token>\n<assignment token>\n"
] | false |
1,099 |
a55024f0e5edec22125ce53ef54ee364be185cb8
|
"""Test the init file of Mailgun."""
import hashlib
import hmac
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
API_KEY = "abc123"
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
"""Initialize a Home Assistant Server for testing this module."""
await async_setup_component(hass, webhook.DOMAIN, {})
return await hass_client_no_auth()
@pytest.fixture
async def webhook_id_with_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id."""
await async_setup_component(
hass,
mailgun.DOMAIN,
{mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: "example.com"}},
)
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def webhook_id_without_api_key(hass):
"""Initialize the Mailgun component and get the webhook_id w/o API key."""
await async_setup_component(hass, mailgun.DOMAIN, {})
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"mailgun", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.FlowResultType.FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
return result["result"].data["webhook_id"]
@pytest.fixture
async def mailgun_events(hass):
"""Return a list of mailgun_events triggered."""
events = []
@callback
def handle_event(event):
"""Handle Mailgun event."""
events.append(event)
hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)
return events
async def test_mailgun_webhook_with_missing_signature(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook doesn't trigger an event without a signature."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_with_different_api_key(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook doesn't trigger an event with a wrong signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=b"random_api_key",
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count
async def test_mailgun_webhook_event_with_correct_api_key(
http_client, webhook_id_with_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event after validating a signature."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_with_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_with_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_with_missing_signature_without_api_key(
http_client, webhook_id_without_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event without a signature w/o API key."""
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={"hello": "mailgun", "signature": {}},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}", json={"hello": "mailgun"}
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_event_without_an_api_key(
http_client, webhook_id_without_api_key, mailgun_events
) -> None:
"""Test that webhook triggers an event if there is no api key."""
timestamp = "1529006854"
token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
event_count = len(mailgun_events)
await http_client.post(
f"/api/webhook/{webhook_id_without_api_key}",
json={
"hello": "mailgun",
"signature": {
"signature": hmac.new(
key=bytes(API_KEY, "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest(),
"timestamp": timestamp,
"token": token,
},
},
)
assert len(mailgun_events) == event_count + 1
assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
assert mailgun_events[-1].data["hello"] == "mailgun"
|
[
"\"\"\"Test the init file of Mailgun.\"\"\"\nimport hashlib\nimport hmac\n\nimport pytest\n\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\n\nAPI_KEY = \"abc123\"\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(\n hass,\n mailgun.DOMAIN,\n {mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: \"example.com\"}},\n )\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await 
hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=b\"random_api_key\",\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event after validating a 
signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_with_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n 
\"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n",
"<docstring token>\nimport hashlib\nimport hmac\nimport pytest\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\nAPI_KEY = 'abc123'\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == 
data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', 
json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert 
mailgun_events[-1].data['hello'] == 'mailgun'\n",
"<docstring token>\n<import token>\nAPI_KEY = 'abc123'\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, 
handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert 
mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, 
handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert 
mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.