code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---|
# -*- coding: utf-8 -*-
"""
Module for writing the song (loading the settings (defs.txt), creating the .wav,
making the track louder or quieter)
"""
print("Laduje modul o nazwie: "+__name__)
import numpy as np
def wczytywanie_ustawien(plik_konfiguracyjny = "defs.txt"):
    """
    loads the settings file (defs.txt) into a dictionary
    arg:
        str: plik_konfiguracyjny - name of the configuration file with the
                                   parameter values (tempo etc.)
    output:
        dict: parametry - names and values of the parameters in use
    """
    import re
    import numpy as np
    # read the file contents (without the first and last line; each row of the
    # resulting matrix holds a parameter name and its value as separate
    # elements, stored as strings)
    ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \
                               skip_header=1, skip_footer=1, delimiter=":")
    # dictionary that will store the values
    parametry = {}
    # strip the "" from the key
    # if there is only 1 parameter (1 line in the file), ustawienia has
    # shape = (2,), so index the ustawienia variable directly
    if ustawienia.shape == (2,):
        parametry[re.sub('"','',ustawienia[0])] = ustawienia[1]
    # with more parameters, go through the rows of the ustawienia matrix
    else:
        for l in ustawienia:
            parametry[re.sub('"','',l[0])] = l[1]
    # convert the strings into the proper values - these parameters are under
    # our control, so this is done by hand
    try:
        parametry['tryb'] = parametry['tryb'].strip() # read mode
    # if the parameter was not given, move on without raising an error
    except KeyError:
        print("Podaj tryb odczytu!")
    try:
        parametry['bpm'] = int(parametry['bpm']) # tempo
    # if the parameter was not given, move on without raising an error
    except KeyError:
        pass
    try:
        parametry['freq'] = int(parametry['freq']) # sample rate of the output wav
    # if the parameter was not given, move on without raising an error
    except KeyError:
        pass
    try:
        parametry['loud'] = float(parametry['loud'] ) # loudness
    # if the parameter was not given, move on without raising an error
    except KeyError:
        pass
    try:
        # list of weights for the samples
        parametry['wages'] = [float(s) for s in parametry['wages'].split(",")]
    # if the parameter was not given, move on without raising an error
    except KeyError:
        pass
    return parametry
#b = wczytywanie_ustawien("defs.txt")
#adjusting the track volume
def zmiana_glosnosci(utwor, procent = 0):
    """
    changes the volume of the track (its amplitudes)
    arg:
        numpy.ndarray (numpy.int16): utwor - the sound to be made louder
                                             or quieter
        float: procent - number describing the volume change, ranging
                         from -1 to 1; 0 means no change, 1 means "100%
                         louder", -1 means "100% quieter"
    output:
        numpy.ndarray (numpy.int16): glosniej - the track, quieter or louder
    """
    if(-1 <= procent <= 1):
        # how many times the amplitude of the sound should be multiplied
        mnoznik = 0
        if( procent < 0 ):
            mnoznik = 1 + procent
        else:
            # compute the highest amplitude in the track; it determines
            # how much the volume can still be raised
            maks_ampli = 0
            maks_ampli = max(abs(utwor))
            mnoznik = 32767/maks_ampli # maximum multiplier
            # the multiplier is at least 1; the part above 1
            # (mnoznik-1) is scaled by the volume-change fraction
            # and added to the base (i.e. 1)
            mnoznik = 1 + (mnoznik - 1)*procent
        glosniej = mnoznik * utwor
        #glosniej = np.array(glosniej, dtype=np.int16)
        glosniej = glosniej.astype(np.int16)
        return glosniej
    else:
        print("Podaj procent z zakresu -1 do 1")
#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])
#wierszyk1
def tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \
                       freq = 44100, wages = None, loud = 0):
    """
    main function generating the whole song
    arg:
        numpy.ndarray (str: U2): macierz_piosenki - matrix with the
                                 definition of the consecutive quarter notes
                                 (what is played in each quarter note)
        bool: czy_pelna - flag saying whether macierz_piosenki was actually
                          filled in (it is not when the tracks had the wrong
                          number of rows or columns)
        int: bpm - tempo of the song in bpm
        int: freq - number of samples per second
        list (float): wages - weights of the consecutive samples (how much
                              sample 1, 2 etc. should matter)
        float: loud - volume fraction, 0 - same as the original samples,
                      1 - as loud as possible, -1 - as quiet as possible
    output:
        numpy.ndarray (numpy.int16): the finished track
    """
    # the song matrix was empty, the song was not created
    if(czy_pelna == False):
        print("Nie utworzono piosenki")
        return None
    else:
        import numpy as np
        import scipy.io.wavfile
        t_cwiercnuty = 60 / bpm # duration of one quarter note (depends on
        #the tempo)
        ile_cwiercnut = macierz_piosenki.shape[0] # number of quarter notes
        kanaly = macierz_piosenki.shape[1] # number of samples in use
        frekw = freq
        czas_utworu = ile_cwiercnut*t_cwiercnuty
        # how many elements the new track will have
        ilosc_probek = int(frekw*czas_utworu)
        # each sampleXY.wav is read only once, so the unique sample numbers
        # are needed
        rozne_sample = np.unique(macierz_piosenki) # includes "--"
        # the parameters of these samples are stored in dictionaries
        # dictionary with the contents of each sample (i.e. numpy arrays of
        # amplitudes)
        sample_co = {}
        sample_frekw = {} # dictionary with their sample rates
        sample_dl = {} # dictionary with their lengths
        # load these samples
        # the iterator yields the strings "01" "02" "--" etc. - strings!!!
        for ktory_sampel in rozne_sample:
            if(ktory_sampel != '--'):
                # build the sample file name, e.g. "sample01.wav"
                plik = ''.join(['sample',ktory_sampel,'.wav'])
                # read the contents and sample rate of the sample into the
                # correspondingly named entries of the sample_co and
                # sample_frekw dictionaries
                sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \
                scipy.io.wavfile.read(plik)
                # convert the sample to mono
                sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\
                axis=1)/32767
                # normalize these values
                sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \
                max(np.abs(sample_co[ktory_sampel])) * 32767)
                # store the length of the sample, i.e. the number of samples
                # ( = duration*sample_rate)
                sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
            else: # the same is done for "--", set by hand
                # "--" means silence
                sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
                sample_frekw[ktory_sampel] = frekw # same as the default
                sample_dl[ktory_sampel] = 0 # assume a duration of 0 seconds
        if wages is None:
            wages = np.ones((1,kanaly))
        else:
            # so it has shape (1,kanaly) rather than (kanaly,)
            wages = np.array(wages).reshape(1,kanaly)
        # definition of the new track
        T = np.linspace(0, czas_utworu, ilosc_probek)
        for wiersz in range(0, ile_cwiercnut):
            sample = [] # samples used in this quarter note
            dlugosci = [] # their lengths in this quarter note
            for i in range(0, kanaly):
                sampus = macierz_piosenki[wiersz,i]
                sample.append(sample_co[sampus])
                dlugosci.append(sample_dl[sampus])
            # the longest sample is played in full; the others are also
            # played in full and padded with silence (zeros) once they end
            maksik = max(dlugosci)
            # matrix of shape (number of channels) x (max length), ready for
            # mixing these sounds into one
            pusty = np.int16(np.zeros((len(sample), maksik)))
            # put the sounds into this empty matrix
            for k in range(0, kanaly):
                pusty[k][0:dlugosci[k]] = sample[k]
            # multiply the rows of pusty (i.e. the samples) by the weights
            # and sum them
            cwiercnuta = np.dot(wages, pusty)
            # the result has shape (1, x) and we want (x,), so take the
            # first element
            cwiercnuta = cwiercnuta[0]
            # start of the current quarter note
            poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)
            # if adding the last quarter notes would exceed the length of the
            # track being built, the trailing sounds are cut off so that they
            # fit within that length
            if (poczatek_cwiercnuty + maksik) > ilosc_probek:
                T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\
                cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\
                maksik)])]
            else:
                T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \
                cwiercnuta
        T= np.array(T, dtype=np.int16)
        # set the volume of the track
        T = zmiana_glosnosci(T, loud)
        return T
#pios, k = wczytywanie_sciezek(a)
#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \
#wages = b['wages'])
#wierszyk = tworzenie_piosenki(pios, k, **b)
#wierszyk
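# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end flow, assuming a companion helper wczytywanie_sciezek()
# (hinted at by the commented-out calls above) that returns the song matrix
# and a completeness flag; the file names below are hypothetical.
#
# import scipy.io.wavfile
# b = wczytywanie_ustawien("defs.txt")
# pios, k = wczytywanie_sciezek("tracks.txt")   # hypothetical track-loading helper
# wierszyk = tworzenie_piosenki(pios, k, bpm=b['bpm'], freq=b['freq'],
#                               wages=b['wages'], loud=b['loud'])
# scipy.io.wavfile.write("piosenka.wav", b['freq'], wierszyk)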
|
normal
|
{
"blob_id": "8220a6d33cda5861e74d6236757abbc81685a998",
"index": 6369,
"step-1": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = 
np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-3": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\n<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n 
axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-4": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\nimport numpy as np\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = 
np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nModul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,\r\n \"zglasnianie utworu\")\r\n\"\"\"\r\n\r\n\r\nprint(\"Laduje modul o nazwie: \"+__name__)\r\n\r\nimport numpy as np\r\n\r\ndef wczytywanie_ustawien(plik_konfiguracyjny = \"defs.txt\"):\r\n \"\"\" \r\n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\r\n \r\n arg:\r\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \r\n wartosciami parametrow (tempo itd.)\r\n \r\n wyjscie:\r\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\r\n \r\n \"\"\"\r\n import re\r\n import numpy as np\r\n \r\n # wczytuje zawartosc pliku (bez pierwszej i ostatniej linijki, jeden wiersz \r\n # wyjsciowej macierzy, zawiera nazwe parametru i jego wartosc, jako \r\n # oddzielne elementy, zapisane jako stringi)\r\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \\\r\n skip_header=1, skip_footer=1, delimiter=\":\")\r\n \r\n # tworze slownik, ktory bedzie przechowywal wartosci\r\n parametry = {}\r\n \r\n # pozbywam się \"\" z key\r\n \r\n # jesli mamy 1 parametr (1 linijka w pliku, to ustawienia to zmienna o \r\n # shape = (2,), wiec odwoluje sie bezposrednio do zmiennej ustawienia\r\n if ustawienia.shape == (2,): \r\n parametry[re.sub('\"','',ustawienia[0])] = ustawienia[1]\r\n # jak mamy wiecej parametrow odwoluje sie do kolejnych linijek macierzy \r\n # ustawienia\r\n else:\r\n for l in ustawienia: \r\n parametry[re.sub('\"','',l[0])] = l[1]\r\n \r\n # zamieniamy napisy na odpowiednie wartosci - kontroluje te parametry, wiec\r\n # robie to recznie\r\n \r\n try:\r\n parametry['tryb'] = parametry['tryb'].strip() #tryb\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n print(\"Podaj tryb odczytu!\")\r\n try:\r\n parametry['bpm'] = int(parametry['bpm']) # tempo\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n parametry['freq'] = int(parametry['freq']) # frekwencja wyjsciowego wav\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n parametry['loud'] = float(parametry['loud'] ) # glosnosc\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n # lista wag dla sampli\r\n parametry['wages'] = [float(s) for s in parametry['wages'].split(\",\")] \r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n \r\n return parametry\r\n \r\n#b = wczytywanie_ustawien(\"defs.txt\")\r\n \r\n \r\n#zglasnianie utworu\r\n\r\ndef zmiana_glosnosci(utwor, procent = 0):\r\n \"\"\"\r\n zmienia glosnosc utworu (jego amplitudy)\r\n \r\n arg:\r\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \r\n lub zciszony\r\n \r\n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \r\n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \r\n glosniej\", dla -1 \"100% ciszej\"\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\r\n \"\"\"\r\n if(-1 <= procent <= 1):\r\n #ile razy mamy pomnozyc amplitude naszego dzwieku\r\n mnoznik = 0\r\n if( procent < 0 ):\r\n mnoznik = 1 + procent\r\n else:\r\n # obliczamy najwyzsza amplitude w danym utworze i ona bedzie \r\n # wyznaczac jak bardzo mozemy podglosnic\r\n maks_ampli = 0\r\n maks_ampli = max(abs(utwor))\r\n mnoznik = 32767/maks_ampli # maksymalny mnoznik\r\n # mnoznik minimalnie 
moze osiagnac wartosc 1, to co powyzej \r\n # (mnoznik-1) mnozymy o procent zglosnienia\r\n # i dodajemy do podstawy (czyli 1)\r\n mnoznik = 1 + (mnoznik - 1)*procent\r\n glosniej = mnoznik * utwor\r\n #glosniej = np.array(glosniej, dtype=np.int16)\r\n glosniej = glosniej.astype(np.int16) \r\n return glosniej\r\n else:\r\n print(\"Podaj procent z zakresu -1 do 1\")\r\n \r\n\r\n#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])\r\n#wierszyk1\r\n \r\n \r\n \r\n\r\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \\\r\n freq = 44100, wages = None, loud = 0):\r\n \"\"\"\r\n glowna funkcja generujaca cala piosenke\r\n \r\n arg:\r\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \r\n definicje kolejnych cwiercnut (co ma byc grane \r\n w danej cwiercnucie)\r\n \r\n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \r\n zapisana (nie jest, gdy tracki mialy nieodpowiednia \r\n liczbe wierszy lub kolumn)\r\n \r\n int: bpm - tempo piosenki w jednostce bpm\r\n \r\n int: freq - ilosc probek w jednej sekundzie\r\n \r\n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \r\n probka, 2 etc.)\r\n \r\n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \r\n maxa, -1 - sciszamy na maxa\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): gotowy utwór\r\n \r\n \"\"\"\r\n \r\n \r\n # macierz piosenki byla pusta, piosenka nie zostala utworzona\r\n if(czy_pelna == False):\r\n print(\"Nie utworzono piosenki\")\r\n return None \r\n \r\n else:\r\n \r\n import numpy as np\r\n import scipy.io.wavfile\r\n \r\n t_cwiercnuty = 60 / bpm # czas trwania jednej cwiercnuty (zalezy od \r\n #tempa)\r\n ile_cwiercnut = macierz_piosenki.shape[0] # ilosc cwiercnut\r\n kanaly = macierz_piosenki.shape[1] # ilosc uzywanych sampli\r\n frekw = freq\r\n czas_utworu = ile_cwiercnut*t_cwiercnuty\r\n # ile elementow bedzie w nowym utworze\r\n ilosc_probek = int(frekw*czas_utworu) \r\n \r\n # bedziemy tylko raz wczytywac zawartosc sampleXY.wav, wiec potrzebuje \r\n # unikalne numery sampli\r\n rozne_sample = np.unique(macierz_piosenki) # bierze lacznie z \"--\"\r\n \r\n # w slownikach zapiszemy parametry tych sampli\r\n # slownik z wartosciami danego sampla (tj. macierze numpy-owe z \r\n # amplitudami)\r\n sample_co = {} \r\n sample_frekw = {} # slownik z ich frekwencjami\r\n sample_dl = {} # slownik z ich dlugosciami\r\n \r\n #wczytujemy te sample\r\n # w iteratorze bierzemy napisy \"01\" \"02\" \"--\" itd. stringi!!!\r\n for ktory_sampel in rozne_sample: \r\n \r\n if(ktory_sampel != '--'):\r\n # tworzymy napis z nazwa pliku sampla, np. 
\"sample01.wav\"\r\n plik = ''.join(['sample',ktory_sampel,'.wav'])\r\n # wczytujemy zawartosc i frekwencje danego sampla do \r\n # odpowiednio nazwanego elementu w slowniku sample_co i \r\n # sample_frekw\r\n sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \\\r\n scipy.io.wavfile.read(plik)\r\n # tworzymy mono z naszego sampla\r\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\\\r\n axis=1)/32767\r\n # normalizujemy te wartosci\r\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \\\r\n max(np.abs(sample_co[ktory_sampel])) * 32767)\r\n # zapisujemy dlugosc sampli, czyli ilosc probek \r\n # ( = czas_trwania*frekwencja)\r\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\r\n \r\n else: # to samo robimy dla \"--\" recznie ustawiamy\r\n # robimy cisze, gdy --\r\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16) \r\n sample_frekw[ktory_sampel] = frekw # taka sama jak domyslna\r\n sample_dl[ktory_sampel] = 0 # zakladamy czas 0 sekund\r\n \r\n\r\n \r\n \r\n \r\n if wages is None:\r\n wages = np.ones((1,kanaly)) \r\n else:\r\n # zeby mialo wymiar (1,kanaly), a nie (kanaly,)\r\n wages = np.array(wages).reshape(1,kanaly) \r\n \r\n # definicja nowego utworu\r\n T = np.linspace(0, czas_utworu, ilosc_probek)\r\n \r\n for wiersz in range(0, ile_cwiercnut):\r\n\r\n sample = [] # wczytamy sample z danej cwiecnuty\r\n dlugosci = [] # tu zapiszemy ich dlugosci w tej cwiercnucie\r\n\r\n for i in range(0, kanaly):\r\n \r\n sampus = macierz_piosenki[wiersz,i]\r\n sample.append(sample_co[sampus]) \r\n dlugosci.append(sample_dl[sampus])\r\n\r\n \r\n # bierzemy najdluzszy sample i w calosci bedziemy go odtwarzac; \r\n # reszte zatem tez w calosci odtworzymy, a gdy sie skoncza damy \r\n # cisze (zera)\r\n maksik = max(dlugosci)\r\n # mamy tutaj macierz 4 na max dlugosc, przygotowana do zlaczenia \r\n # potem tych dzwiekow w jeden \r\n pusty = np.int16(np.zeros((len(sample), maksik)))\r\n\r\n # dodajemy nasze dzwieki do tej pustej\r\n for k in range(0, kanaly):\r\n pusty[k][0:dlugosci[k]] = sample[k]\r\n\r\n \r\n # mnozymy kolejne elementy wektora pusty (czyli sample) przez \r\n # wagi i sumujemy\r\n cwiercnuta = np.dot(wages, pusty) \r\n #otrzymamy wymiar (1, x), a chcemy (x,), wiec bierzemy pierwszy \r\n # element\r\n cwiercnuta = cwiercnuta[0]\r\n \r\n # poczatek biezacej cwiercnuty \r\n poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)\r\n \r\n # jesli dodanie ostatnich cwiercnut bedzie wiazalo sie z \r\n # przekroczeniem dlugosci tworzonego utworu, obcinamy ostatnie \r\n # dzwieki, tak by zmiescic sie w tej dlugosci\r\n if (poczatek_cwiercnuty + maksik) > ilosc_probek:\r\n \r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\\\r\n cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\\\r\n maksik)])]\r\n \r\n else:\r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \\\r\n cwiercnuta\r\n \r\n T= np.array(T, dtype=np.int16)\r\n \r\n #ustalamy glosnosc utworu\r\n T = zmiana_glosnosci(T, loud)\r\n\r\n return T\r\n\r\n#pios, k = wczytywanie_sciezek(a)\r\n#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \\\r\n#wages = b['wages'])\r\n#wierszyk = tworzenie_piosenki(pios, k, **b)\r\n#wierszyk ",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
<|reserved_special_token_0|>
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
<|reserved_special_token_0|>
class LineNumberTracker:
"""
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
"""
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
elif is_add:
line_num += end - start
else:
line_num -= end - start
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
<|reserved_special_token_0|>
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
"""
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
"""
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
elif is_add:
line_num += end - start
else:
line_num -= end - start
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_json_if_exists(path):
if not os.path.isfile(path):
return {}
with open(path) as f:
return json.load(f)
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
def get_folder_paths(directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os
.path.isdir(os.path.join(directory, f))]
def file_to_lines(file_path):
if len(file_path) == 0:
return []
with open(file_path) as f:
lines = list(f.read().splitlines())
return lines
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
"""
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
"""
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
elif is_add:
line_num += end - start
else:
line_num -= end - start
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
<|reserved_special_token_1|>
import os
import json
def load_json_if_exists(path):
if not os.path.isfile(path):
return {}
with open(path) as f:
return json.load(f)
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
def get_folder_paths(directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os
.path.isdir(os.path.join(directory, f))]
def file_to_lines(file_path):
if len(file_path) == 0:
return []
with open(file_path) as f:
lines = list(f.read().splitlines())
return lines
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
"""
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
"""
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
elif is_add:
line_num += end - start
else:
line_num -= end - start
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
<|reserved_special_token_1|>
import os
import json
def load_json_if_exists(path):
if not os.path.isfile(path):
return {}
with open(path) as f:
return json.load(f)
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
def get_folder_paths(directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]
def file_to_lines(file_path):
if len(file_path) == 0:
return []
with open(file_path) as f:
lines = list(f.read().splitlines())
return lines
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
'''
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
'''
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
else:
if is_add:
line_num += (end - start)
else:
line_num -= (end - start)
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
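# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how LineNumberTracker remaps original line numbers
# after edits are logged; the numbers below are hypothetical.
#
# tracker = LineNumberTracker()
# tracker.remove_lines(10, 15)    # original lines 10-14 were deleted
# tracker.add_lines(20, 22)       # two lines were inserted at position 20
# print(tracker.transform(30))    # -> 27: shifted down by 5, then up by 2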
|
flexible
|
{
"blob_id": "3788888a17e2598e781803f89cd63ac9c3219f59",
"index": 4341,
"step-1": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\n<mask token>\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-2": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-3": "<mask token>\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-4": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-5": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\nclass LineNumberTracker:\n '''\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n '''\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n else:\n if is_add:\n line_num += (end - start)\n else:\n line_num -= (end - start)\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
import weakref
from enum import Enum
from functools import partial
from typing import TYPE_CHECKING
import inflection
if TYPE_CHECKING:
from stake.client import StakeClient
camelcase = partial(inflection.camelize, uppercase_first_letter=False)
__all__ = ["SideEnum"]
class SideEnum(str, Enum):
BUY = "B"
SELL = "S"
class BaseClient:
# flake8: noqa
def __init__(self, client: "StakeClient"):
self._client = weakref.proxy(client)
|
normal
|
{
"blob_id": "f13ccbfb27788deca0d4f4b58a4e9e8c7e8e0306",
"index": 1644,
"step-1": "<mask token>\n\n\nclass SideEnum(str, Enum):\n BUY = 'B'\n SELL = 'S'\n\n\nclass BaseClient:\n\n def __init__(self, client: 'StakeClient'):\n self._client = weakref.proxy(client)\n",
"step-2": "<mask token>\nif TYPE_CHECKING:\n from stake.client import StakeClient\n<mask token>\n\n\nclass SideEnum(str, Enum):\n BUY = 'B'\n SELL = 'S'\n\n\nclass BaseClient:\n\n def __init__(self, client: 'StakeClient'):\n self._client = weakref.proxy(client)\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from stake.client import StakeClient\ncamelcase = partial(inflection.camelize, uppercase_first_letter=False)\n__all__ = ['SideEnum']\n\n\nclass SideEnum(str, Enum):\n BUY = 'B'\n SELL = 'S'\n\n\nclass BaseClient:\n\n def __init__(self, client: 'StakeClient'):\n self._client = weakref.proxy(client)\n",
"step-4": "import weakref\nfrom enum import Enum\nfrom functools import partial\nfrom typing import TYPE_CHECKING\nimport inflection\nif TYPE_CHECKING:\n from stake.client import StakeClient\ncamelcase = partial(inflection.camelize, uppercase_first_letter=False)\n__all__ = ['SideEnum']\n\n\nclass SideEnum(str, Enum):\n BUY = 'B'\n SELL = 'S'\n\n\nclass BaseClient:\n\n def __init__(self, client: 'StakeClient'):\n self._client = weakref.proxy(client)\n",
"step-5": "import weakref\nfrom enum import Enum\nfrom functools import partial\nfrom typing import TYPE_CHECKING\n\nimport inflection\n\nif TYPE_CHECKING:\n from stake.client import StakeClient\n\ncamelcase = partial(inflection.camelize, uppercase_first_letter=False)\n\n__all__ = [\"SideEnum\"]\n\n\nclass SideEnum(str, Enum):\n BUY = \"B\"\n SELL = \"S\"\n\n\nclass BaseClient:\n # flake8: noqa\n def __init__(self, client: \"StakeClient\"):\n self._client = weakref.proxy(client)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# open a converted base to bits file and convert it back to the base sequences
seq2 = ''
with open('chr01.txt') as a:
while 1:
seq = a.read(2)
# print(seq)
seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a').replace('11', 't')
seq2 += seq
if not seq:
break
print(len(seq2))
print(seq2)
|
normal
|
{
"blob_id": "c2f859e0ed0e812768dec04b2b1f9ddd349350f6",
"index": 9780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a'\n ).replace('11', 't')\n seq2 += seq\n if not seq:\n break\nprint(len(seq2))\nprint(seq2)\n",
"step-3": "seq2 = ''\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a'\n ).replace('11', 't')\n seq2 += seq\n if not seq:\n break\nprint(len(seq2))\nprint(seq2)\n",
"step-4": "# open a converted base to bits file and convert it back to the base sequences\n\nseq2 = ''\nwith open('chr01.txt') as a:\n while 1:\n seq = a.read(2)\n # print(seq)\n seq = seq.replace('00', 'c').replace('01', 'g').replace('10', 'a').replace('11', 't')\n seq2 += seq\n if not seq:\n break\n\nprint(len(seq2))\nprint(seq2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt != None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tf.reset_default_graph()
<|reserved_special_token_0|>
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt != None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
            conversation_history = conversation_history[-convo_hist_limit:]
token_ids = list(reversed(datautil.sentence_to_ids(' '.join(
conversation_history), vocaben, normalize_digits=True, Isch
=True)))
bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]
[0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch({
bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs,
decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits
]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = ' '.join(datautil.ids2texts(outputs,
rev_vocabch))
conversation_history.append(convo_output)
else:
print('can not translation!')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_buckets = []
convo_hist_limit = 1
max_source_length = 1
max_target_length = 2
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.reset_default_graph()
max_train_data_size = 0
data_dir = 'datacn/'
dropout = 1.0
grad_clip = 5.0
batch_size = 60
hidden_size = 14
num_layers = 2
learning_rate = 0.5
lr_decay_factor = 0.99
checkpoint_dir = 'data/checkpoints/'
hidden_size = 100
checkpoint_dir = 'fanyichina/checkpoints/'
data_dir = 'fanyichina'
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(
datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,
_buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,
learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf
.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
if ckpt != None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
            conversation_history = conversation_history[-convo_hist_limit:]
token_ids = list(reversed(datautil.sentence_to_ids(' '.join(
conversation_history), vocaben, normalize_digits=True, Isch
=True)))
bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]
[0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch({
bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(sess, encoder_inputs,
decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits
]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = ' '.join(datautil.ids2texts(outputs,
rev_vocabch))
conversation_history.append(convo_output)
else:
print('can not translation!')
if __name__ == '__main__':
main()
# -*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
from translate import datautil
import seq2seq_model
_buckets = []
convo_hist_limit = 1
max_source_length = 1
max_target_length = 2
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.reset_default_graph()
max_train_data_size = 0
data_dir = 'datacn/'
dropout = 1.0
grad_clip = 5.0
batch_size = 60
hidden_size = 14
num_layers = 2
learning_rate = 0.5
lr_decay_factor = 0.99
checkpoint_dir = 'data/checkpoints/'
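# the assignments below override the generic defaults above for the
# Chinese-English translation demo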
hidden_size = 100
checkpoint_dir = 'fanyichina/checkpoints/'
data_dir = 'fanyichina'
_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]
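# each _buckets entry is (max source length, max target length); an input is
# routed to, and padded for, the smallest bucket its token count fits into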
def getfanyiInfo():
vocaben, rev_vocaben = datautil.initialize_vocabulary(
os.path.join(datautil.data_dir, datautil.vocabulary_fileen))
vocab_sizeen = len(vocaben)
vocabch, rev_vocabch = datautil.initialize_vocabulary(
os.path.join(datautil.data_dir, datautil.vocabulary_filech))
vocab_sizech = len(vocabch)
return vocab_sizeen, vocab_sizech, vocaben, vocabch
def createModel(session, forward_only, from_vocab_size, to_vocab_size):
model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size, _buckets, hidden_size, num_layers, dropout,
grad_clip, batch_size, learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf.float32)
ckpt = tf.train.latest_checkpoint(checkpoint_dir)
    if ckpt is not None:
model.saver.restore(session, ckpt)
else:
session.run(tf.global_variables_initializer())
return model
def main():
vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
model = createModel(sess, True, vocab_sizeen, vocab_sizech)
model.batch_size = 1
conversation_history = []
while True:
prompt = '请输入:'
sentence = input(prompt)
conversation_history.append(sentence)
            # keep only the most recent convo_hist_limit turns of context
            conversation_history = conversation_history[-convo_hist_limit:]
token_ids = list(reversed(datautil.sentence_to_ids(
" ".join(conversation_history), vocaben, normalize_digits=True, Isch=True)))
bucket_id = min([b for b in range(len(_buckets))
if _buckets[b][0] > len(token_ids)])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
_, _, output_logits = model.step(
sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)
outputs = [int(np.argmax(logit, axis=1))
for logit in output_logits]
if datautil.EOS_ID in outputs:
outputs = outputs[:outputs.index(datautil.EOS_ID)]
convo_output = " ".join(
datautil.ids2texts(outputs, rev_vocabch))
conversation_history.append(convo_output)
else:
                print('can not translate!')
if __name__ == '__main__':
main()
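One fragile spot in main() above: when the tokenized input is longer than the widest bucket (60 tokens with the _buckets used here), the list comprehension handed to min() is empty and min() raises ValueError. The sketch below shows one way to guard that lookup; pick_bucket is an illustrative helper, not part of the original script, and it simply falls back to the widest bucket and truncates over-long input.

def pick_bucket(token_ids, buckets):
    # smallest bucket whose source side is still longer than the input
    fitting = [b for b in range(len(buckets)) if buckets[b][0] > len(token_ids)]
    if fitting:
        return fitting[0], token_ids
    # no bucket fits: use the widest one and truncate the input to fit
    last = len(buckets) - 1
    return last, token_ids[:buckets[last][0] - 1]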
|
flexible
|
{
"blob_id": "b7007778ea9dfac3af8c31d66d32d8157dc0d69b",
"index": 1517,
"step-1": "<mask token>\n\n\ndef getfanyiInfo():\n vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_fileen))\n vocab_sizeen = len(vocaben)\n vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_filech))\n vocab_sizech = len(vocabch)\n return vocab_sizeen, vocab_sizech, vocaben, vocabch\n\n\ndef createModel(session, forward_only, from_vocab_size, to_vocab_size):\n model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,\n _buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,\n learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf\n .float32)\n ckpt = tf.train.latest_checkpoint(checkpoint_dir)\n if ckpt != None:\n model.saver.restore(session, ckpt)\n else:\n session.run(tf.global_variables_initializer())\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\ntf.reset_default_graph\n<mask token>\n\n\ndef getfanyiInfo():\n vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_fileen))\n vocab_sizeen = len(vocaben)\n vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_filech))\n vocab_sizech = len(vocabch)\n return vocab_sizeen, vocab_sizech, vocaben, vocabch\n\n\ndef createModel(session, forward_only, from_vocab_size, to_vocab_size):\n model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,\n _buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,\n learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf\n .float32)\n ckpt = tf.train.latest_checkpoint(checkpoint_dir)\n if ckpt != None:\n model.saver.restore(session, ckpt)\n else:\n session.run(tf.global_variables_initializer())\n return model\n\n\ndef main():\n vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n with tf.Session() as sess:\n model = createModel(sess, True, vocab_sizeen, vocab_sizech)\n model.batch_size = 1\n conversation_history = []\n while True:\n prompt = '请输入:'\n sentence = input(prompt)\n conversation_history.append(sentence)\n conversation_history = conversation_history[-conversation_history:]\n token_ids = list(reversed(datautil.sentence_to_ids(' '.join(\n conversation_history), vocaben, normalize_digits=True, Isch\n =True)))\n bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]\n [0] > len(token_ids)])\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({\n bucket_id: [(token_ids, [])]}, bucket_id)\n _, _, output_logits = model.step(sess, encoder_inputs,\n decoder_inputs, target_weights, bucket_id, True)\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits\n ]\n if datautil.EOS_ID in outputs:\n outputs = outputs[:outputs.index(datautil.EOS_ID)]\n convo_output = ' '.join(datautil.ids2texts(outputs,\n rev_vocabch))\n conversation_history.append(convo_output)\n else:\n print('can not translation!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n_buckets = []\nconvo_hist_limit = 1\nmax_source_length = 1\nmax_target_length = 2\nflags = tf.app.flags\nFLAGS = flags.FLAGS\ntf.reset_default_graph\nmax_train_data_size = 0\ndata_dir = 'datacn/'\ndropout = 1.0\ngrad_clip = 5.0\nbatch_size = 60\nhidden_size = 14\nnum_layers = 2\nlearning_rate = 0.5\nlr_decay_factor = 0.99\ncheckpoint_dir = 'data/checkpoints/'\nhidden_size = 100\ncheckpoint_dir = 'fanyichina/checkpoints/'\ndata_dir = 'fanyichina'\n_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]\n\n\ndef getfanyiInfo():\n vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_fileen))\n vocab_sizeen = len(vocaben)\n vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_filech))\n vocab_sizech = len(vocabch)\n return vocab_sizeen, vocab_sizech, vocaben, vocabch\n\n\ndef createModel(session, forward_only, from_vocab_size, to_vocab_size):\n model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,\n _buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,\n learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf\n .float32)\n ckpt = tf.train.latest_checkpoint(checkpoint_dir)\n if ckpt != None:\n model.saver.restore(session, ckpt)\n else:\n session.run(tf.global_variables_initializer())\n return model\n\n\ndef main():\n vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n with tf.Session() as sess:\n model = createModel(sess, True, vocab_sizeen, vocab_sizech)\n model.batch_size = 1\n conversation_history = []\n while True:\n prompt = '请输入:'\n sentence = input(prompt)\n conversation_history.append(sentence)\n conversation_history = conversation_history[-conversation_history:]\n token_ids = list(reversed(datautil.sentence_to_ids(' '.join(\n conversation_history), vocaben, normalize_digits=True, Isch\n =True)))\n bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]\n [0] > len(token_ids)])\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({\n bucket_id: [(token_ids, [])]}, bucket_id)\n _, _, output_logits = model.step(sess, encoder_inputs,\n decoder_inputs, target_weights, bucket_id, True)\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits\n ]\n if datautil.EOS_ID in outputs:\n outputs = outputs[:outputs.index(datautil.EOS_ID)]\n convo_output = ' '.join(datautil.ids2texts(outputs,\n rev_vocabch))\n conversation_history.append(convo_output)\n else:\n print('can not translation!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport numpy as np\nimport tensorflow as tf\nfrom translate import datautil\nimport seq2seq_model\n_buckets = []\nconvo_hist_limit = 1\nmax_source_length = 1\nmax_target_length = 2\nflags = tf.app.flags\nFLAGS = flags.FLAGS\ntf.reset_default_graph\nmax_train_data_size = 0\ndata_dir = 'datacn/'\ndropout = 1.0\ngrad_clip = 5.0\nbatch_size = 60\nhidden_size = 14\nnum_layers = 2\nlearning_rate = 0.5\nlr_decay_factor = 0.99\ncheckpoint_dir = 'data/checkpoints/'\nhidden_size = 100\ncheckpoint_dir = 'fanyichina/checkpoints/'\ndata_dir = 'fanyichina'\n_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]\n\n\ndef getfanyiInfo():\n vocaben, rev_vocaben = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_fileen))\n vocab_sizeen = len(vocaben)\n vocabch, rev_vocabch = datautil.initialize_vocabulary(os.path.join(\n datautil.data_dir, datautil.vocabulary_filech))\n vocab_sizech = len(vocabch)\n return vocab_sizeen, vocab_sizech, vocaben, vocabch\n\n\ndef createModel(session, forward_only, from_vocab_size, to_vocab_size):\n model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size,\n _buckets, hidden_size, num_layers, dropout, grad_clip, batch_size,\n learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf\n .float32)\n ckpt = tf.train.latest_checkpoint(checkpoint_dir)\n if ckpt != None:\n model.saver.restore(session, ckpt)\n else:\n session.run(tf.global_variables_initializer())\n return model\n\n\ndef main():\n vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n with tf.Session() as sess:\n model = createModel(sess, True, vocab_sizeen, vocab_sizech)\n model.batch_size = 1\n conversation_history = []\n while True:\n prompt = '请输入:'\n sentence = input(prompt)\n conversation_history.append(sentence)\n conversation_history = conversation_history[-conversation_history:]\n token_ids = list(reversed(datautil.sentence_to_ids(' '.join(\n conversation_history), vocaben, normalize_digits=True, Isch\n =True)))\n bucket_id = min([b for b in range(len(_buckets)) if _buckets[b]\n [0] > len(token_ids)])\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({\n bucket_id: [(token_ids, [])]}, bucket_id)\n _, _, output_logits = model.step(sess, encoder_inputs,\n decoder_inputs, target_weights, bucket_id, True)\n outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits\n ]\n if datautil.EOS_ID in outputs:\n outputs = outputs[:outputs.index(datautil.EOS_ID)]\n convo_output = ' '.join(datautil.ids2texts(outputs,\n rev_vocabch))\n conversation_history.append(convo_output)\n else:\n print('can not translation!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding:utf-8 -*-\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom translate import datautil\nimport seq2seq_model\n\n_buckets = []\nconvo_hist_limit = 1\nmax_source_length = 1\nmax_target_length = 2\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\ntf.reset_default_graph\n\nmax_train_data_size = 0\n\ndata_dir = 'datacn/'\n\ndropout = 1.0\ngrad_clip = 5.0\nbatch_size = 60\nhidden_size = 14\nnum_layers = 2\nlearning_rate = 0.5\nlr_decay_factor = 0.99\n\ncheckpoint_dir = 'data/checkpoints/'\n\nhidden_size = 100\ncheckpoint_dir = 'fanyichina/checkpoints/'\ndata_dir = 'fanyichina'\n_buckets = [(20, 20), (40, 40), (50, 50), (60, 60)]\n\n\ndef getfanyiInfo():\n vocaben, rev_vocaben = datautil.initialize_vocabulary(\n os.path.join(datautil.data_dir, datautil.vocabulary_fileen))\n vocab_sizeen = len(vocaben)\n vocabch, rev_vocabch = datautil.initialize_vocabulary(\n os.path.join(datautil.data_dir, datautil.vocabulary_filech))\n vocab_sizech = len(vocabch)\n return vocab_sizeen, vocab_sizech, vocaben, vocabch\n\n\ndef createModel(session, forward_only, from_vocab_size, to_vocab_size):\n model = seq2seq_model.Seq2SeqModel(from_vocab_size, to_vocab_size, _buckets, hidden_size, num_layers, dropout,\n grad_clip, batch_size, learning_rate, lr_decay_factor, forward_only=forward_only, dtype=tf.float32)\n ckpt = tf.train.latest_checkpoint(checkpoint_dir)\n if ckpt != None:\n model.saver.restore(session, ckpt)\n else:\n session.run(tf.global_variables_initializer())\n return model\n\n\ndef main():\n vocab_sizeen, vocab_sizech, vocaben, rev_vocabch = getfanyiInfo()\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n with tf.Session() as sess:\n model = createModel(sess, True, vocab_sizeen, vocab_sizech)\n model.batch_size = 1\n conversation_history = []\n while True:\n prompt = '请输入:'\n sentence = input(prompt)\n conversation_history.append(sentence)\n conversation_history = conversation_history[-conversation_history:]\n\n token_ids = list(reversed(datautil.sentence_to_ids(\n \" \".join(conversation_history), vocaben, normalize_digits=True, Isch=True)))\n bucket_id = min([b for b in range(len(_buckets))\n if _buckets[b][0] > len(token_ids)])\n\n encoder_inputs, decoder_inputs, target_weights = model.get_batch(\n {bucket_id: [(token_ids, [])]}, bucket_id)\n _, _, output_logits = model.step(\n sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)\n outputs = [int(np.argmax(logit, axis=1))\n for logit in output_logits]\n if datautil.EOS_ID in outputs:\n outputs = outputs[:outputs.index(datautil.EOS_ID)]\n convo_output = \" \".join(\n datautil.ids2texts(outputs, rev_vocabch))\n conversation_history.append(convo_output)\n else:\n print('can not translation!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
#!/usr/bin/python2
import unittest
import luna_utils as luna
import time
API_URL = "com.webos.service.videooutput/"
VERBOSE_LOG = True
SUPPORT_REGISTER = False
SINK_MAIN = "MAIN"
SINK_SUB = "SUB0"
#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state
#SINK_LIST = [SINK_MAIN, SINK_SUB]
SINK_LIST = [SINK_MAIN]
PID1 = "pipeline1"
PID2 = "pipeline2"
PID_LIST = [PID1, PID2]
INPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}
OUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}
#Choose source type VDEC or HDMI for test input
#SOURCE_NAME = "VDEC"
#SOURCE_PORT = 0
SOURCE_NAME = "HDMI"
SOURCE_PORT = 3
SOURCE_WIDTH = 1920
SOURCE_HEIGHT = 1080
SLEEP_TIME = 1
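# INPUT_RECT is the crop taken from the video source and OUTPUT_RECT is where
# that crop lands on the display; SLEEP_TIME keeps the picture up briefly so a
# tester can eyeball the result before tearDown disconnects the sink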
class TestVideoMethods(luna.TestBase):
def vlog(self, message):
if VERBOSE_LOG:
print(message)
def setUp(self):
self.vlog("setUp")
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("register " + pid)
luna.call(API_URL + "register", { "context": pid })
self.statusSub = luna.subscribe(API_URL + "getStatus", {"subscribe":True})
def tearDown(self):
self.vlog("tearDown")
for sink in SINK_LIST:
self.vlog("disconnect " + sink)
luna.call(API_URL + "disconnect", { "sink": sink })
if SUPPORT_REGISTER:
for pid in PID_LIST:
self.vlog("unregister " + pid)
luna.call(API_URL + "unregister", { "context": pid })
luna.cancelSubscribe(self.statusSub)
def connect(self, sink, source, port, pid):
self.vlog("connect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{ "outputMode": "DISPLAY", "sink": sink, "source": source, "sourcePort": port },
self.statusSub,
{"video":[{"sink": sink, "connectedSource": source, "connectedSourcePort": port}]})
def mute(self, sink, blank):
self.vlog("- Mute" + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "blankVideo",
{"sink": sink, "blank": blank},
self.statusSub,
{"video":[{"sink": sink, "muted": blank}]})
def disconnect(self, sink, pid):
self.vlog("disconnect " + sink)
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "disconnect", { "sink": sink },
self.statusSub,
{"video": [{"sink": sink, "connectedSource": None}]})
def testConnectDisconnect(self):
print("[testConnectDisconnect]")
for source, ports in {"VDEC":[0,1], "HDMI":[0,1,2]}.iteritems():
for port in ports:
for sink in SINK_LIST:
for i in range(3):
self.connect(sink, source, port, "")
self.disconnect(sink, "")
def testDualConnect(self):
print("[testDualConnect]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + "connect",
{"outputMode": "DISPLAY", "sink": SINK_SUB, "source": SOURCE_NAME, "sourcePort": SOURCE_PORT},
self.statusSub,
{"video": [{"sink": SINK_MAIN, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT},
{"sink": SINK_SUB, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT}]})
self.disconnect(SINK_MAIN, "")
if len(SINK_LIST) > 1:
self.disconnect(SINK_SUB, "")
def testMute(self):
print("[testMute]")
for sink in SINK_LIST:
self.connect(sink, SOURCE_NAME, SOURCE_PORT, "")
for blank in [False, True]:
self.mute(sink, blank)
#test different orders of display window and media data
def testSetDisplayWindowAndVideoData(self):
print("[testSetDisplayWindowAndVideoData]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":0,
"height":0,
"frameRate":0,
"sourceInput": {"x":0, "y":0, "width":0, "height":0}, # no media data yet so can't determine appliedsourceInput yet
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": "MAIN",
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetVideoDataAndDisplayWindow(self):
print("[testSetVideoDataAndDisplayWindow]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": "MAIN",
"fullScreen": False,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
"displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetFullscreen(self):
print("[testSetFullscreen]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "setVideoData",
{"sink": SINK_MAIN,
"contentType": "media",
"frameRate":29.5,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"scanType":"progressive",
"adaptive": False},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": False,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":0, "height":0},
"displayOutput": {"x":0, "y":0, "width":0, "height":0}
}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN,
"fullScreen": True,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT}},
self.statusSub,
{"video":[{"sink": SINK_MAIN,
"fullScreen": True,
"width":SOURCE_WIDTH,
"height":SOURCE_HEIGHT,
"frameRate":29.5,
"sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
"displayOutput": {"x":0, "y":0, "width":3840, "height":2160}
}]})
self.mute(SINK_MAIN, False)
time.sleep(SLEEP_TIME)
def testSetCompositing(self):
print("[testSetCompositing]")
self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
if len(SINK_LIST) > 1:
self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, "")
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setCompositing",
{"composeOrder": [{"sink":SINK_MAIN, "opacity":20, "zOrder":1},
{"sink":SINK_SUB, "opacity":31, "zOrder":0}]},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":20, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_MAIN, "fullScreen":True, "opacity":130},
self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":1}]})
if len(SINK_LIST) > 1:
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":200},
self.statusSub, {"video":[{"sink": "SUB0", "opacity":200, "zOrder":0}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":230},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":230, "zOrder":1}]})
self.checkLunaCallSuccessAndSubscriptionUpdate(
API_URL + "display/setDisplayWindow",
{"sink": SINK_SUB, "fullScreen":True, "opacity":30, "zOrder": 1},
self.statusSub, {"video":[{"sink": "MAIN", "opacity":130, "zOrder":0}, {"sink": "SUB0", "opacity":30, "zOrder":1}]})
if __name__ == '__main__':
luna.VERBOSE = False
unittest.main()
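Because the suite ends with unittest.main(), individual cases can be picked from the command line instead of running everything. The file name below is only a placeholder; this dump does not show what the script is actually called.

python2 ./videooutput_test.py TestVideoMethods.testMute
python2 ./videooutput_test.py TestVideoMethods.testSetCompositing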
|
flexible
|
{
"blob_id": "27e66b2a03bc626d5babd804e736a4652ba030d5",
"index": 8624,
"step-1": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n <mask token>\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n <mask token>\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n <mask token>\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 
SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 
'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 
31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 
'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 
31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-4": "import unittest\nimport luna_utils as luna\nimport time\nAPI_URL = 'com.webos.service.videooutput/'\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\nSINK_MAIN = 'MAIN'\nSINK_SUB = 'SUB0'\nSINK_LIST = [SINK_MAIN]\nPID1 = 'pipeline1'\nPID2 = 'pipeline2'\nPID_LIST = [PID1, PID2]\nINPUT_RECT = {'X': 0, 'Y': 0, 'W': 1920, 'H': 1080}\nOUTPUT_RECT = {'X': 400, 'Y': 400, 'W': 1920, 'H': 1080}\nSOURCE_NAME = 'HDMI'\nSOURCE_PORT = 3\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\nSLEEP_TIME = 1\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 
'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 
3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-5": "#!/usr/bin/python2\nimport unittest\nimport luna_utils as luna\nimport time\n\nAPI_URL = \"com.webos.service.videooutput/\"\n\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\n\nSINK_MAIN = \"MAIN\"\nSINK_SUB = \"SUB0\"\n\n#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state\n#SINK_LIST = [SINK_MAIN, SINK_SUB]\nSINK_LIST = [SINK_MAIN]\n\nPID1 = \"pipeline1\"\nPID2 = \"pipeline2\"\n\nPID_LIST = [PID1, PID2]\n\nINPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}\nOUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}\n\n#Choose source type VDEC or HDMI for test input\n#SOURCE_NAME = SOURCE_NAME\n#SOURCE_PORT = 0\nSOURCE_NAME = \"HDMI\"\nSOURCE_PORT = 3\n\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\n\nSLEEP_TIME = 1\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog(\"setUp\")\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"register \" + pid)\n luna.call(API_URL + \"register\", { \"context\": pid })\n\n self.statusSub = luna.subscribe(API_URL + \"getStatus\", {\"subscribe\":True})\n\n def tearDown(self):\n self.vlog(\"tearDown\")\n for sink in SINK_LIST:\n self.vlog(\"disconnect \" + sink)\n luna.call(API_URL + \"disconnect\", { \"sink\": sink })\n\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"unregister \" + pid)\n luna.call(API_URL + \"unregister\", { \"context\": pid })\n\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog(\"connect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n { \"outputMode\": \"DISPLAY\", \"sink\": sink, \"source\": source, \"sourcePort\": port },\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"connectedSource\": source, \"connectedSourcePort\": port}]})\n\n def mute(self, sink, blank):\n self.vlog(\"- Mute\" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"blankVideo\",\n {\"sink\": sink, \"blank\": blank},\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"muted\": blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog(\"disconnect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"disconnect\", { \"sink\": sink },\n self.statusSub,\n {\"video\": [{\"sink\": sink, \"connectedSource\": None}]})\n\n def testConnectDisconnect(self):\n print(\"[testConnectDisconnect]\")\n for source, ports in {\"VDEC\":[0,1], \"HDMI\":[0,1,2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, \"\")\n self.disconnect(sink, \"\")\n\n def testDualConnect(self):\n print(\"[testDualConnect]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n {\"outputMode\": \"DISPLAY\", \"sink\": SINK_SUB, \"source\": SOURCE_NAME, \"sourcePort\": SOURCE_PORT},\n self.statusSub,\n {\"video\": [{\"sink\": SINK_MAIN, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT},\n {\"sink\": SINK_SUB, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT}]})\n\n self.disconnect(SINK_MAIN, \"\")\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, \"\")\n\n def testMute(self):\n print(\"[testMute]\")\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, \"\")\n\n for blank in [False, True]:\n self.mute(sink, blank)\n\n #test different orders of display window and media data\n\n def 
testSetDisplayWindowAndVideoData(self):\n print(\"[testSetDisplayWindowAndVideoData]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":0,\n \"height\":0,\n \"frameRate\":0,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}, # no media data yet so can't determine appliedsourceInput yet\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print(\"[testSetVideoDataAndDisplayWindow]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print(\"[testSetFullscreen]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n 
\"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":3840, \"height\":2160}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print(\"[testSetCompositing]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setCompositing\",\n {\"composeOrder\": [{\"sink\":SINK_MAIN, \"opacity\":20, \"zOrder\":1},\n {\"sink\":SINK_SUB, \"opacity\":31, \"zOrder\":0}]},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":20, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN, \"fullScreen\":True, \"opacity\":130},\n self.statusSub, {\"video\":[{\"sink\": SINK_MAIN, \"opacity\":130, \"zOrder\":1}]})\n\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":200},\n self.statusSub, {\"video\":[{\"sink\": \"SUB0\", \"opacity\":200, \"zOrder\":0}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":230},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":230, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":30, \"zOrder\": 1},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":30, \"zOrder\":1}]})\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-ids": [
11,
14,
15,
17,
18
]
}
|
[
11,
14,
15,
17,
18
] |
def assert_shapes(shape, other):
    """Assert that two shape tuples match, treating None as a wildcard dimension."""
    assert len(shape) == len(other), 'Dimensions are different'
    for s, o in zip(shape, other):
        if s is not None and o is not None:
            assert s == o, 'Shapes {} and {} are not equal'.format(shape, other)
|
flexible
|
{
"blob_id": "337311c3fbb6a8baab7a237d08152f0db9822527",
"index": 2931,
"step-1": "<mask token>\n",
"step-2": "def assert_shapes(shape, other):\n assert len(shape) == len(other), 'Dimensions are different'\n for s, o in zip(shape, other):\n if s is not None and o is not None:\n assert s == o, 'Shapes {} and {} are not equal'.format(shape, other\n )\n",
"step-3": "\ndef assert_shapes(shape, other):\n assert len(shape) == len(other), \"Dimensions are different\"\n for s, o in zip(shape, other):\n if s is not None and o is not None:\n assert s == o, \"Shapes {} and {} are not equal\".format(shape, other)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as colors
import matplotlib.cm as cm
# The original module used spot_count_cutoff without defining it anywhere; this
# default is an assumed placeholder so the plt.axhline() calls below have a value.
spot_count_cutoff = 10
def plot_hist(data_list):
plt.hist(data_list, bins=500)
plt.show()
return
def compare_hits_plot(np_array, compare=False):
if compare:
clist = list(np_array[:,2])
minima, maxima = min(clist), max(clist)
        print(minima, maxima)  # print() form so this also runs under Python 3
        hits = np_array[np_array[:, 2] == 1]
        total_hits = np_array[np_array[:, 2] >= 1]  # computed but not used below
        scatter = plt.scatter(np_array[:, 3], np_array[:, 1], c=clist, vmin=0, vmax=1, s=8, cmap=cm.winter)
        plt.ylim(0, max(hits[:, 3]))  # positional limits work across Matplotlib versions
plt.colorbar(scatter)
plt.axhline(spot_count_cutoff)
else:
scatter = plt.scatter(np_array[:,3], np_array[:,1])
def pickle_ratio_plot(np_array):
clist = list(np_array[:,5])
minima, maxima = min(clist), max(clist)
    print(minima, maxima)
scatter = plt.scatter(np_array[:,1], np_array[:,2], c=clist, s=8, cmap=cm.winter)
plt.colorbar(scatter)
plt.axhline(spot_count_cutoff)
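# Illustrative usage (hypothetical data; the 6-column layout is assumed from the indexing above):
# import numpy as np
# data = np.random.rand(200, 6)
# data[:, 2] = np.random.randint(0, 2, 200)   # hit flags in column 2
# compare_hits_plot(data, compare=True)
# plt.show()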
|
normal
|
{
"blob_id": "b6adb956aed934451fc21e51663be36d08c5b645",
"index": 2535,
"step-1": "import matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\n\ndef plot_hist(data_list):\n plt.hist(data_list, bins=500)\n plt.show()\n return\n\ndef compare_hits_plot(np_array, compare=False):\n if compare:\n clist = list(np_array[:,2])\n minima, maxima = min(clist), max(clist)\n print minima, maxima\n hits=np_array[np_array[:,2]==1]\n total_hits=np_array[np_array[:,2]>=1]\n scatter = plt.scatter(np_array[:,3], np_array[:,1], c=clist, vmin=0, vmax=1, s=8, cmap=cm.winter)\n plt.ylim(ymin=0, ymax=max(hits[:,3]))\n plt.colorbar(scatter)\n plt.axhline(spot_count_cutoff)\n else:\n scatter = plt.scatter(np_array[:,3], np_array[:,1])\n\n\ndef pickle_ratio_plot(np_array):\n clist = list(np_array[:,5])\n minima, maxima = min(clist), max(clist)\n print minima, maxima\n scatter = plt.scatter(np_array[:,1], np_array[:,2], c=clist, s=8, cmap=cm.winter)\n plt.colorbar(scatter)\n plt.axhline(spot_count_cutoff)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
import os
from Alfred3 import Items, Tools
def to_absolute_path(filepath):
filepath = os.path.expanduser(filepath)
return os.path.abspath(filepath)
def is_valid_path(path):
    abs_path = to_absolute_path(path)
    # isdir() already returns False when the path does not exist
    return os.path.isdir(abs_path)
env_source = Tools.getEnv("source")
env_target = Tools.getEnv("target")
query = Tools.getArgv(1)
path_to_ask = "source" if env_source == "" else "target"
new_path = to_absolute_path(query)
wf = Items()
if query != "" and is_valid_path(new_path):
wf.setItem(
title=f"Path exists, add as {path_to_ask} path?",
subtitle=new_path,
arg=f"{new_path}|add"
)
elif query.startswith("/") or query.startswith("~"):
wf.setItem(
title="Path does not exists, create?",
subtitle=new_path,
arg=f"{new_path}|create"
)
else:
wf.setItem(
title=f"Enter {path_to_ask} path",
subtitle="Type a directory path starting with / or ~",
valid=False
)
wf.addItem()
wf.write()
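# Illustrative invocation outside Alfred (the script name here is hypothetical):
#   python3 ask_path.py "~/Projects"
# Alfred's Script Filter passes the typed query as argv[1] and renders the JSON emitted by wf.write().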
|
normal
|
{
"blob_id": "1cf573863fca660cc1fec71ab64743e7a2dd74d8",
"index": 1730,
"step-1": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\n<mask token>\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-3": "<mask token>\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-4": "import os\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv('source')\nenv_target = Tools.getEnv('target')\nquery = Tools.getArgv(1)\npath_to_ask = 'source' if env_source == '' else 'target'\nnew_path = to_absolute_path(query)\nwf = Items()\nif query != '' and is_valid_path(new_path):\n wf.setItem(title=f'Path exists, add as {path_to_ask} path?', subtitle=\n new_path, arg=f'{new_path}|add')\nelif query.startswith('/') or query.startswith('~'):\n wf.setItem(title='Path does not exists, create?', subtitle=new_path,\n arg=f'{new_path}|create')\nelse:\n wf.setItem(title=f'Enter {path_to_ask} path', subtitle=\n 'Type a directory path starting with / or ~', valid=False)\nwf.addItem()\nwf.write()\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\n\nfrom Alfred3 import Items, Tools\n\n\ndef to_absolute_path(filepath):\n filepath = os.path.expanduser(filepath)\n return os.path.abspath(filepath)\n\n\ndef is_valid_path(path):\n abs_path = to_absolute_path(path)\n if os.path.exists(abs_path) and os.path.isdir(abs_path):\n return True\n else:\n return False\n\n\nenv_source = Tools.getEnv(\"source\")\nenv_target = Tools.getEnv(\"target\")\nquery = Tools.getArgv(1)\n\npath_to_ask = \"source\" if env_source == \"\" else \"target\"\n\nnew_path = to_absolute_path(query)\n\n\nwf = Items()\n\nif query != \"\" and is_valid_path(new_path):\n wf.setItem(\n title=f\"Path exists, add as {path_to_ask} path?\",\n subtitle=new_path,\n arg=f\"{new_path}|add\"\n )\nelif query.startswith(\"/\") or query.startswith(\"~\"):\n wf.setItem(\n title=\"Path does not exists, create?\",\n subtitle=new_path,\n arg=f\"{new_path}|create\"\n )\nelse:\n wf.setItem(\n title=f\"Enter {path_to_ask} path\",\n subtitle=\"Type a directory path starting with / or ~\",\n valid=False\n )\nwf.addItem()\nwf.write()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from __future__ import absolute_import, print_function, unicode_literals
import six
from six.moves import zip, filter, map, reduce, input, range
import pathlib
import unittest
import networkx as nx
import multiworm
TEST_ROOT = pathlib.Path(__file__).parent.resolve()
DATA_DIR = TEST_ROOT / 'data'
SYNTH1 = DATA_DIR / 'synth1'
SYNTH1_N_BLOBS = 12
class TestExperimentOpen(unittest.TestCase):
def test_pathlib(self):
ex = multiworm.Experiment(SYNTH1)
def test_strpath(self):
ex = multiworm.Experiment(str(SYNTH1))
def test_root_and_id(self):
ex = multiworm.Experiment(
data_root=DATA_DIR,
experiment_id='synth1',
)
def test_strroot_and_id(self):
ex = multiworm.Experiment(
data_root=str(DATA_DIR),
experiment_id='synth1',
)
def test_empty_fail(self):
try:
multiworm.Experiment()
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor worked with no arguments')
def test_dataroot_only_fail(self):
try:
multiworm.Experiment(data_root=DATA_DIR)
except Exception as e:
if not isinstance(e, ValueError):
self.fail('raised some unexpected error')
if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):
self.fail('error message unexpected')
else:
self.fail('experiment constructor allowed data-root only without erroring')
def test_custom_id(self):
my_id = 'peterspeppers'
ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)
        self.assertEqual(ex.id, my_id)
def test_callback(self):
class StateThing(object):
def __init__(self):
self.progress = -1
def __call__(self, progress):
assert progress >= self.progress
self.progress = progress
ex = multiworm.Experiment(SYNTH1, callback=StateThing())
class TestMalformedExperiments(unittest.TestCase):
def test_nonexistent_folder(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'guaranteedtohopefullynotbethere')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('exist', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_check_is_dir(self):
try:
ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')
except multiworm.core.MWTDataError:
self.fail('Overly specific error raised')
except IOError as e:
self.assertIn('directory', str(e))
else:
self.fail("Didn't even mention the folder isn't there")
def test_missing_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_empty')
except multiworm.core.MWTDataError as e:
pass
else:
self.fail("Didn't raise error despite no summary file")
def test_dupe_summary(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')
except multiworm.core.MWTSummaryError as e:
pass
else:
self.fail("Didn't raise error with ambiguous summary file")
class TestMalformedData(unittest.TestCase):
def test_zero_frame(self):
try:
ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')
except multiworm.core.MWTDataError:
pass
else:
self.fail("Didn't raise error on malformed data with a frame 0")
class TestReadingData(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_length_is_num_blobs(self):
self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))
def test_iter(self):
count = 0
for thing in self.ex:
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
def test_iter_blobs(self):
count = 0
for thing in self.ex.blobs():
count += 1
self.assertEqual(SYNTH1_N_BLOBS, count)
class TestExperimentProperties(unittest.TestCase):
def setUp(self):
self.ex = multiworm.Experiment(SYNTH1)
def test_blobs_in_frame(self):
        self.assertEqual(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))
        self.assertEqual(list(self.ex.blobs_in_frame(200)), list(range(5, 12)))
def test_locked_graph(self):
try:
self.ex.graph.add_node(123)
except nx.NetworkXError as e:
self.assertIn('frozen', str(e).lower())
else:
self.fail('experiment graph should be frozen/locked')
def test_graph_copy_unlocked(self):
G = self.ex.graph.copy()
G.add_node(123)
G.add_edge(55, 66)
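# Illustrative test run (module path is hypothetical; the data/synth1 and data/bad_* fixtures must sit next to this file):
#   python -m unittest tests.test_experiment -v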
|
normal
|
{
"blob_id": "dfee0407eaed7b1ab96467874bbfe6463865bcb4",
"index": 6238,
"step-1": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-2": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-3": "<mask token>\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n <mask token>\n <mask token>\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=\n 'synth1')\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail(\n 'experiment constructor allowed data-root only without erroring'\n )\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n 
self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-4": "<mask token>\nTEST_ROOT = pathlib.Path(__file__).parent.resolve()\nDATA_DIR = TEST_ROOT / 'data'\nSYNTH1 = DATA_DIR / 'synth1'\nSYNTH1_N_BLOBS = 12\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n\n def test_strpath(self):\n ex = multiworm.Experiment(str(SYNTH1))\n\n def test_root_and_id(self):\n ex = multiworm.Experiment(data_root=DATA_DIR, experiment_id='synth1')\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(data_root=str(DATA_DIR), experiment_id=\n 'synth1')\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must',\n 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail(\n 'experiment constructor allowed data-root only without erroring'\n )\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n\n\n class StateThing(object):\n\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR /\n 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n 
self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12))\n )\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-5": "from __future__ import absolute_import, print_function, unicode_literals\nimport six\nfrom six.moves import zip, filter, map, reduce, input, range\n\nimport pathlib\nimport unittest\n\nimport networkx as nx\n\nimport multiworm\n\n\nTEST_ROOT = pathlib.Path(__file__).parent.resolve()\nDATA_DIR = TEST_ROOT / 'data'\nSYNTH1 = DATA_DIR / 'synth1'\n\nSYNTH1_N_BLOBS = 12\n\n\nclass TestExperimentOpen(unittest.TestCase):\n\n def test_pathlib(self):\n ex = multiworm.Experiment(SYNTH1)\n\n def test_strpath(self):\n ex = multiworm.Experiment(str(SYNTH1))\n\n def test_root_and_id(self):\n ex = multiworm.Experiment(\n data_root=DATA_DIR,\n experiment_id='synth1',\n )\n\n def test_strroot_and_id(self):\n ex = multiworm.Experiment(\n data_root=str(DATA_DIR),\n experiment_id='synth1',\n )\n\n def test_empty_fail(self):\n try:\n multiworm.Experiment()\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor worked with no arguments')\n\n def test_dataroot_only_fail(self):\n try:\n multiworm.Experiment(data_root=DATA_DIR)\n except Exception as e:\n if not isinstance(e, ValueError):\n self.fail('raised some unexpected error')\n if not all(x in str(e) for x in ['experiment_id', 'must', 'provided']):\n self.fail('error message unexpected')\n else:\n self.fail('experiment constructor allowed data-root only without erroring')\n\n def test_custom_id(self):\n my_id = 'peterspeppers'\n ex = multiworm.Experiment(fullpath=SYNTH1, experiment_id=my_id)\n self.assertEquals(ex.id, my_id)\n\n def test_callback(self):\n class StateThing(object):\n def __init__(self):\n self.progress = -1\n\n def __call__(self, progress):\n assert progress >= self.progress\n self.progress = progress\n\n ex = multiworm.Experiment(SYNTH1, callback=StateThing())\n\n\nclass TestMalformedExperiments(unittest.TestCase):\n\n def test_nonexistent_folder(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'guaranteedtohopefullynotbethere')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('exist', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_check_is_dir(self):\n try:\n ex = multiworm.Experiment(SYNTH1 / 'test_blobsfile.png')\n except multiworm.core.MWTDataError:\n self.fail('Overly specific error raised')\n except IOError as e:\n self.assertIn('directory', str(e))\n else:\n self.fail(\"Didn't even mention the folder isn't there\")\n\n def test_missing_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_empty')\n except multiworm.core.MWTDataError as e:\n pass\n else:\n self.fail(\"Didn't raise error despite no summary file\")\n\n def test_dupe_summary(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_twosummary')\n except multiworm.core.MWTSummaryError as e:\n pass\n else:\n self.fail(\"Didn't raise error with ambiguous summary file\")\n\n\nclass TestMalformedData(unittest.TestCase):\n\n def test_zero_frame(self):\n try:\n ex = multiworm.Experiment(DATA_DIR / 'bad_framezero')\n except multiworm.core.MWTDataError:\n pass\n else:\n self.fail(\"Didn't raise error on malformed data with a frame 0\")\n\n\nclass TestReadingData(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_length_is_num_blobs(self):\n self.assertEqual(SYNTH1_N_BLOBS, len(self.ex))\n\n def 
test_iter(self):\n count = 0\n for thing in self.ex:\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n def test_iter_blobs(self):\n count = 0\n for thing in self.ex.blobs():\n count += 1\n self.assertEqual(SYNTH1_N_BLOBS, count)\n\n\nclass TestExperimentProperties(unittest.TestCase):\n\n def setUp(self):\n self.ex = multiworm.Experiment(SYNTH1)\n\n def test_blobs_in_frame(self):\n self.assertEquals(list(self.ex.blobs_in_frame(10)), list(range(1, 12)))\n self.assertEquals(list(self.ex.blobs_in_frame(200)), list(range(5, 12)))\n\n def test_locked_graph(self):\n try:\n self.ex.graph.add_node(123)\n except nx.NetworkXError as e:\n self.assertIn('frozen', str(e).lower())\n else:\n self.fail('experiment graph should be frozen/locked')\n\n def test_graph_copy_unlocked(self):\n G = self.ex.graph.copy()\n G.add_node(123)\n G.add_edge(55, 66)\n",
"step-ids": [
18,
19,
24,
27,
29
]
}
|
[
18,
19,
24,
27,
29
] |
import math
import pendulum
from none import *
@on_command('yearprogress')
async def year_progress(session: CommandSession):
await session.send(get_year_progress())
def get_year_progress():
dt = pendulum.now()
percent = year_progress(dt)
year = dt.year
    # Reply text (Chinese): "Your {year} progress: {percent}%", followed by the bar.
    return f'你的 {year} 使用进度:{percent}%\n' \
f'\n\n' \
f'{make_progress_string(percent)}'
def year_progress(dt):
year_days = 366 if dt.is_leap_year() else 365
passed_days = dt.timetuple().tm_yday
percent = math.floor((passed_days / year_days) * 100)
return percent
def make_progress_string(percent):
blocks = 15
percent = percent * blocks / 100
return ''.join(["▓" if i < percent else "░" for i in range(blocks)])
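Two quick notes on the plugin above. First, the name `year_progress` is bound twice: the decorated command handler is registered with the framework before the plain date helper `year_progress(dt)` rebinds the module-level name, so `get_year_progress` still calls the date helper as intended (assuming the framework keeps its own reference to registered handlers, as decorator registries normally do). Second, a minimal, illustrative sanity check of the bar rendering, assuming the definitions above are in scope; it is not part of the original plugin:

# Illustrative checks for make_progress_string (requires the definition above to be in scope).
assert make_progress_string(0) == '░' * 15
assert make_progress_string(100) == '▓' * 15
assert make_progress_string(50) == '▓' * 8 + '░' * 7  # 50 * 15 / 100 = 7.5 -> blocks 0..7 filled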
from flask import url_for
from bs4 import BeautifulSoup
from unittest.mock import ANY
import app
from app.notify_client.models import InvitedUser
from tests.conftest import sample_invite as create_sample_invite
from tests.conftest import mock_check_invite_token as mock_check_token_invite
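# View tests for the `main.accept_invite` endpoint. The cases below cover existing
# users (added to the service or asked to sign in again), brand-new users (sent
# through the register-from-invite and verify pages), cancelled invitations, and
# an invite opened while signed in as a different user.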
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
client, service_one, api_user_active, sample_invite, mock_get_service,
mock_check_invite_token, mock_get_user_by_email,
mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):
expected_service = service_one['id']
expected_redirect_location = ('http://localhost/services/{}/dashboard'.
format(expected_service))
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
assert mock_accept_invite.call_count == 1
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_existing_user_with_no_permissions_accept_invite(client, mocker,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_get_service):
expected_service = service_one['id']
sample_invite['permissions'] = ''
expected_permissions = []
mocker.patch('app.invite_api_client.accept_invite', return_value=
sample_invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,
mocker, sample_invite, mock_get_service):
sample_invite['status'] = 'accepted'
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
def test_existing_user_of_service_get_redirected_to_signin(client, mocker,
api_user_active, sample_invite, mock_get_service,
mock_get_user_by_email, mock_accept_invite):
sample_invite['email_address'] = api_user_active.email_address
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,
service_one, api_user_active, sample_invite, mock_check_invite_token,
mock_get_user_by_email, mock_get_users_by_service,
mock_add_user_to_service, mock_accept_invite, mock_get_service):
expected_service = service_one['id']
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_get_user_by_email.assert_called_with('[email protected]')
mock_add_user_to_service.assert_called_with(expected_service,
api_user_active.id, expected_permissions)
assert mock_accept_invite.call_count == 1
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert (page.h1.string, page.select('main p')[0].text.strip()) == (
'You need to sign in again',
'We signed you out because you haven’t used Notify for a while.')
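# The next tests cover users who do not yet have an account: accepting an invite
# should redirect them to the register-from-invite page, with the invited email
# address carried through to the registration form as a hidden field.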
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 302
assert response.location == expected_redirect_location
def test_new_user_accept_invite_calls_api_and_views_registration_page(client,
service_one, mock_check_invite_token, mock_dont_get_user_by_email,
mock_add_user_to_service, mock_get_users_by_service, mock_get_service):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
mock_check_invite_token.assert_called_with('thisisnotarealtoken')
mock_dont_get_user_by_email.assert_called_with('[email protected]')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == 'Create an account'
email_in_page = page.find('main').find('p')
    assert email_in_page.text.strip() == 'Your account will be created with this email: [email protected]'
form = page.find('form')
name = form.find('input', id='name')
password = form.find('input', id='password')
service = form.find('input', type='hidden', id='service')
email = form.find('input', type='hidden', id='email_address')
assert email
assert email.attrs['value'] == '[email protected]'
assert name
assert password
assert service
assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
client, service_one, mocker, mock_get_user, mock_get_service):
cancelled_invitation = create_sample_invite(mocker, service_one, status
='cancelled')
mock_check_token_invite(mocker, cancelled_invitation)
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
assert response.status_code == 200
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
client, service_one, sample_invite, api_user_active,
mock_check_invite_token, mock_dont_get_user_by_email,
mock_is_email_unique, mock_register_user, mock_send_verify_code,
mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,
mock_get_service):
expected_service = service_one['id']
expected_email = sample_invite['email_address']
expected_from_user = service_one['users'][0]
expected_redirect_location = 'http://localhost/register-from-invite'
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'], 'email_address':
invited_user['email_address'], 'from_user': invited_user[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[
'mobile_number'])
mock_register_user.assert_called_with(data['name'], data[
'email_address'], data['mobile_number'], data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,
mocker, api_user_active, sample_invite, mock_get_user,
mock_accept_invite, mock_get_service):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=
[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert 'You’re signed in as [email protected].' in banner_contents
assert 'This invite is for another email address.' in banner_contents
assert 'Sign out and click the link again to accept this invite.' in banner_contents
assert mock_accept_invite.call_count == 0
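# End-to-end happy path for a newly invited user: accept the invite, submit the
# registration form, enter the SMS verification code, and land on the service
# dashboard with the expected permissions granted.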
def test_new_invited_user_verifies_and_added_to_service(client, service_one,
sample_invite, api_user_active, mock_check_invite_token,
mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,
mock_send_verify_code, mock_check_verify_code, mock_get_user,
mock_update_user, mock_add_user_to_service, mock_accept_invite,
mock_get_service, mock_get_service_templates,
mock_get_template_statistics, mock_get_jobs, mock_has_permissions,
mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):
response = client.get(url_for('main.accept_invite', token=
'thisisnotarealtoken'))
data = {'service': sample_invite['service'], 'email_address':
sample_invite['email_address'], 'from_user': sample_invite[
'from_user'], 'password': 'longpassword', 'mobile_number':
'+447890123456', 'name': 'Invited User'}
response = client.post(url_for('main.register_from_invite'), data=data)
response = client.post(url_for('main.verify'), data={'sms_code':
'12345'}, follow_redirects=True)
expected_permissions = ['send_messages', 'manage_service',
'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'],
new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'],
sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345',
'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
with client.session_transaction() as session:
assert response.status_code == 302
assert response.location == expected_redirect_location
invited_user = session.get('invited_user')
assert invited_user
assert expected_service == invited_user['service']
assert expected_email == invited_user['email_address']
assert expected_from_user == invited_user['from_user']
data = {'service': invited_user['service'],
'email_address': invited_user['email_address'],
'from_user': invited_user['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
expected_redirect_location = 'http://localhost/verify'
response = client.post(url_for('main.register_from_invite'), data=data)
assert response.status_code == 302
assert response.location == expected_redirect_location
mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])
mock_register_user.assert_called_with(data['name'],
data['email_address'],
data['mobile_number'],
data['password'])
assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(
logged_in_client,
mocker,
api_user_active,
sample_invite,
mock_get_user,
mock_accept_invite,
mock_get_service,
):
invite = InvitedUser(**sample_invite)
mocker.patch('app.invite_api_client.check_token', return_value=invite)
mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])
response = logged_in_client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)
assert response.status_code == 403
page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
assert page.h1.string.strip() == '403'
flash_banners = page.find_all('div', class_='banner-dangerous')
assert len(flash_banners) == 1
banner_contents = flash_banners[0].text.strip()
assert "You’re signed in as [email protected]." in banner_contents
assert "This invite is for another email address." in banner_contents
assert "Sign out and click the link again to accept this invite." in banner_contents
assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(
client,
service_one,
sample_invite,
api_user_active,
mock_check_invite_token,
mock_dont_get_user_by_email,
mock_is_email_unique,
mock_register_user,
mock_send_verify_code,
mock_check_verify_code,
mock_get_user,
mock_update_user,
mock_add_user_to_service,
mock_accept_invite,
mock_get_service,
mock_get_service_templates,
mock_get_template_statistics,
mock_get_jobs,
mock_has_permissions,
mock_get_users_by_service,
mock_get_detailed_service,
mock_get_usage,
):
# visit accept token page
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
data = {'service': sample_invite['service'],
'email_address': sample_invite['email_address'],
'from_user': sample_invite['from_user'],
'password': 'longpassword',
'mobile_number': '+447890123456',
'name': 'Invited User'
}
# get redirected to register from invite
response = client.post(url_for('main.register_from_invite'), data=data)
# that sends user on to verify
response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)
    # when they post the code back to the admin, the user should be added
    # to the service and sent on to the dashboard
expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
with client.session_transaction() as session:
new_user_id = session['user_id']
mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)
mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])
mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')
assert service_one['id'] == session['service_id']
raw_html = response.data.decode('utf-8')
page = BeautifulSoup(raw_html, 'html.parser')
assert page.find('h1').text == 'Dashboard'
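# A minimal sketch of what the sample_invite fixture from tests/conftest.py
# (used by the tests above) might return. This is a hypothetical
# reconstruction inferred only from the fields the tests read and assert on
# (id, service, email_address, from_user, permissions, status); the real
# fixture may differ.
import uuid

import pytest


@pytest.fixture
def sample_invite(service_one):
    # '[email protected]' and the comma-separated permissions string
    # mirror the values asserted in the tests above.
    return {
        'id': str(uuid.uuid4()),
        'service': service_one['id'],
        'email_address': '[email protected]',
        'from_user': service_one['users'][0],
        'permissions': 'send_messages,manage_service,manage_api_keys',
        'status': 'pending',
    }
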
def iterative_train_test(X, y, test_size):
"""
Iteratively splits data with stratification.
This function is based on the iterative_train_test_split function from the
skmultilearn.model_selection package, but uses pandas dataframes as input and output.
Parameters
----------
X : pandas dataframe
Data samples.
y : array or sparse matrix
Indicator matrix.
test_size : float [0,1]
The proportion of the dataset to include in the test split, the rest will be put in the train set.
Returns
-------
X_train : pandas dataframe
Training samples.
y_train : array or sparse matrix
Indicator matrix of the training samples.
X_test : pandas dataframe
Test samples.
y_test : array or sparse matrix
Indicator matrix of the test samples.
"""
stratifier = IterativeStratification(n_splits=2, order=2,
sample_distribution_per_fold=[test_size, 1.0 - test_size])
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X.iloc[train_indexes], y[train_indexes]
X_test, y_test = X.iloc[test_indexes], y[test_indexes]
return X_train, y_train, X_test, y_test
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from skmultilearn.model_selection import IterativeStratification
def iterative_train_test(X, y, test_size):
"""
Iteratively splits data with stratification.
This function is based on the iterative_train_test_split function from the
skmultilearn.model_selection package, but uses pandas dataframes as input and output.
Parameters
----------
X : pandas dataframe
Data samples.
y : array or sparse matrix
Indicator matrix.
test_size : float [0,1]
The proportion of the dataset to include in the test split, the rest will be put in the train set.
Returns
-------
X_train : pandas dataframe
Training samples.
y_train : array or sparse matrix
Indicator matrix of the training samples.
X_test : pandas dataframe
Test samples.
y_test : array or sparse matrix
Indicator matrix of the test samples.
"""
stratifier = IterativeStratification(n_splits=2, order=2,
sample_distribution_per_fold=[test_size, 1.0 - test_size])
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X.iloc[train_indexes], y[train_indexes]
X_test, y_test = X.iloc[test_indexes], y[test_indexes]
return X_train, y_train, X_test, y_test
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
""" This module provides a function for splitting datasets."""
from skmultilearn.model_selection import IterativeStratification
def iterative_train_test(X, y, test_size):
"""
Iteratively splits data with stratification.
This function is based on the iterative_train_test_split function from the
skmultilearn.model_selection package, but uses pandas dataframes as input and output.
Parameters
----------
X : pandas dataframe
Data samples.
y : array or sparse matrix
Indicator matrix.
test_size : float [0,1]
The proportion of the dataset to include in the test split, the rest will be put in the train set.
Returns
-------
X_train : pandas dataframe
Training samples.
y_train : array or sparse matrix
Indicator matrix of the training samples.
X_test : pandas dataframe
Test samples.
y_test : array or sparse matrix
Indicator matrix of the test samples.
"""
stratifier = IterativeStratification(n_splits=2, order=2, sample_distribution_per_fold=[test_size, 1.0-test_size])
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X.iloc[train_indexes], y[train_indexes]
X_test, y_test = X.iloc[test_indexes], y[test_indexes]
return X_train, y_train, X_test, y_test
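# --- Editor-added usage sketch. The toy DataFrame and indicator matrix below
# --- are invented for illustration only; they are not part of this repository.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    X_demo = pd.DataFrame({"text": ["sample {}".format(i) for i in range(8)]})
    y_demo = np.array([[1, 0], [0, 1], [1, 1], [1, 0],
                       [0, 1], [1, 1], [1, 0], [0, 1]])

    # roughly 25% of the rows end up in the test split, stratified per label
    X_tr, y_tr, X_te, y_te = iterative_train_test(X_demo, y_demo, test_size=0.25)
    print(len(X_tr), "train rows,", len(X_te), "test rows")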
|
flexible
|
{
"blob_id": "c4c068c7b50d1811f224701ad7e95d88f6734230",
"index": 2867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef iterative_train_test(X, y, test_size):\n \"\"\"\n Iteratively splits data with stratification.\n\n This function is based on the iterative_train_test_split function from the\n skmultilearn.model_selection package, but uses pandas dataframes as input and output.\n\n Parameters\n ----------\n X : pandas dataframe\n Data samples.\n y : array or sparse matrix\n Indicator matrix.\n test_size : float [0,1]\n The proportion of the dataset to include in the test split, the rest will be put in the train set.\n\n Returns\n -------\n X_train : pandas dataframe\n Training samples.\n y_train : array or sparse matrix\n Indicator matrix of the training samples.\n X_test : pandas dataframe\n Test samples.\n y_test : array or sparse matrix\n Indicator matrix of the test samples.\n\n \"\"\"\n stratifier = IterativeStratification(n_splits=2, order=2,\n sample_distribution_per_fold=[test_size, 1.0 - test_size])\n train_indexes, test_indexes = next(stratifier.split(X, y))\n X_train, y_train = X.iloc[train_indexes], y[train_indexes]\n X_test, y_test = X.iloc[test_indexes], y[test_indexes]\n return X_train, y_train, X_test, y_test\n",
"step-3": "<mask token>\nfrom skmultilearn.model_selection import IterativeStratification\n\n\ndef iterative_train_test(X, y, test_size):\n \"\"\"\n Iteratively splits data with stratification.\n\n This function is based on the iterative_train_test_split function from the\n skmultilearn.model_selection package, but uses pandas dataframes as input and output.\n\n Parameters\n ----------\n X : pandas dataframe\n Data samples.\n y : array or sparse matrix\n Indicator matrix.\n test_size : float [0,1]\n The proportion of the dataset to include in the test split, the rest will be put in the train set.\n\n Returns\n -------\n X_train : pandas dataframe\n Training samples.\n y_train : array or sparse matrix\n Indicator matrix of the training samples.\n X_test : pandas dataframe\n Test samples.\n y_test : array or sparse matrix\n Indicator matrix of the test samples.\n\n \"\"\"\n stratifier = IterativeStratification(n_splits=2, order=2,\n sample_distribution_per_fold=[test_size, 1.0 - test_size])\n train_indexes, test_indexes = next(stratifier.split(X, y))\n X_train, y_train = X.iloc[train_indexes], y[train_indexes]\n X_test, y_test = X.iloc[test_indexes], y[test_indexes]\n return X_train, y_train, X_test, y_test\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\" This module provides a function for splitting datasets.\"\"\"\n\nfrom skmultilearn.model_selection import IterativeStratification\n\ndef iterative_train_test(X, y, test_size):\n \"\"\"\n Iteratively splits data with stratification.\n\n This function is based on the iterative_train_test_split function from the\n skmultilearn.model_selection package, but uses pandas dataframes as input and output.\n\n Parameters\n ----------\n X : pandas dataframe\n Data samples.\n y : array or sparse matrix\n Indicator matrix.\n test_size : float [0,1]\n The proportion of the dataset to include in the test split, the rest will be put in the train set.\n\n Returns\n -------\n X_train : pandas dataframe\n Training samples.\n y_train : array or sparse matrix\n Indicator matrix of the training samples.\n X_test : pandas dataframe\n Test samples.\n y_test : array or sparse matrix\n Indicator matrix of the test samples.\n\n \"\"\"\n stratifier = IterativeStratification(n_splits=2, order=2, sample_distribution_per_fold=[test_size, 1.0-test_size])\n train_indexes, test_indexes = next(stratifier.split(X, y))\n\n X_train, y_train = X.iloc[train_indexes], y[train_indexes]\n X_test, y_test = X.iloc[test_indexes], y[test_indexes]\n\n return X_train, y_train, X_test, y_test\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Food(Turtle):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape('circle')
self.penup()
self.color('red')
self.speed('fastest')
self.refresh()
def refresh(self):
self.color(random.choice(colors))
self.goto(random.randint(-280, 280), random.randint(-280, 280))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
colors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',
'grey', 'cyan', 'brown']
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape('circle')
self.penup()
self.color('red')
self.speed('fastest')
self.refresh()
def refresh(self):
self.color(random.choice(colors))
self.goto(random.randint(-280, 280), random.randint(-280, 280))
<|reserved_special_token_1|>
import random
from turtle import Turtle
colors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',
'grey', 'cyan', 'brown']
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape('circle')
self.penup()
self.color('red')
self.speed('fastest')
self.refresh()
def refresh(self):
self.color(random.choice(colors))
self.goto(random.randint(-280, 280), random.randint(-280, 280))
<|reserved_special_token_1|>
import random
from turtle import Turtle
colors = ["red", "blue", 'green', 'peru', 'purple', 'pink', 'chocolate', 'grey', 'cyan', 'brown']
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape("circle")
self.penup()
self.color("red")
self.speed("fastest")
self.refresh()
def refresh(self):
self.color(random.choice(colors))
self.goto(random.randint(-280, 280), random.randint(-280, 280))
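# --- Editor-added usage sketch. Assumes the standard turtle Screen, which is
# --- not shown in this file; the window size below is an arbitrary choice.
if __name__ == "__main__":
    from turtle import Screen

    screen = Screen()
    screen.setup(width=600, height=600)
    food = Food()       # circle at a random position with a random color
    food.refresh()      # jump to a new random spot and pick a new color
    screen.exitonclick()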
|
flexible
|
{
"blob_id": "8adda42dfebd3f394a1026720465824a836c1dd1",
"index": 7997,
"step-1": "<mask token>\n\n\nclass Food(Turtle):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-3": "<mask token>\ncolors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',\n 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-4": "import random\nfrom turtle import Turtle\ncolors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',\n 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-5": "import random\nfrom turtle import Turtle\n\ncolors = [\"red\", \"blue\", 'green', 'peru', 'purple', 'pink', 'chocolate', 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n\n self.shape(\"circle\")\n self.penup()\n self.color(\"red\")\n self.speed(\"fastest\")\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_table(self):
self.battle_table = QTableWidget()
self.battle_table.setColumnCount(8)
self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',
'date', 'time', 'kill_count', 'death', 'support', 'score'])
self.battle_table.setAlternatingRowColors(True)
self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.battle_table.resizeRowsToContents()
self.battle_table.doubleClicked.connect(self.on_click)
<|reserved_special_token_0|>
def showDialog(self, match_id):
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'
.format(match_id))
a = self.A()
<|reserved_special_token_0|>
def search(self):
print(self.user)
print(__name__)
runner = CrawlerRunner(get_project_settings())
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()
print('complete')
name = self.qle.text()
db = db_handle()
with db as con:
sql = (
"select * from player where name = '{}' order by update_time"
.format(name))
con.execute(sql)
player = con.fetchone()
if player:
(id, name, win, match_count, strength, level, update_time, rank
) = player
text = (
'角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}'
.format(name, win, match_count, strength, rank, level,
update_time))
self.txt.setText(text)
sql = ("select * from player_data where name = '{}' order by date"
.format(name))
con.execute(sql)
player_data = con.fetchall()
a = ''
for data in player_data:
a += str(data)
a += '\n'
self.battle.setText(str(a))
sql = 'select * from game_data order by match_id desc'
con.execute(sql)
game_data = con.fetchall()
a = ''
l = 0
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += '\n'
self.player_status.setText(str(a))
l += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
<|reserved_special_token_0|>
def initUI(self):
self.qle = QLineEdit('蔽月八云')
self.user = self.qle.text()
self.para = 'user={}'.format(self.user)
print(self.user, '1')
btn = QPushButton('查询', self)
btn.resize(btn.sizeHint())
btn.clicked.connect(self.search)
self.txt = QTextEdit()
self.battle = QTextEdit()
self.player_status = QTextEdit()
self.create_table()
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('application')
exitAction.triggered.connect(qApp.quit)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qle, 1, 0)
grid.addWidget(btn, 2, 0)
grid.addWidget(self.txt, 3, 0)
grid.addWidget(self.battle, 1, 1, 3, 1)
grid.addWidget(self.player_status, 4, 0, 2, 2)
grid.addWidget(self.battle_table, 6, 0, 2, 2)
self.setLayout(grid)
self.setGeometry(600, 600, 800, 600)
self.center()
self.setWindowTitle('战绩查询')
self.show()
def create_table(self):
self.battle_table = QTableWidget()
self.battle_table.setColumnCount(8)
self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',
'date', 'time', 'kill_count', 'death', 'support', 'score'])
self.battle_table.setAlternatingRowColors(True)
self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.battle_table.resizeRowsToContents()
self.battle_table.doubleClicked.connect(self.on_click)
<|reserved_special_token_0|>
def showDialog(self, match_id):
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'
.format(match_id))
a = self.A()
<|reserved_special_token_0|>
def search(self):
print(self.user)
print(__name__)
runner = CrawlerRunner(get_project_settings())
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()
print('complete')
name = self.qle.text()
db = db_handle()
with db as con:
sql = (
"select * from player where name = '{}' order by update_time"
.format(name))
con.execute(sql)
player = con.fetchone()
if player:
(id, name, win, match_count, strength, level, update_time, rank
) = player
text = (
'角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}'
.format(name, win, match_count, strength, rank, level,
update_time))
self.txt.setText(text)
sql = ("select * from player_data where name = '{}' order by date"
.format(name))
con.execute(sql)
player_data = con.fetchall()
a = ''
for data in player_data:
a += str(data)
a += '\n'
self.battle.setText(str(a))
sql = 'select * from game_data order by match_id desc'
con.execute(sql)
game_data = con.fetchall()
a = ''
l = 0
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += '\n'
self.player_status.setText(str(a))
l += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.qle = QLineEdit('蔽月八云')
self.user = self.qle.text()
self.para = 'user={}'.format(self.user)
print(self.user, '1')
btn = QPushButton('查询', self)
btn.resize(btn.sizeHint())
btn.clicked.connect(self.search)
self.txt = QTextEdit()
self.battle = QTextEdit()
self.player_status = QTextEdit()
self.create_table()
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('application')
exitAction.triggered.connect(qApp.quit)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qle, 1, 0)
grid.addWidget(btn, 2, 0)
grid.addWidget(self.txt, 3, 0)
grid.addWidget(self.battle, 1, 1, 3, 1)
grid.addWidget(self.player_status, 4, 0, 2, 2)
grid.addWidget(self.battle_table, 6, 0, 2, 2)
self.setLayout(grid)
self.setGeometry(600, 600, 800, 600)
self.center()
self.setWindowTitle('战绩查询')
self.show()
def create_table(self):
self.battle_table = QTableWidget()
self.battle_table.setColumnCount(8)
self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',
'date', 'time', 'kill_count', 'death', 'support', 'score'])
self.battle_table.setAlternatingRowColors(True)
self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.battle_table.resizeRowsToContents()
self.battle_table.doubleClicked.connect(self.on_click)
@pyqtSlot()
def on_click(self):
currentQTableWidgetItem = self.battle_table.selectedItems()[0]
match_id = currentQTableWidgetItem.text()
print(match_id)
self.showDialog(match_id)
def showDialog(self, match_id):
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'
.format(match_id))
a = self.A()
<|reserved_special_token_0|>
def search(self):
print(self.user)
print(__name__)
runner = CrawlerRunner(get_project_settings())
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()
print('complete')
name = self.qle.text()
db = db_handle()
with db as con:
sql = (
"select * from player where name = '{}' order by update_time"
.format(name))
con.execute(sql)
player = con.fetchone()
if player:
(id, name, win, match_count, strength, level, update_time, rank
) = player
text = (
'角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}'
.format(name, win, match_count, strength, rank, level,
update_time))
self.txt.setText(text)
sql = ("select * from player_data where name = '{}' order by date"
.format(name))
con.execute(sql)
player_data = con.fetchall()
a = ''
for data in player_data:
a += str(data)
a += '\n'
self.battle.setText(str(a))
sql = 'select * from game_data order by match_id desc'
con.execute(sql)
game_data = con.fetchall()
a = ''
l = 0
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += '\n'
self.player_status.setText(str(a))
l += 1
<|reserved_special_token_0|>
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.
Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
import pymysql
import requests
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner, CrawlerProcess
from scrapy.utils.project import get_project_settings
from spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport
from scrapy.settings import Settings
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from multiprocessing import Process
def db_handle():
con = pymysql.connect(host='localhost', user='web', passwd='web',
charset='utf8', database='heroes')
return con
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.qle = QLineEdit('蔽月八云')
self.user = self.qle.text()
self.para = 'user={}'.format(self.user)
print(self.user, '1')
btn = QPushButton('查询', self)
btn.resize(btn.sizeHint())
btn.clicked.connect(self.search)
self.txt = QTextEdit()
self.battle = QTextEdit()
self.player_status = QTextEdit()
self.create_table()
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('application')
exitAction.triggered.connect(qApp.quit)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qle, 1, 0)
grid.addWidget(btn, 2, 0)
grid.addWidget(self.txt, 3, 0)
grid.addWidget(self.battle, 1, 1, 3, 1)
grid.addWidget(self.player_status, 4, 0, 2, 2)
grid.addWidget(self.battle_table, 6, 0, 2, 2)
self.setLayout(grid)
self.setGeometry(600, 600, 800, 600)
self.center()
self.setWindowTitle('战绩查询')
self.show()
def create_table(self):
self.battle_table = QTableWidget()
self.battle_table.setColumnCount(8)
self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',
'date', 'time', 'kill_count', 'death', 'support', 'score'])
self.battle_table.setAlternatingRowColors(True)
self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.battle_table.resizeRowsToContents()
self.battle_table.doubleClicked.connect(self.on_click)
@pyqtSlot()
def on_click(self):
currentQTableWidgetItem = self.battle_table.selectedItems()[0]
match_id = currentQTableWidgetItem.text()
print(match_id)
self.showDialog(match_id)
def showDialog(self, match_id):
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'
.format(match_id))
a = self.A()
def searchd(self):
if __name__ == '__main__':
p = Process(target=self.a)
p.start()
p.join()
def search(self):
print(self.user)
print(__name__)
runner = CrawlerRunner(get_project_settings())
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
d.addBoth(lambda _: reactor.stop())
reactor.run()
print('complete')
name = self.qle.text()
db = db_handle()
with db as con:
sql = (
"select * from player where name = '{}' order by update_time"
.format(name))
con.execute(sql)
player = con.fetchone()
if player:
(id, name, win, match_count, strength, level, update_time, rank
) = player
text = (
'角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}'
.format(name, win, match_count, strength, rank, level,
update_time))
self.txt.setText(text)
sql = ("select * from player_data where name = '{}' order by date"
.format(name))
con.execute(sql)
player_data = con.fetchall()
a = ''
for data in player_data:
a += str(data)
a += '\n'
self.battle.setText(str(a))
sql = 'select * from game_data order by match_id desc'
con.execute(sql)
game_data = con.fetchall()
a = ''
l = 0
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += '\n'
self.player_status.setText(str(a))
l += 1
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.
Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
<|reserved_special_token_1|>
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
import pymysql
import requests
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner, CrawlerProcess
from scrapy.utils.project import get_project_settings
from spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport
from scrapy.settings import Settings
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from multiprocessing import Process
def db_handle():
con = pymysql.connect(
host='localhost',
user='web',
passwd='web',
charset='utf8',
database='heroes'
)
return con
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
#QToolTip.setFont(QFont('SanSerif', 10))
#self.setToolTip('This is a <b>QWidget</b> widget')
#textEdit = QTextEdit()
#self.setCentralWidget(textEdit)
self.qle = QLineEdit("蔽月八云")
self.user = self.qle.text()
self.para = "user={}".format(self.user)
print(self.user, '1')
btn = QPushButton('查询', self)
#btn.setToolTip('This is a <b>QPushButton</b> widget')
btn.resize(btn.sizeHint())
btn.clicked.connect(self.search)
self.txt = QTextEdit()
#self.txt.textChanged.connect(self.adjustSize)
self.battle = QTextEdit()
self.player_status = QTextEdit()
self.create_table()
# The action name must not be 'Quit' or 'Exit'; with those names it does not show up, reason unknown
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('application')
exitAction.triggered.connect(qApp.quit)
#self.statusBar()
#menubar = QMainWindow.menuBar()
# The status bar is displayed differently on Mac OS
#menubar.setNativeMenuBar(False)
#fileMenu = menubar.addMenu('&File')
#fileMenu.addAction(exitAction)
#toolbar = self.addToolBar('Exit')
#toolbar.addAction(exitAction)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qle, 1, 0)
grid.addWidget(btn, 2, 0)
grid.addWidget(self.txt, 3, 0)
grid.addWidget(self.battle, 1, 1, 3, 1)
grid.addWidget(self.player_status, 4, 0, 2, 2)
grid.addWidget(self.battle_table, 6, 0, 2, 2)
self.setLayout(grid)
self.setGeometry(600, 600, 800, 600)
self.center()
self.setWindowTitle("战绩查询")
self.show()
def create_table(self):
# Set up the table
self.battle_table = QTableWidget()
# Number of table columns; the row count is created below when reading the data, based on the amount of data
self.battle_table.setColumnCount(8)
# Set the table header
self.battle_table.setHorizontalHeaderLabels(
['match_id', 'head', 'date', 'time', 'kill_count', 'death', 'support', 'score'])
# Alternate row colors
self.battle_table.setAlternatingRowColors(True)
# Select entire rows
self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
# Resize the columns to match the size of their contents
# self.battle_table.resizeColumnsToContents()
# # Resize the rows to match the size of their contents
self.battle_table.resizeRowsToContents()
# Click event
self.battle_table.doubleClicked.connect(self.on_click)
@pyqtSlot()
def on_click(self):
currentQTableWidgetItem = self.battle_table.selectedItems()[0]
# The match id contained in the clicked row
#match_id = self.battle_table.item(currentQTableWidgetItem.row(), 0).text()
match_id = currentQTableWidgetItem.text()
print(match_id)
self.showDialog(match_id)
def showDialog(self, match_id):
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'.format(match_id))
a = self.A()
## Start the crawler and fetch the data of every player in this match
#runner = CrawlerRunner(get_project_settings())
#runner.crawl('JumpReport')
#d = runner.join()
#d.addBoth(lambda _: reactor.stop())
#reactor.run() # run the crawler, blocking
#
#text, ok = QInputDialog.getText(self, 'Input Dialog',
# 'Enter your name:')
def searchd(self):
if __name__ == '__main__':
#print(user, '2')
p = Process(target=self.a)
p.start()
p.join()
def search(self):
print(self.user)
print(__name__)
#print(user, '3')
#process = CrawlerProcess(get_project_settings())
#process.crawl('JumpReport')
#process.start()
#process.stop()
#process.put()
# Run the crawler code from this script
runner = CrawlerRunner(get_project_settings())
#def search(runner, keyword):
# return runner.crawl(JumpReport, keyword)
#runner = CrawlerProcess()
#dfs = set()
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
#dfs.add(d)
#defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())
d.addBoth(lambda _: reactor.stop())
#search(runner, "abcd")
#search(runner, "beat")
#runner.start()
reactor.run() # run the crawler, blocking here
print("complete")
# runner = CrawlerRunner(get_project_settings())
# dfs = set()
# for domain in range(2):
# d = runner.crawl('JumpReport')
# dfs.add(d)
#
# defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())
# reactor.run() # the script will block here until all crawling jobs are finished
# runner = CrawlerRunner(get_project_settings())
#
# @defer.inlineCallbacks
# def crawl():
# for domain in range(2):
# yield runner.crawl('JumpReport')
# reactor.stop()
#
# crawl()
# reactor.run() # the script will block here until the last crawl call is finished
# settings = Settings({'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})
# runner = CrawlerRunner(settings)
#
# d = runner.crawl(JumpReport)
# d.addBoth(lambda _: reactor.stop())
# reactor.run() # the script will block here until the crawling is finished
# runner = CrawlerProcess(get_project_settings())
# runner.crawl(JumpReport)
# runner.start()
name = self.qle.text()
db = db_handle()
with db as con:
sql = "select * from player where name = '{}' order by update_time".format(name)
con.execute(sql)
player = con.fetchone()
if player:
id, name, win, match_count, strength, level, update_time, rank = player
text = "角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}".format(
name, win, match_count, strength, rank, level, update_time)
self.txt.setText(text)
sql = "select * from player_data where name = '{}' order by date".format(name)
con.execute(sql)
player_data = con.fetchall()
a = ""
for data in player_data:
a += str(data)
a += "\n"
self.battle.setText(str(a))
sql = "select * from game_data order by match_id desc"
con.execute(sql)
game_data = con.fetchall()
a = ""
l = 0
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
# Set the alignment of the filled-in data (horizontally centered | vertically centered)
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += "\n"
self.player_status.setText(str(a))
l += 1
#for i in range(len(list(a))):
# self.battle_table.setLayout(str(a))
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', "Quit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
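# --- Editor-added note and sketch. The Twisted reactor started in search()
# --- cannot be restarted, so a second click on the button would fail; one
# --- common workaround is to run each crawl in a separate process (the file
# --- already imports multiprocessing.Process). The helper below is a hedged
# --- sketch with invented names, not code from this repository.
def _crawl_once(user):
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    process = CrawlerProcess(get_project_settings())
    process.crawl('JumpReport', user=user)
    process.start()  # blocks until the crawl finishes, but only in the child process

# inside the widget, instead of building a CrawlerRunner directly:
#     p = Process(target=_crawl_once, args=(self.user,))
#     p.start()
#     p.join()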
|
flexible
|
{
"blob_id": "889d465ceeac57a600b2fa3bd26632edcd90a655",
"index": 2911,
"step-1": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n <mask token>\n <mask token>\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n <mask token>\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n <mask token>\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n <mask token>\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n <mask token>\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n <mask token>\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n\n def 
closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.\n Yes | QMessageBox.No, QMessageBox.Yes)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-4": "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QCoreApplication\nimport pymysql\nimport requests\nfrom twisted.internet import reactor, defer\nfrom scrapy.crawler import CrawlerRunner, CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport\nfrom scrapy.settings import Settings\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom multiprocessing import Process\n\n\ndef db_handle():\n con = pymysql.connect(host='localhost', user='web', passwd='web',\n charset='utf8', database='heroes')\n return con\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n\n def searchd(self):\n if __name__ == '__main__':\n p = Process(target=self.a)\n p.start()\n p.join()\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = 
(\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.\n Yes | QMessageBox.No, QMessageBox.Yes)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n",
"step-5": "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QCoreApplication\n\nimport pymysql\nimport requests\n\nfrom twisted.internet import reactor, defer\nfrom scrapy.crawler import CrawlerRunner, CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport\nfrom scrapy.settings import Settings\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nfrom multiprocessing import Process\n\n\n\n\ndef db_handle():\n\n con = pymysql.connect(\n host='localhost',\n user='web',\n passwd='web',\n charset='utf8',\n database='heroes'\n )\n return con\n\nclass Example(QWidget):\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n\n self.show()\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n #QToolTip.setFont(QFont('SanSerif', 10))\n\n #self.setToolTip('This is a <b>QWidget</b> widget')\n\n #textEdit = QTextEdit()\n #self.setCentralWidget(textEdit)\n\n self.qle = QLineEdit(\"蔽月八云\")\n self.user = self.qle.text()\n self.para = \"user={}\".format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n #btn.setToolTip('This is a <b>QPushButton</b> widget')\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n\n self.txt = QTextEdit()\n #self.txt.textChanged.connect(self.adjustSize)\n\n self.battle = QTextEdit()\n\n self.player_status = QTextEdit()\n\n self.create_table()\n\n\n\n # 名称不能用Quit、Exit,用了就无法显示,原因不明\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n\n #self.statusBar()\n\n #menubar = QMainWindow.menuBar()\n\n # Mac OS的状态栏显示不一样\n #menubar.setNativeMenuBar(False)\n\n #fileMenu = menubar.addMenu('&File')\n #fileMenu.addAction(exitAction)\n\n #toolbar = self.addToolBar('Exit')\n #toolbar.addAction(exitAction)\n\n grid = QGridLayout()\n grid.setSpacing(10)\n\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n\n self.setLayout(grid)\n\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle(\"战绩查询\")\n\n self.show()\n\n def create_table(self):\n # 设置表\n self.battle_table = QTableWidget()\n # 表列数,行数在下方读取数据时,根据数据量建立\n self.battle_table.setColumnCount(8)\n # 设置表头\n self.battle_table.setHorizontalHeaderLabels(\n ['match_id', 'head', 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n # 隔行变色\n self.battle_table.setAlternatingRowColors(True)\n # 整行选中\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n # 将列调整到跟内容大小相匹配\n # self.battle_table.resizeColumnsToContents()\n # #将行大小调整到跟内容的大小相匹配\n self.battle_table.resizeRowsToContents()\n # 点击事件\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n # 点击的行包含的比赛id\n #match_id = self.battle_table.item(currentQTableWidgetItem.row(), 0).text()\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'.format(match_id))\n 
a = self.A()\n\n ## 启动爬虫,获取该场比赛所有人的数据\n #runner = CrawlerRunner(get_project_settings())\n #runner.crawl('JumpReport')\n #d = runner.join()\n #d.addBoth(lambda _: reactor.stop())\n #reactor.run() # 阻塞运行爬虫\n #\n #text, ok = QInputDialog.getText(self, 'Input Dialog',\n # 'Enter your name:')\n\n\n\n def searchd(self):\n if __name__ == '__main__':\n #print(user, '2')\n p = Process(target=self.a)\n p.start()\n p.join()\n\n def search(self):\n print(self.user)\n print(__name__)\n #print(user, '3')\n\n\n #process = CrawlerProcess(get_project_settings())\n #process.crawl('JumpReport')\n #process.start()\n #process.stop()\n #process.put()\n # 脚本执行爬虫代码\n runner = CrawlerRunner(get_project_settings())\n\n #def search(runner, keyword):\n # return runner.crawl(JumpReport, keyword)\n\n #runner = CrawlerProcess()\n #dfs = set()\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n #dfs.add(d)\n #defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())\n d.addBoth(lambda _: reactor.stop())\n #search(runner, \"abcd\")\n #search(runner, \"beat\")\n #runner.start()\n reactor.run() # 阻塞运行爬虫\n\n print(\"complete\")\n\n\n # runner = CrawlerRunner(get_project_settings())\n # dfs = set()\n # for domain in range(2):\n # d = runner.crawl('JumpReport')\n # dfs.add(d)\n #\n # defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())\n # reactor.run() # the script will block here until all crawling jobs are finished\n\n # runner = CrawlerRunner(get_project_settings())\n #\n # @defer.inlineCallbacks\n # def crawl():\n # for domain in range(2):\n # yield runner.crawl('JumpReport')\n # reactor.stop()\n #\n # crawl()\n # reactor.run() # the script will block here until the last crawl call is finished\n\n # settings = Settings({'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})\n # runner = CrawlerRunner(settings)\n # \n # d = runner.crawl(JumpReport)\n # d.addBoth(lambda _: reactor.stop())\n # reactor.run() # the script will block here until the crawling is finished\n\n\n # runner = CrawlerProcess(get_project_settings())\n # runner.crawl(JumpReport)\n # runner.start()\n\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = \"select * from player where name = '{}' order by update_time\".format(name)\n con.execute(sql)\n player = con.fetchone()\n if player:\n id, name, win, match_count, strength, level, update_time, rank = player\n text = \"角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}\".format(\n name, win, match_count, strength, rank, level, update_time)\n \n self.txt.setText(text)\n \n sql = \"select * from player_data where name = '{}' order by date\".format(name)\n con.execute(sql)\n player_data = con.fetchall()\n a = \"\"\n for data in player_data:\n a += str(data)\n a += \"\\n\"\n self.battle.setText(str(a))\n\n sql = \"select * from game_data order by match_id desc\"\n con.execute(sql)\n game_data = con.fetchall()\n a = \"\"\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n\n for i in range(self.battle_table.columnCount()):\n\n item = QTableWidgetItem(str(data[i + 1]))\n # 设置填入数据的排列位置(左右居中| 上下居中)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n\n a += \"\\n\"\n self.player_status.setText(str(a))\n l += 1\n #for i in range(len(list(a))):\n # self.battle_table.setLayout(str(a))\n\n def center(self):\n\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n 
qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def closeEvent(self, event):\n\n reply = QMessageBox.question(self, 'Message', \"Quit?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n ex = Example()\n\n sys.exit(app.exec_())\n",
"step-ids": [
7,
8,
11,
16,
17
]
}
|
[
7,
8,
11,
16,
17
] |
from threading import Lock
from typing import Callable, Any
from remote.domain.commandCallback import CommandCallback
from remote.domain.commandStatus import CommandStatus
from remote.service.remoteService import RemoteService
from ui.domain.subroutine.iSubroutineRunner import ISubroutineRunner
class RemoteSubroutineRunner(ISubroutineRunner):
def __init__(self, remote_service: RemoteService) -> None:
self._remote_service = remote_service
self._callback: CommandCallback = None
self._busy = False
self._busy_lock = Lock()
def execute_charge_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_charge_subroutine, callback)
def execute_go_home_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_go_home_subroutine, callback)
def execute_read_qr_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_read_qr_subroutine, callback)
def execute_grab_subroutine(self, target: str, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_grab_subroutine, callback, target=target)
def execute_drop_subroutine(self, target: str, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_drop_subroutine, callback, target=target)
def execute_switch_light_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_switch_light_subroutine, callback)
def execute_directional_movement(self, direction: str, speed: str, distance: float,
callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_directional_movement, callback,
direction=direction, speed=speed, distance=distance)
def execute_rotational_movement(self, angle: float, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_rotational_movement, callback, angle=angle)
def execute_activate_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_activate_magnet, callback)
def execute_deactivate_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_deactivate_magnet, callback)
def execute_discharge_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_discharge_magnet, callback)
def execute_update_directions_subroutine(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_update_directions, callback)
def execute_championship_subroutine(self, callback: CommandCallback):
self._start_command(self._remote_service.execute_championship, callback)
def execute_look_down(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_look_down, callback)
def execute_look_ahead(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_look_ahead, callback)
def _command_done(self, status: CommandStatus) -> None:
with self._busy_lock:
self._busy = False
self._callback(status)
def _start_command(self, function: Callable[[Any], None], callback: CommandCallback, **kwargs) -> None:
"""
:raises BlockingIOError: command already running
"""
with self._busy_lock:
if self._busy:
raise BlockingIOError()
self._busy = True
self._callback = callback
kwargs["callback"] = self._command_done
function(**kwargs)
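
# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal demonstration of how _start_command serializes commands behind the
# busy lock. FakeRemoteService below is hypothetical: it only mimics the shape
# of the calls this runner makes (each execute_* method takes a `callback`
# keyword and reports a status when it is done).
if __name__ == '__main__':
    class FakeRemoteService:
        def execute_charge_subroutine(self, callback):
            callback('DONE')  # assumed status payload, purely for illustration

        def execute_go_home_subroutine(self, callback):
            callback('DONE')

    runner = RemoteSubroutineRunner(FakeRemoteService())
    runner.execute_charge_subroutine(lambda status: print('charge:', status))
    # The fake service answers synchronously, so the runner is free again here;
    # with a real asynchronous RemoteService, a second call issued while the
    # first one is still running would raise BlockingIOError instead.
    runner.execute_go_home_subroutine(lambda status: print('go home:', status))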
|
normal
|
{
"blob_id": "75270fb4ed059f134b47b8937717cb7fe05d9499",
"index": 8833,
"step-1": "<mask token>\n\n\nclass RemoteSubroutineRunner(ISubroutineRunner):\n <mask token>\n\n def execute_charge_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_charge_subroutine,\n callback)\n\n def execute_go_home_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_go_home_subroutine,\n callback)\n\n def execute_read_qr_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_read_qr_subroutine,\n callback)\n\n def execute_grab_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_grab_subroutine,\n callback, target=target)\n\n def execute_drop_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_drop_subroutine,\n callback, target=target)\n <mask token>\n\n def execute_directional_movement(self, direction: str, speed: str,\n distance: float, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_directional_movement, callback, direction=direction,\n speed=speed, distance=distance)\n\n def execute_rotational_movement(self, angle: float, callback:\n CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_rotational_movement, callback, angle=angle)\n\n def execute_activate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_activate_magnet,\n callback)\n\n def execute_deactivate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_deactivate_magnet,\n callback)\n <mask token>\n\n def execute_update_directions_subroutine(self, callback: CommandCallback\n ) ->None:\n self._start_command(self._remote_service.execute_update_directions,\n callback)\n <mask token>\n\n def execute_look_down(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_down, callback)\n <mask token>\n\n def _command_done(self, status: CommandStatus) ->None:\n with self._busy_lock:\n self._busy = False\n self._callback(status)\n\n def _start_command(self, function: Callable[[Any], None], callback:\n CommandCallback, **kwargs) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n with self._busy_lock:\n if self._busy:\n raise BlockingIOError()\n self._busy = True\n self._callback = callback\n kwargs['callback'] = self._command_done\n function(**kwargs)\n",
"step-2": "<mask token>\n\n\nclass RemoteSubroutineRunner(ISubroutineRunner):\n <mask token>\n\n def execute_charge_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_charge_subroutine,\n callback)\n\n def execute_go_home_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_go_home_subroutine,\n callback)\n\n def execute_read_qr_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_read_qr_subroutine,\n callback)\n\n def execute_grab_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_grab_subroutine,\n callback, target=target)\n\n def execute_drop_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_drop_subroutine,\n callback, target=target)\n <mask token>\n\n def execute_directional_movement(self, direction: str, speed: str,\n distance: float, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_directional_movement, callback, direction=direction,\n speed=speed, distance=distance)\n\n def execute_rotational_movement(self, angle: float, callback:\n CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_rotational_movement, callback, angle=angle)\n\n def execute_activate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_activate_magnet,\n callback)\n\n def execute_deactivate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_deactivate_magnet,\n callback)\n <mask token>\n\n def execute_update_directions_subroutine(self, callback: CommandCallback\n ) ->None:\n self._start_command(self._remote_service.execute_update_directions,\n callback)\n\n def execute_championship_subroutine(self, callback: CommandCallback):\n self._start_command(self._remote_service.execute_championship, callback\n )\n\n def execute_look_down(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_down, callback)\n <mask token>\n\n def _command_done(self, status: CommandStatus) ->None:\n with self._busy_lock:\n self._busy = False\n self._callback(status)\n\n def _start_command(self, function: Callable[[Any], None], callback:\n CommandCallback, **kwargs) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n with self._busy_lock:\n if self._busy:\n raise BlockingIOError()\n self._busy = True\n self._callback = callback\n kwargs['callback'] = self._command_done\n function(**kwargs)\n",
"step-3": "<mask token>\n\n\nclass RemoteSubroutineRunner(ISubroutineRunner):\n <mask token>\n\n def execute_charge_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_charge_subroutine,\n callback)\n\n def execute_go_home_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_go_home_subroutine,\n callback)\n\n def execute_read_qr_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_read_qr_subroutine,\n callback)\n\n def execute_grab_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_grab_subroutine,\n callback, target=target)\n\n def execute_drop_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_drop_subroutine,\n callback, target=target)\n <mask token>\n\n def execute_directional_movement(self, direction: str, speed: str,\n distance: float, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_directional_movement, callback, direction=direction,\n speed=speed, distance=distance)\n\n def execute_rotational_movement(self, angle: float, callback:\n CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_rotational_movement, callback, angle=angle)\n\n def execute_activate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_activate_magnet,\n callback)\n\n def execute_deactivate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_deactivate_magnet,\n callback)\n\n def execute_discharge_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_discharge_magnet,\n callback)\n\n def execute_update_directions_subroutine(self, callback: CommandCallback\n ) ->None:\n self._start_command(self._remote_service.execute_update_directions,\n callback)\n\n def execute_championship_subroutine(self, callback: CommandCallback):\n self._start_command(self._remote_service.execute_championship, callback\n )\n\n def execute_look_down(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_down, callback)\n\n def execute_look_ahead(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_ahead, callback)\n\n def _command_done(self, status: CommandStatus) ->None:\n with self._busy_lock:\n self._busy = False\n self._callback(status)\n\n def _start_command(self, function: Callable[[Any], None], callback:\n CommandCallback, **kwargs) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n with self._busy_lock:\n if self._busy:\n raise BlockingIOError()\n self._busy = True\n self._callback = callback\n kwargs['callback'] = self._command_done\n function(**kwargs)\n",
"step-4": "<mask token>\n\n\nclass RemoteSubroutineRunner(ISubroutineRunner):\n\n def __init__(self, remote_service: RemoteService) ->None:\n self._remote_service = remote_service\n self._callback: CommandCallback = None\n self._busy = False\n self._busy_lock = Lock()\n\n def execute_charge_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_charge_subroutine,\n callback)\n\n def execute_go_home_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_go_home_subroutine,\n callback)\n\n def execute_read_qr_subroutine(self, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_read_qr_subroutine,\n callback)\n\n def execute_grab_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_grab_subroutine,\n callback, target=target)\n\n def execute_drop_subroutine(self, target: str, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_drop_subroutine,\n callback, target=target)\n\n def execute_switch_light_subroutine(self, callback: CommandCallback\n ) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_switch_light_subroutine, callback)\n\n def execute_directional_movement(self, direction: str, speed: str,\n distance: float, callback: CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_directional_movement, callback, direction=direction,\n speed=speed, distance=distance)\n\n def execute_rotational_movement(self, angle: float, callback:\n CommandCallback) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.\n execute_rotational_movement, callback, angle=angle)\n\n def execute_activate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_activate_magnet,\n callback)\n\n def execute_deactivate_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_deactivate_magnet,\n callback)\n\n def execute_discharge_magnet(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_discharge_magnet,\n callback)\n\n def execute_update_directions_subroutine(self, callback: CommandCallback\n ) ->None:\n self._start_command(self._remote_service.execute_update_directions,\n callback)\n\n def execute_championship_subroutine(self, callback: CommandCallback):\n self._start_command(self._remote_service.execute_championship, callback\n )\n\n def execute_look_down(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_down, callback)\n\n def execute_look_ahead(self, callback: CommandCallback) ->None:\n self._start_command(self._remote_service.execute_look_ahead, callback)\n\n def _command_done(self, status: CommandStatus) ->None:\n with self._busy_lock:\n self._busy = False\n self._callback(status)\n\n def _start_command(self, function: 
Callable[[Any], None], callback:\n CommandCallback, **kwargs) ->None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n with self._busy_lock:\n if self._busy:\n raise BlockingIOError()\n self._busy = True\n self._callback = callback\n kwargs['callback'] = self._command_done\n function(**kwargs)\n",
"step-5": "from threading import Lock\nfrom typing import Callable, Any\n\nfrom remote.domain.commandCallback import CommandCallback\nfrom remote.domain.commandStatus import CommandStatus\nfrom remote.service.remoteService import RemoteService\nfrom ui.domain.subroutine.iSubroutineRunner import ISubroutineRunner\n\n\nclass RemoteSubroutineRunner(ISubroutineRunner):\n def __init__(self, remote_service: RemoteService) -> None:\n self._remote_service = remote_service\n self._callback: CommandCallback = None\n self._busy = False\n self._busy_lock = Lock()\n\n def execute_charge_subroutine(self, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_charge_subroutine, callback)\n\n def execute_go_home_subroutine(self, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_go_home_subroutine, callback)\n\n def execute_read_qr_subroutine(self, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_read_qr_subroutine, callback)\n\n def execute_grab_subroutine(self, target: str, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_grab_subroutine, callback, target=target)\n\n def execute_drop_subroutine(self, target: str, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_drop_subroutine, callback, target=target)\n\n def execute_switch_light_subroutine(self, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_switch_light_subroutine, callback)\n\n def execute_directional_movement(self, direction: str, speed: str, distance: float,\n callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_directional_movement, callback,\n direction=direction, speed=speed, distance=distance)\n\n def execute_rotational_movement(self, angle: float, callback: CommandCallback) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n self._start_command(self._remote_service.execute_rotational_movement, callback, angle=angle)\n\n def execute_activate_magnet(self, callback: CommandCallback) -> None:\n self._start_command(self._remote_service.execute_activate_magnet, callback)\n\n def execute_deactivate_magnet(self, callback: CommandCallback) -> None:\n self._start_command(self._remote_service.execute_deactivate_magnet, callback)\n\n def execute_discharge_magnet(self, callback: CommandCallback) -> None:\n self._start_command(self._remote_service.execute_discharge_magnet, callback)\n\n def execute_update_directions_subroutine(self, callback: CommandCallback) -> None:\n self._start_command(self._remote_service.execute_update_directions, callback)\n\n def execute_championship_subroutine(self, callback: CommandCallback):\n self._start_command(self._remote_service.execute_championship, callback)\n\n def execute_look_down(self, callback: CommandCallback) -> None:\n self._start_command(self._remote_service.execute_look_down, callback)\n\n def execute_look_ahead(self, callback: CommandCallback) 
-> None:\n self._start_command(self._remote_service.execute_look_ahead, callback)\n\n def _command_done(self, status: CommandStatus) -> None:\n with self._busy_lock:\n self._busy = False\n self._callback(status)\n\n def _start_command(self, function: Callable[[Any], None], callback: CommandCallback, **kwargs) -> None:\n \"\"\"\n\n :raises BlockingIOError: command already running\n \"\"\"\n with self._busy_lock:\n if self._busy:\n raise BlockingIOError()\n self._busy = True\n self._callback = callback\n kwargs[\"callback\"] = self._command_done\n function(**kwargs)\n",
"step-ids": [
14,
15,
17,
19,
21
]
}
|
[
14,
15,
17,
19,
21
] |
from HiddenLayer import HiddenLayer
from Vector import Vector
import IO
import Loss
import Utils
import Activation
import Backpropagation
import Rate
# As a test, let's simulate the OR-gate with a single perceptron
""" training = []
training.append(Vector(2, arr=[1, 1]))
training.append(Vector(2, arr=[1, 0]))
training.append(Vector(2, arr=[0, 1]))
training.append(Vector(2, arr=[0, 0]))
labels = Vector(4, arr=[1, 1, 1, 0])
from Vector
left_true= Vector(2, arr=[1, 0])
both_false = Vector(2, arr=[0, 0])
print(tron.predict(both_true))
print(tron.predict(right_true))
print(tron.predict(left_true))
print(tron.predict(both_false)) """
# Testing the reading of data
""" images = Data.read_images('test')
labels = Data.read_labels('test')
UI.draw_image(images[1234], "testi")
print(labels[1234]) """
# Vector multiplication test
""" print(Vector(4, arr=[1, 2, 3, 4]) * Vector(4, arr=[1, 2, 2, 2])) """
# Neuron output test
""" n = Neuron(Utils.rand_array(4), Activation.sigmoid, Activation.sigmoid_d, 3)
x = Vector(4, arr=Utils.rand_array(4))
print(n)
print(x)
print(n.output(x)) """
# rand_array and normalization test
""" arr = Utils.rand_array(10, -5, 15)
print(arr)
print(Utils.normalize(arr, -5, 15)) """
# Testing some hidden layer basic functionality and saving/loading
""" images = IO.read_images('test')
labels = IO.read_labels('test')
weights = [Utils.rand_array(784, -1, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
#IO.save_layer(hl_a, "test")
hl_b = IO.load_layer("test")
for i in range(9):
img = Vector(Utils.normalize(Utils.flatten_2d(images[i]), 0, 255))
o1 = hl_a.generate_output(img)
o2 = hl_b.generate_output(img)
#print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
print(o1)
print(o2) """
# Array flattening testing
""" testarr = [[1, 2, 7, 8], [3, 4, 9, 10], [5, 6, 11, 12]]
testarr = Utils.flatten_2d(testarr)
print(testarr)
testarr = Utils.deflatten_2d(testarr, 4, 3)
print(testarr) """
# Let's test multi-layer nets
""" images = IO.read_images('test')
labels = IO.read_labels('test')
img_test = images[:20]
lab_test = labels[:20]
weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_b = [Utils.rand_array(10, 0, 1) for _ in range(10)]
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
hl_b = HiddenLayer(10, 10, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
LEARNING_RATE = 0.5
for (i, l) in zip(images, labels):
img = Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255))
lab = Utils.onehot_label_arr(l)
o_a = hl_a.generate_output(img)
o_b = hl_b.generate_output(o_a)
grads = Backpropagation.output_layer_grads(hl_b, o_b, lab, hl_a, LEARNING_RATE)
#grad_b =
#print("Picture " + str(i + 1) + ": " + str(o1) + ", " + str(o2) + ", correct answer is " + str(labels[i]))
#print(o_a)
#print(o_b)
#print(lab)
#print()
#print("----")
for n in hl_b.neurons:
print(n.weights) """
# Let's try how well a single one-layer 10-neuron net performs!
# Read images and labels
""" images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")
# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []
for (i, l) in zip(images, labels):
images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))
labels_oh.append(Utils.onehot_label_arr(l))
for i in test_images:
test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))
print("Images & labels processed!")
# Initialize weights and layer
#weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]
weights_a = [[0] * 784] * 10
hl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)
LEARNING_RATE = 0.05
iter = 1
prev_correct = 0
#old_weights = weights_a
while True:
print("Iteration: " + str(iter))
j = 1
for (img, lab) in zip(images_flat, labels_oh):
o_a = hl_a.generate_output(img)
grads = Backpropagation.output_layer_backpropagate(hl_a, o_a, lab, img, LEARNING_RATE)
if j % 1000 == 0:
print(" " + str(j))
j += 1
right_amount = 0
for (img, lab) in zip(test_images_flat, test_labels):
o_a = hl_a.generate_output(img)
pred = Utils.make_prediction(o_a)
if pred == lab:
right_amount += 1
print("Correct predictions: " + str(right_amount))
if (iter > 10):
break
prev_correct = right_amount
iter = iter + 1 """
#IO.save_layer(hl_a, "test1_3")
# Visualize weights!
""" hl_a = IO.load_layer("test1_3")
i = 0
for n in hl_a.neurons:
weights = n.weights
weights = Utils.fit_arr(weights, 0, 255)
#print(weights)
IO.save_image(Utils.deflatten_2d(weights, 28, 28), "w" + str(i))
i += 1 """
# Final boss: a 32-16-10 multi-layer net!
images = IO.read_images('training')
labels = IO.read_labels('training')
test_images = IO.read_images('test')
test_labels = IO.read_labels('test')
print("Images & labels read!")
# Preprocess images and labels
images_flat = []
labels_oh = []
test_images_flat = []
for (i, l) in zip(images, labels):
images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))
labels_oh.append(Utils.onehot_label_arr(l))
for i in test_images:
test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))
print("Images & labels processed!")
# Don't change these two
IMAGE_INPUT_SIZE = 784
OUTPUT_LAYER_SIZE = 10
# These define how many neurons in layers A & B
LAYER_A_SIZE = 32
LAYER_B_SIZE = 16
# Initialize weights and layer
weights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(LAYER_A_SIZE)]
weights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)]
weights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(OUTPUT_LAYER_SIZE)]
hl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
hl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)
opl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)
# ---- Change these if you want to play around with the program ----
# These decide when the training stops
ITERATION_CAP = 20 # after 20 iterations or
ACCURACY_CAP = 6500 # at 65% accuracy
# These adjust the learning process
INITIAL_LEARNING_RATE = 0.05
LEARNING_DECAY_SCALAR = 0.0025
BATCH_SIZE = 100
# ----------------
learning_rate = INITIAL_LEARNING_RATE
iter = 1
prev_correct = 0
while True:
print("Iteration: " + str(iter))
learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)
print("Learning rate: " + str(learning_rate))
j = 1
batchtracker = 0
img_sum = Vector([0] * IMAGE_INPUT_SIZE)
lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
oa_sum = Vector([0] * LAYER_A_SIZE)
ob_sum = Vector([0] * LAYER_B_SIZE)
op_sum = Vector([0] * OUTPUT_LAYER_SIZE)
for (img, lab) in zip(images_flat, labels_oh):
o_a = hl_a.generate_output(img)
o_b = hl_b.generate_output(o_a['op'])
output = opl.generate_output(o_b['op'])
img_sum = img_sum + img
lab_sum = lab_sum + Vector(lab)
oa_sum = oa_sum + o_a['op']
ob_sum = ob_sum + o_b['op']
op_sum = op_sum + output['op']
batchtracker = batchtracker + 1
if batchtracker == BATCH_SIZE:
img_sum = img_sum * (1 / BATCH_SIZE)
lab_sum = lab_sum * (1 / BATCH_SIZE)
oa_sum = oa_sum * (1 / BATCH_SIZE)
ob_sum = ob_sum * (1 / BATCH_SIZE)
op_sum = op_sum * (1 / BATCH_SIZE)
#print(opl.loss(lab_sum, op_sum))
            # backpropagate on the batch averages (lab_sum / img_sum), not on the last sample
            opl_backprop = Backpropagation.output_layer_backpropagate(opl, op_sum, lab_sum, ob_sum, learning_rate)
            hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b, oa_sum, ob_sum, opl_backprop, learning_rate)
            hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a, img_sum, oa_sum, hl_b_backprop, learning_rate)
img_sum = Vector([0] * IMAGE_INPUT_SIZE)
lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)
oa_sum = Vector([0] * LAYER_A_SIZE)
ob_sum = Vector([0] * LAYER_B_SIZE)
op_sum = Vector([0] * OUTPUT_LAYER_SIZE)
batchtracker = 0
if j % 10000 == 0:
print(" " + str(j))
j += 1
print("Iteration " + str(iter) + " done! Now testing accuracy...")
right_amount = 0
for (img_t, lab_t) in zip(test_images_flat, test_labels):
oa = hl_a.generate_output(img_t)['op']
ob = hl_b.generate_output(oa)['op']
op = opl.generate_output(ob)['op']
pred = Utils.make_prediction(op)
if pred == lab_t:
right_amount += 1
print("Correct predictions: " + str(right_amount))
if (iter >= ITERATION_CAP):
break
if (prev_correct >= ACCURACY_CAP):
break
#if (prev_correct > right_amount):
# break
prev_correct = right_amount
iter = iter + 1
IO.save_layer(hl_a, "test_layer_a")
IO.save_layer(hl_b, "test_layer_b")
IO.save_layer(opl, "test_layer_c")
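
# --- Illustrative usage (added sketch, not part of the original script) ---
# A minimal check, assuming the three layers saved above: reload them with
# IO.load_layer and classify a single test image using the same preprocessing
# as during training. Layer/IO/Utils behaviour is inferred from how they are
# used earlier in this script.
hl_a_loaded = IO.load_layer("test_layer_a")
hl_b_loaded = IO.load_layer("test_layer_b")
opl_loaded = IO.load_layer("test_layer_c")
sample = Vector(Utils.normalize(Utils.flatten_2d(test_images[0]), 0, 1))
oa = hl_a_loaded.generate_output(sample)['op']
ob = hl_b_loaded.generate_output(oa)['op']
op = opl_loaded.generate_output(ob)['op']
print("Reloaded net predicts " + str(Utils.make_prediction(op)) +
      ", correct answer is " + str(test_labels[0]))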
|
normal
|
{
"blob_id": "1f86fe72c90c8457715a2f400dae8d355a9a97cf",
"index": 8577,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Images & labels read!')\n<mask token>\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\n<mask token>\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n",
"step-3": "<mask token>\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint('Images & labels read!')\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(\n LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)\n ]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(\n OUTPUT_LAYER_SIZE)]\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.\n sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\nITERATION_CAP = 20\nACCURACY_CAP = 6500\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 0.0025\nBATCH_SIZE = 100\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! 
Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n",
"step-4": "from HiddenLayer import HiddenLayer\nfrom Vector import Vector\nimport IO\nimport Loss\nimport Utils\nimport Activation\nimport Backpropagation\nimport Rate\n<mask token>\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint('Images & labels read!')\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\nfor i, l in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\nprint('Images & labels processed!')\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(\n LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)\n ]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(\n OUTPUT_LAYER_SIZE)]\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.\n sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.\n mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.\n sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\nITERATION_CAP = 20\nACCURACY_CAP = 6500\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 0.0025\nBATCH_SIZE = 100\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\nwhile True:\n print('Iteration: ' + str(iter))\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n print('Learning rate: ' + str(learning_rate))\n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n for img, lab in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n batchtracker = batchtracker + 1\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n opl_backprop = Backpropagation.output_layer_backpropagate(opl,\n op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b,\n oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a,\n img, oa_sum, hl_b_backprop, learning_rate)\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n if j % 10000 == 0:\n print(' ' + str(j))\n j += 1\n print('Iteration ' + str(iter) + ' done! 
Now testing accuracy...')\n right_amount = 0\n for img_t, lab_t in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n print('Correct predictions: ' + str(right_amount))\n if iter >= ITERATION_CAP:\n break\n if prev_correct >= ACCURACY_CAP:\n break\n prev_correct = right_amount\n iter = iter + 1\nIO.save_layer(hl_a, 'test_layer_a')\nIO.save_layer(hl_b, 'test_layer_b')\nIO.save_layer(opl, 'test_layer_c')\n",
"step-5": "from HiddenLayer import HiddenLayer\nfrom Vector import Vector\nimport IO\nimport Loss\nimport Utils\nimport Activation\nimport Backpropagation\nimport Rate\n\n\n# As a test, let's simulate the OR-gate with a single perceptron\n\"\"\" training = []\ntraining.append(Vector(2, arr=[1, 1]))\ntraining.append(Vector(2, arr=[1, 0]))\ntraining.append(Vector(2, arr=[0, 1]))\ntraining.append(Vector(2, arr=[0, 0]))\n\nlabels = Vector(4, arr=[1, 1, 1, 0])\nfrom Vector \nleft_true= Vector(2, arr=[1, 0])\nboth_false = Vector(2, arr=[0, 0])\n\nprint(tron.predict(both_true))\nprint(tron.predict(right_true))\nprint(tron.predict(left_true))\nprint(tron.predict(both_false)) \"\"\"\n\n# Testing the reading of data\n\"\"\" images = Data.read_images('test')\nlabels = Data.read_labels('test')\n\nUI.draw_image(images[1234], \"testi\")\nprint(labels[1234]) \"\"\"\n\n# Vector multiplication test\n\"\"\" print(Vector(4, arr=[1, 2, 3, 4]) * Vector(4, arr=[1, 2, 2, 2])) \"\"\"\n\n# Neuron output test\n\"\"\" n = Neuron(Utils.rand_array(4), Activation.sigmoid, Activation.sigmoid_d, 3)\nx = Vector(4, arr=Utils.rand_array(4))\nprint(n)\nprint(x)\nprint(n.output(x)) \"\"\"\n\n# rand_array and normalization test\n\"\"\" arr = Utils.rand_array(10, -5, 15)\nprint(arr)\nprint(Utils.normalize(arr, -5, 15)) \"\"\"\n\n# Testing some hidden layer basic functionality and saving/loading\n\"\"\" images = IO.read_images('test')\nlabels = IO.read_labels('test')\n\nweights = [Utils.rand_array(784, -1, 1) for _ in range(10)]\nhl_a = HiddenLayer(10, 784, weights, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\n#IO.save_layer(hl_a, \"test\")\nhl_b = IO.load_layer(\"test\")\n\nfor i in range(9):\n img = Vector(Utils.normalize(Utils.flatten_2d(images[i]), 0, 255))\n o1 = hl_a.generate_output(img)\n o2 = hl_b.generate_output(img)\n #print(\"Picture \" + str(i + 1) + \": \" + str(o1) + \", \" + str(o2) + \", correct answer is \" + str(labels[i]))\n print(o1)\n print(o2) \"\"\"\n\n# Array flattening testing\n\"\"\" testarr = [[1, 2, 7, 8], [3, 4, 9, 10], [5, 6, 11, 12]]\ntestarr = Utils.flatten_2d(testarr)\nprint(testarr)\ntestarr = Utils.deflatten_2d(testarr, 4, 3)\nprint(testarr) \"\"\"\n\n# Let's test multi-layer nets\n\"\"\" images = IO.read_images('test')\nlabels = IO.read_labels('test')\nimg_test = images[:20]\nlab_test = labels[:20]\n\nweights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]\nweights_b = [Utils.rand_array(10, 0, 1) for _ in range(10)]\nhl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\nhl_b = HiddenLayer(10, 10, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\nLEARNING_RATE = 0.5\n\nfor (i, l) in zip(images, labels):\n img = Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255))\n lab = Utils.onehot_label_arr(l)\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a)\n grads = Backpropagation.output_layer_grads(hl_b, o_b, lab, hl_a, LEARNING_RATE)\n #grad_b = \n #print(\"Picture \" + str(i + 1) + \": \" + str(o1) + \", \" + str(o2) + \", correct answer is \" + str(labels[i]))\n #print(o_a)\n #print(o_b)\n #print(lab)\n #print()\n #print(\"----\")\n\nfor n in hl_b.neurons:\n print(n.weights) \"\"\"\n\n# Let's try how well a single one-layer 10-neuron net performs!\n# Read images and labels\n\"\"\" images = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = 
IO.read_labels('test')\nprint(\"Images & labels read!\")\n\n# Preprocess images and labels\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\n\nfor (i, l) in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))\n labels_oh.append(Utils.onehot_label_arr(l))\n\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 255)))\n\nprint(\"Images & labels processed!\")\n\n# Initialize weights and layer\n#weights_a = [Utils.rand_array(784, 0, 1) for _ in range(10)]\nweights_a = [[0] * 784] * 10\nhl_a = HiddenLayer(10, 784, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0.1)\n\nLEARNING_RATE = 0.05\n\niter = 1eturn super().setUp()\nprev_correct = 0\n#old_weights = weights_a\nwhile True:\n print(\"Iteration: \" + str(iter))\n\n j = 1\n for (img, lab) in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n grads = Backpropagation.output_layer_backpropagate(hl_a, o_a, lab, img, LEARNING_RATE)\n \n if j % 1000 == 0:\n print(\" \" + str(j))\n j += 1\n\n right_amount = 0\n for (img, lab) in zip(test_images_flat, test_labels):\n o_a = hl_a.generate_output(img)\n pred = Utils.make_prediction(o_a)\n if pred == lab:\n right_amount += 1\n \n print(\"Correct predictions: \" + str(right_amount))\n\n if (iter > 10):\n break\n\n prev_correct = right_amount\n iter = iter + 1 \"\"\"\n\n#IO.save_layer(hl_a, \"test1_3\")\n\n\n\n# Visualize weights!\n\"\"\" hl_a = IO.load_layer(\"test1_3\")\n\ni = 0\nfor n in hl_a.neurons:\n weights = n.weights\n weights = Utils.fit_arr(weights, 0, 255)\n #print(weights)\n IO.save_image(Utils.deflatten_2d(weights, 28, 28), \"w\" + str(i))\n i += 1 \"\"\"\n\n\n\n# Final boss: a 32-16-10 multi-layer net!\nimages = IO.read_images('training')\nlabels = IO.read_labels('training')\ntest_images = IO.read_images('test')\ntest_labels = IO.read_labels('test')\nprint(\"Images & labels read!\")\n\n# Preprocess images and labels\nimages_flat = []\nlabels_oh = []\ntest_images_flat = []\n\nfor (i, l) in zip(images, labels):\n images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n labels_oh.append(Utils.onehot_label_arr(l))\n\nfor i in test_images:\n test_images_flat.append(Vector(Utils.normalize(Utils.flatten_2d(i), 0, 1)))\n\nprint(\"Images & labels processed!\")\n\n# Don't change these two\nIMAGE_INPUT_SIZE = 784\nOUTPUT_LAYER_SIZE = 10\n\n# These define how many neurons in layers A & B\nLAYER_A_SIZE = 32\nLAYER_B_SIZE = 16\n\n# Initialize weights and layer\nweights_a = [Utils.rand_array(IMAGE_INPUT_SIZE, -1, 1) for _ in range(LAYER_A_SIZE)]\nweights_b = [Utils.rand_array(LAYER_A_SIZE, -1, 1) for _ in range(LAYER_B_SIZE)]\nweights_op = [Utils.rand_array(LAYER_B_SIZE, -1, 1) for _ in range(OUTPUT_LAYER_SIZE)]\n\nhl_a = HiddenLayer(LAYER_A_SIZE, IMAGE_INPUT_SIZE, weights_a, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)\nhl_b = HiddenLayer(LAYER_B_SIZE, LAYER_A_SIZE, weights_b, Activation.sigmoid, Activation.sigmoid_d, Loss.mean_quadratic, Loss.mean_quadratic_d, 0)\nopl = HiddenLayer(OUTPUT_LAYER_SIZE, LAYER_B_SIZE, weights_op, Activation.sigmoid, Activation.sigmoid_d, Loss.quadratic, Loss.quadratic_d, 0)\n\n# ---- Change these if you want to play around with the program ----\n\n# These decide when the training stops\nITERATION_CAP = 20 # after 20 iterations or\nACCURACY_CAP = 6500 # at 65% accuracy\n\n# These adjust the learning process\nINITIAL_LEARNING_RATE = 0.05\nLEARNING_DECAY_SCALAR = 
0.0025\nBATCH_SIZE = 100\n\n# ----------------\n\nlearning_rate = INITIAL_LEARNING_RATE\niter = 1\nprev_correct = 0\n\nwhile True:\n print(\"Iteration: \" + str(iter))\n\n learning_rate = Rate.decaying(learning_rate, iter, LEARNING_DECAY_SCALAR)\n\n print(\"Learning rate: \" + str(learning_rate))\n \n j = 1\n batchtracker = 0\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n\n for (img, lab) in zip(images_flat, labels_oh):\n o_a = hl_a.generate_output(img)\n o_b = hl_b.generate_output(o_a['op'])\n output = opl.generate_output(o_b['op'])\n\n img_sum = img_sum + img\n lab_sum = lab_sum + Vector(lab)\n oa_sum = oa_sum + o_a['op']\n ob_sum = ob_sum + o_b['op']\n op_sum = op_sum + output['op']\n\n batchtracker = batchtracker + 1\n\n if batchtracker == BATCH_SIZE:\n img_sum = img_sum * (1 / BATCH_SIZE)\n lab_sum = lab_sum * (1 / BATCH_SIZE)\n oa_sum = oa_sum * (1 / BATCH_SIZE)\n ob_sum = ob_sum * (1 / BATCH_SIZE)\n op_sum = op_sum * (1 / BATCH_SIZE)\n\n #print(opl.loss(lab_sum, op_sum))\n\n opl_backprop = Backpropagation.output_layer_backpropagate(opl, op_sum, lab, ob_sum, learning_rate)\n hl_b_backprop = Backpropagation.hidden_layer_backpropagate(hl_b, oa_sum, ob_sum, opl_backprop, learning_rate)\n hl_a_backprop = Backpropagation.hidden_layer_backpropagate(hl_a, img, oa_sum, hl_b_backprop, learning_rate)\n\n img_sum = Vector([0] * IMAGE_INPUT_SIZE)\n lab_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n oa_sum = Vector([0] * LAYER_A_SIZE)\n ob_sum = Vector([0] * LAYER_B_SIZE)\n op_sum = Vector([0] * OUTPUT_LAYER_SIZE)\n batchtracker = 0\n\n \n if j % 10000 == 0:\n print(\" \" + str(j))\n j += 1\n\n print(\"Iteration \" + str(iter) + \" done! Now testing accuracy...\")\n\n right_amount = 0\n for (img_t, lab_t) in zip(test_images_flat, test_labels):\n oa = hl_a.generate_output(img_t)['op']\n ob = hl_b.generate_output(oa)['op']\n op = opl.generate_output(ob)['op']\n pred = Utils.make_prediction(op)\n if pred == lab_t:\n right_amount += 1\n \n print(\"Correct predictions: \" + str(right_amount))\n\n if (iter >= ITERATION_CAP):\n break\n \n if (prev_correct >= ACCURACY_CAP):\n break\n\n #if (prev_correct > right_amount):\n # break\n\n prev_correct = right_amount\n iter = iter + 1\n\nIO.save_layer(hl_a, \"test_layer_a\")\nIO.save_layer(hl_b, \"test_layer_b\")\nIO.save_layer(opl, \"test_layer_c\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Thibaut Lapierre <[email protected]>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from shaddock.drivers.docker.api import DockerApi
from docker import errors as docker_errors
import sys
class Container(object):
"""Instance a defined container
This class instance a Docker container depending on its
name and model definition.
The basics Docker methods are implemented as well as a
Shaddock's specific one that return the information of
the concerned container.
Shaddock keep no tracks of any Container ID and rely on no
databases. THe containers are retrieve from their names.
"""
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
# we may want to use func.__code__.co_varnames here to gather all
# possible arguments of the docker api and compare them with cfg
        # and delete the crappy hack of the next 8 lines.
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name',
'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
def start(self):
"""Returns a Container object"""
try:
print('Starting container: {}'.format(self.cfg['name']))
start = self.docker_client.run(**self.env)
except docker_errors.APIError as error:
print(error)
print('Container {} is already running'.format(self.cfg['name']))
return self.cfg['name']
return start
def stop(self):
c = self.info.get('Container')
if c is not None:
print('Stopping container: {}'.format(self.cfg['name']))
return c.stop()
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
    def restart(self):
        c = self.info.get('Container')
        if c is not None:
            return c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
# "Fix" in order to not use the stream generator in Python2
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd,
stderr=True,
stdout=True,
stream=True,
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd,
stderr=True,
stdout=True,
stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
# "Fix" in order to not use the stream generator in Python2
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True,
stdout=True,
timestamps=False,
stream=True,
):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True,
stdout=True,
timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all
if (c.name in self.cfg['service_name'])][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
# Container is not running
info = {}
return info
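
# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal sketch of driving this class directly. The configuration keys are
# inferred from how self.cfg is read above ('name', 'service_name', 'image',
# 'api_cfg'); the api_cfg payload is whatever shaddock's DockerApi expects and
# is assumed here, so treat it as hypothetical. Requires a reachable Docker
# daemon and the named image.
if __name__ == '__main__':
    svc_cfg = {
        'name': 'shaddock_demo',          # printed in the status messages
        'service_name': 'shaddock_demo',  # used by _get_info() to find it by name
        'image': 'nginx:latest',
        'api_cfg': None,                  # hypothetical: depends on DockerApi
    }
    Container(svc_cfg).start()
    # The class keeps no state between calls: a fresh object looks the running
    # container up again by name, as described in the class docstring.
    demo = Container(svc_cfg)
    print(demo.info.get('State'))
    demo.remove()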
|
normal
|
{
"blob_id": "c2c1194ed23adda015b23897888d1a4cc11423d5",
"index": 5074,
"step-1": "<mask token>\n\n\nclass Container(object):\n <mask token>\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n <mask token>\n <mask token>\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-2": "<mask token>\n\n\nclass Container(object):\n <mask token>\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n <mask token>\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-3": "<mask token>\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-4": "from shaddock.drivers.docker.api import DockerApi\nfrom docker import errors as docker_errors\nimport sys\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n 
info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2014 Thibaut Lapierre <[email protected]>. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom shaddock.drivers.docker.api import DockerApi\nfrom docker import errors as docker_errors\nimport sys\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n # we may want to use func.__code__.co_varnames here to gather all\n # possible arguments of the docker api and compare them with cfg\n # and delete the crapy hack of the next 8 lines.\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name',\n 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n # \"Fix\" in order to not use the stream generator in Python2\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd,\n stderr=True,\n stdout=True,\n stream=True,\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = 
c.exec_run(cmd,\n stderr=True,\n stdout=True,\n stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n # \"Fix\" in order to not use the stream generator in Python2\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True,\n stdout=True,\n timestamps=False,\n stream=True,\n ):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True,\n stdout=True,\n timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all\n if (c.name in self.cfg['service_name'])][0]\n\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n\n except IndexError:\n # Container is not running\n info = {}\n return info\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
from .chair_model import run_chair_simulation, init_omega_t, \
JumpingModel, H_to_L
from .utils import load_hcp_peaks, Condition, average_peak_counts
|
normal
|
{
"blob_id": "9087a7bf42070fdb8639c616fdf7f09ad3903656",
"index": 6755,
"step-1": "<mask token>\n",
"step-2": "from .chair_model import run_chair_simulation, init_omega_t, JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-3": "from .chair_model import run_chair_simulation, init_omega_t, \\\n JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
#
#
#
basedir = '/n/regal/pfister_lab/haehn/CREMITEST/'
testA = basedir + 'testA.npz.npy'
testA_targets = basedir + 'testA_targets.npz.npy'
testB = basedir + 'testB.npz.npy'
testB_targets = basedir + 'testB_targets.npz.npy'
testC = basedir + 'testC.npz.npy'
testC_targets = basedir + 'testC_targets.npz.npy'
counter = 0
# testA = np.load(testA, mmap_mode='r')
# testA_count = testA.shape[0]
# testB = np.load(testB, mmap_mode='r')
# testB_count = testB.shape[0]
# testC = np.load(testC, mmap_mode='r')
# testC_count = testC.shape[0]
# all_count = testA_count + testB_count + testC_count
# #
# # allocate large array
# #
# PATCH_BYTES = 75*75
# NO_PATCHES = all_count
# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now
# p_rgba = np.zeros(P_SIZE, dtype=np.float32)
# p_rgba[0:testA_count] = testA
# p_rgba[testA_count:testA_count+testB_count] = testB
# p_rgba[testB_count:testB_count+testC_count] = testC
# # now store this bad boy
# np.save(basedir+'test.npy', p_rgba)
# print 'STORED BIG BOY!'
p_rgba = None # free them all
#
# same for targets
#
testA_targets = np.load(testA_targets)
testA_count = testA_targets.shape[0]
testB_targets = np.load(testB_targets)
testB_count = testB_targets.shape[0]
testC_targets = np.load(testC_targets)
testC_count = testC_targets.shape[0]
all_count = testA_count + testB_count + testC_count
NO_PATCHES = all_count
p_target = np.zeros(NO_PATCHES)
p_target[0:testA_count] = testA_targets
p_target[testA_count:testA_count+testB_count] = testB_targets
p_target[testB_count:testB_count+testC_count] = testC_targets
# now store the combined targets
np.save(basedir+'test_targets.npy', p_target)
print('ALL DONE!')
# import numpy as np
# #
# #
# #
# basedir = '/n/regal/pfister_lab/haehn/CREMITEST/'
# testA = basedir + 'testA.npz.npy'
# testA_targets = basedir + 'testA_targets.npz.npy'
# testB = basedir + 'testB.npz.npy'
# testB_targets = basedir + 'testB_targets.npz.npy'
# testC = basedir + 'testC.npz.npy'
# testC_targets = basedir + 'testC_targets.npz.npy'
# counter = 0
# testA = np.load(testA, mmap_mode='r')
# testA_count = testA.shape[0]
# testB = np.load(testB, mmap_mode='r')
# testB_count = testB.shape[0]
# testC = np.load(testC, mmap_mode='r')
# testC_count = testC.shape[0]
# all_count = testA_count + testB_count + testC_count
# #
# # allocate large array
# #
# PATCH_BYTES = 75*75
# NO_PATCHES = all_count
# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now
# p_rgba = np.zeros(P_SIZE, dtype=np.float32)
# p_rgba[0:testA_count] = testA
# p_rgba[testA_count:testA_count+testB_count] = testB
# p_rgba[testB_count:testB_count+testC_count] = testC
# # now store this bad boy
# np.save(basedir+'test.npy', p_rgba)
# print 'STORED BIG BOY!'
# p_rgba = None # free them all
# #
# # same for targets
# #
# testA_targets = np.load(testA_targets)
# testB_targets = np.load(testB_targets)
# testC_targets = np.load(testC_targets)
# p_target = np.zeros(NO_PATCHES)
# p_target[0:testA_count] = testA_targets
# p_target[testA_count:testA_count+testB_count] = testB_targets
# p_target[testB_count:testB_count+testC_count] = testC_targets
# # now store this lady boy
# np.save(basedir+'test_targets.npy', p_target)
# print 'ALL DONE!'
|
normal
|
{
"blob_id": "5cb7af5ded532058db7f5520d48ff418ba856f04",
"index": 6150,
"step-1": "import numpy as np\n\n#\n#\n#\n\nbasedir = '/n/regal/pfister_lab/haehn/CREMITEST/'\n\ntestA = basedir + 'testA.npz.npy'\ntestA_targets = basedir + 'testA_targets.npz.npy'\ntestB = basedir + 'testB.npz.npy'\ntestB_targets = basedir + 'testB_targets.npz.npy'\ntestC = basedir + 'testC.npz.npy'\ntestC_targets = basedir + 'testC_targets.npz.npy'\n\ncounter = 0\n\n# testA = np.load(testA, mmap_mode='r')\n# testA_count = testA.shape[0]\n\n# testB = np.load(testB, mmap_mode='r')\n# testB_count = testB.shape[0]\n\n# testC = np.load(testC, mmap_mode='r')\n# testC_count = testC.shape[0]\n\n# all_count = testA_count + testB_count + testC_count\n\n# #\n# # allocate large array\n# # \n# PATCH_BYTES = 75*75\n# NO_PATCHES = all_count\n# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now\n\n# p_rgba = np.zeros(P_SIZE, dtype=np.float32)\n\n# p_rgba[0:testA_count] = testA\n# p_rgba[testA_count:testA_count+testB_count] = testB\n# p_rgba[testB_count:testB_count+testC_count] = testC\n\n# # now store this bad boy\n# np.save(basedir+'test.npy', p_rgba)\n\n# print 'STORED BIG BOY!'\np_rgba = None # free them all\n\n#\n# same for targets\n#\ntestA_targets = np.load(testA_targets)\ntestA_count = testA_targets.shape[0]\ntestB_targets = np.load(testB_targets)\ntestB_count = testB_targets.shape[0]\ntestC_targets = np.load(testC_targets)\ntestC_count = testC_targets.shape[0]\n\nall_count = testA_count + testB_count + testC_count\nNO_PATCHES = all_count\n\np_target = np.zeros(NO_PATCHES)\n\np_target[0:testA_count] = testA_targets\np_target[testA_count:testA_count+testB_count] = testB_targets\np_target[testB_count:testB_count+testC_count] = testC_targets\n\n# now store this lady boy\nnp.save(basedir+'test_targets.npy', p_target)\n\nprint 'ALL DONE!'\n\n\n\n# import numpy as np\n\n# #\n# #\n# #\n\n# basedir = '/n/regal/pfister_lab/haehn/CREMITEST/'\n\n# testA = basedir + 'testA.npz.npy'\n# testA_targets = basedir + 'testA_targets.npz.npy'\n# testB = basedir + 'testB.npz.npy'\n# testB_targets = basedir + 'testB_targets.npz.npy'\n# testC = basedir + 'testC.npz.npy'\n# testC_targets = basedir + 'testC_targets.npz.npy'\n\n# counter = 0\n\n# testA = np.load(testA, mmap_mode='r')\n# testA_count = testA.shape[0]\n\n# testB = np.load(testB, mmap_mode='r')\n# testB_count = testB.shape[0]\n\n# testC = np.load(testC, mmap_mode='r')\n# testC_count = testC.shape[0]\n\n# all_count = testA_count + testB_count + testC_count\n\n# #\n# # allocate large array\n# # \n# PATCH_BYTES = 75*75\n# NO_PATCHES = all_count\n# P_SIZE = (NO_PATCHES, 4, 75,75) # rather than raveled right now\n\n# p_rgba = np.zeros(P_SIZE, dtype=np.float32)\n\n# p_rgba[0:testA_count] = testA\n# p_rgba[testA_count:testA_count+testB_count] = testB\n# p_rgba[testB_count:testB_count+testC_count] = testC\n\n# # now store this bad boy\n# np.save(basedir+'test.npy', p_rgba)\n\n# print 'STORED BIG BOY!'\n# p_rgba = None # free them all\n\n# #\n# # same for targets\n# #\n# testA_targets = np.load(testA_targets)\n# testB_targets = np.load(testB_targets)\n# testC_targets = np.load(testC_targets)\n\n# p_target = np.zeros(NO_PATCHES)\n\n# p_target[0:testA_count] = testA_targets\n# p_target[testA_count:testA_count+testB_count] = testB_targets\n# p_target[testB_count:testB_count+testC_count] = testC_targets\n\n# # now store this lady boy\n# np.save(basedir+'test_targets.npy', p_target)\n\n# print 'ALL DONE!'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('-' * 10)
print('NY State has:', cities['NY'])
print('OR State has : ', cities['OR'])
print('-' * 10)
print("Michigan's abbreviation is: ", states['Michigan'])
print("Flordia's abreviation is :", states['Flordia'])
print('-' * 10)
print('Michigan has : ', cities[states['Michigan']])
print('Flordia has: ', cities[states['Flordia']])
print('-' * 10)
for state, abbrev in list(states.items()):
print(f'{state} is abbreviated {abbrev}')
print('-' * 10)
for abbrev, city in list(cities.items()):
print(f'{abbrev} has the city {city} ')
print('-' * 10)
for state, abbrev in list(states.items()):
print(f'{state}state is abbreviated {abbrev}')
print(f'and has city {cities[abbrev]}')
print('-' * 10)
<|reserved_special_token_1|>
states = {'Oregon': 'OR', 'Flordia': 'FL', 'California': 'CA', 'New York':
'NY', 'Michigan': 'MI'}
cities = {'CA': 'San Fransisco', 'MI': 'Detroit', 'FL': 'Jacksonville'}
cities['NY'] = 'New York'
cities['OR'] = 'PortLand'
print('-' * 10)
print('NY State has:', cities['NY'])
print('OR State has : ', cities['OR'])
print('-' * 10)
print("Michigan's abbreviation is: ", states['Michigan'])
print("Flordia's abreviation is :", states['Flordia'])
print('-' * 10)
print('Michigan has : ', cities[states['Michigan']])
print('Flordia has: ', cities[states['Flordia']])
print('-' * 10)
for state, abbrev in list(states.items()):
print(f'{state} is abbreviated {abbrev}')
print('-' * 10)
for abbrev, city in list(cities.items()):
print(f'{abbrev} has the city {city} ')
print('-' * 10)
for state, abbrev in list(states.items()):
print(f'{state}state is abbreviated {abbrev}')
print(f'and has city {cities[abbrev]}')
print('-' * 10)
<|reserved_special_token_1|>
#Adds states and their abbreviations to the dict
states = {
'Oregon' : 'OR' ,
'Flordia': 'FL' ,
'California':'CA',
'New York':'NY',
'Michigan': 'MI',
}
#Adds cities, keyed by state abbreviation, to the dict
cities = {
'CA':'San Fransisco',
'MI': 'Detroit',
'FL': 'Jacksonville'
}
cities['NY'] = 'New York'
cities['OR'] = 'PortLand'
#Prints cities
print('-' * 10)
print("NY State has:", cities['NY'])
print("OR State has : ",cities['OR'])
#prints states
print('-' * 10)
print("Michigan's abbreviation is: " , states['Michigan'])
print("Flordia's abreviation is :" , states['Flordia'])
print('-' * 10)
print("Michigan has : ", cities[states['Michigan']])
print("Flordia has: " , cities[states['Flordia']])
print('-' * 10)
for state , abbrev in list(states.items()):
print(f"{state} is abbreviated {abbrev}")
print('-'* 10)
for abbrev, city in list(cities.items()):
print(f"{abbrev} has the city {city} ")
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state}state is abbreviated {abbrev}")
print(f"and has city {cities[abbrev]}")
#carefully acquires a state that may not be there
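#The block below is an illustrative sketch of that safe lookup; 'Texas' and
#'TX' are assumed example keys that are not present in the dicts above.
state = states.get('Texas')
if not state:
    print("Sorry, no Texas.")
city = cities.get('TX', 'Does Not Exist')
print(f"The city for the state 'TX' is: {city}")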
print('-' * 10)
|
flexible
|
{
"blob_id": "1bdc1274cceba994524442c7a0065498a9c1d7bc",
"index": 8919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('-' * 10)\nprint('NY State has:', cities['NY'])\nprint('OR State has : ', cities['OR'])\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \", states['Michigan'])\nprint(\"Flordia's abreviation is :\", states['Flordia'])\nprint('-' * 10)\nprint('Michigan has : ', cities[states['Michigan']])\nprint('Flordia has: ', cities[states['Flordia']])\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state} is abbreviated {abbrev}')\nprint('-' * 10)\nfor abbrev, city in list(cities.items()):\n print(f'{abbrev} has the city {city} ')\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state}state is abbreviated {abbrev}')\n print(f'and has city {cities[abbrev]}')\nprint('-' * 10)\n",
"step-3": "states = {'Oregon': 'OR', 'Flordia': 'FL', 'California': 'CA', 'New York':\n 'NY', 'Michigan': 'MI'}\ncities = {'CA': 'San Fransisco', 'MI': 'Detroit', 'FL': 'Jacksonville'}\ncities['NY'] = 'New York'\ncities['OR'] = 'PortLand'\nprint('-' * 10)\nprint('NY State has:', cities['NY'])\nprint('OR State has : ', cities['OR'])\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \", states['Michigan'])\nprint(\"Flordia's abreviation is :\", states['Flordia'])\nprint('-' * 10)\nprint('Michigan has : ', cities[states['Michigan']])\nprint('Flordia has: ', cities[states['Flordia']])\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state} is abbreviated {abbrev}')\nprint('-' * 10)\nfor abbrev, city in list(cities.items()):\n print(f'{abbrev} has the city {city} ')\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f'{state}state is abbreviated {abbrev}')\n print(f'and has city {cities[abbrev]}')\nprint('-' * 10)\n",
"step-4": "#Adds states to the list\nstates = {\n 'Oregon' : 'OR' ,\n 'Flordia': 'FL' ,\n 'California':'CA',\n 'New York':'NY',\n 'Michigan': 'MI',\n }\n \n#Adds cities to the list \ncities = {\n 'CA':'San Fransisco',\n 'MI': 'Detroit',\n 'FL': 'Jacksonville'\n}\n\ncities['NY'] = 'New York'\ncities['OR'] = 'PortLand'\n\n#Prints cities\nprint('-' * 10)\nprint(\"NY State has:\", cities['NY'])\nprint(\"OR State has : \",cities['OR'])\n#prints states\nprint('-' * 10)\nprint(\"Michigan's abbreviation is: \" , states['Michigan'])\nprint(\"Flordia's abreviation is :\" , states['Flordia'])\n\n\nprint('-' * 10)\nprint(\"Michigan has : \", cities[states['Michigan']])\nprint(\"Flordia has: \" , cities[states['Flordia']])\n\nprint('-' * 10)\nfor state , abbrev in list(states.items()):\n print(f\"{state} is abbreviated {abbrev}\")\n\nprint('-'* 10)\nfor abbrev, city in list(cities.items()):\n print(f\"{abbrev} has the city {city} \")\n\nprint('-' * 10)\nfor state, abbrev in list(states.items()):\n print(f\"{state}state is abbreviated {abbrev}\")\n print(f\"and has city {cities[abbrev]}\")\n#carefullly aquires state that may not be there \nprint('-' * 10)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while n > 0:
arr.append(n)
n -= 1
while len(arr) + len(sub) > 1:
while len(arr) > 1:
arr.pop()
sub.append(arr.pop())
arr = sub[::-1] + arr
sub = []
print(arr[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
arr = []
sub = []
n = int(input())
while n > 0:
arr.append(n)
n -= 1
while len(arr) + len(sub) > 1:
while len(arr) > 1:
arr.pop()
sub.append(arr.pop())
arr = sub[::-1] + arr
sub = []
print(arr[0])
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 22:28:30 2019
@author: donsdev
"""
arr = []
sub = []
n = int(input())
while n > 0:
arr.append(n)
n-=1
while len(arr) + len(sub) > 1:
while len(arr) > 1:
arr.pop()
sub.append(arr.pop())
arr = sub[::-1] + arr
sub = []
print(arr[0])
|
flexible
|
{
"blob_id": "d5d31920f7fd4ed2913c5880dba61c2015181be9",
"index": 5760,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n > 0:\n arr.append(n)\n n -= 1\nwhile len(arr) + len(sub) > 1:\n while len(arr) > 1:\n arr.pop()\n sub.append(arr.pop())\n arr = sub[::-1] + arr\n sub = []\nprint(arr[0])\n",
"step-3": "<mask token>\narr = []\nsub = []\nn = int(input())\nwhile n > 0:\n arr.append(n)\n n -= 1\nwhile len(arr) + len(sub) > 1:\n while len(arr) > 1:\n arr.pop()\n sub.append(arr.pop())\n arr = sub[::-1] + arr\n sub = []\nprint(arr[0])\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 17 22:28:30 2019\n\n@author: donsdev\n\"\"\"\n\narr = []\nsub = []\nn = int(input())\nwhile n > 0:\n arr.append(n)\n n-=1\nwhile len(arr) + len(sub) > 1:\n while len(arr) > 1:\n arr.pop()\n sub.append(arr.pop())\n arr = sub[::-1] + arr\n sub = []\nprint(arr[0])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
onfiguration name="test3" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="hori_check" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test3.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test4" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="hori_check" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test4.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<list>
<item itemvalue="Python.test1" />
<item itemvalue="Python.test2" />
<item itemvalue="Python.test3" />
<item itemvalue="Python.dir_cut" />
<item itemvalue="Python.test4" />
</list>
<recent_temporary>
<list>
<item itemvalue="Python.test4" />
<item itemvalue="Python.dir_cut" />
<item itemvalue="Python.test1" />
<item itemvalue="Python.test2" />
<item itemvalue="Python.test3" />
</list>
</recent_temporary>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="b9acfeb2-5104-4c03-bdda-fe9dd331ff17" name="Default Changelist" comment="" />
<created>1539654879943</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1539654879943</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="-8" y="-8" width="1382" height="744" extended-state="6" />
<editor active="true" />
<layout>
<window_info content_ui="combo" id="Project" order="0" visible="true" weight
|
normal
|
{
"blob_id": "48affa1b823a2543b6bbda615247324f5c249a69",
"index": 5831,
"step-1": "onfiguration name=\"test3\" type=\"PythonConfigurationType\" factoryName=\"Python\" temporary=\"true\">\n <module name=\"hori_check\" />\n <option name=\"INTERPRETER_OPTIONS\" value=\"\" />\n <option name=\"PARENT_ENVS\" value=\"true\" />\n <envs>\n <env name=\"PYTHONUNBUFFERED\" value=\"1\" />\n </envs>\n <option name=\"SDK_HOME\" value=\"\" />\n <option name=\"WORKING_DIRECTORY\" value=\"$PROJECT_DIR$\" />\n <option name=\"IS_MODULE_SDK\" value=\"true\" />\n <option name=\"ADD_CONTENT_ROOTS\" value=\"true\" />\n <option name=\"ADD_SOURCE_ROOTS\" value=\"true\" />\n <option name=\"SCRIPT_NAME\" value=\"$PROJECT_DIR$/test3.py\" />\n <option name=\"PARAMETERS\" value=\"\" />\n <option name=\"SHOW_COMMAND_LINE\" value=\"false\" />\n <option name=\"EMULATE_TERMINAL\" value=\"false\" />\n <option name=\"MODULE_MODE\" value=\"false\" />\n <option name=\"REDIRECT_INPUT\" value=\"false\" />\n <option name=\"INPUT_FILE\" value=\"\" />\n <method v=\"2\" />\n </configuration>\n <configuration name=\"test4\" type=\"PythonConfigurationType\" factoryName=\"Python\" temporary=\"true\">\n <module name=\"hori_check\" />\n <option name=\"INTERPRETER_OPTIONS\" value=\"\" />\n <option name=\"PARENT_ENVS\" value=\"true\" />\n <envs>\n <env name=\"PYTHONUNBUFFERED\" value=\"1\" />\n </envs>\n <option name=\"SDK_HOME\" value=\"\" />\n <option name=\"WORKING_DIRECTORY\" value=\"$PROJECT_DIR$\" />\n <option name=\"IS_MODULE_SDK\" value=\"true\" />\n <option name=\"ADD_CONTENT_ROOTS\" value=\"true\" />\n <option name=\"ADD_SOURCE_ROOTS\" value=\"true\" />\n <option name=\"SCRIPT_NAME\" value=\"$PROJECT_DIR$/test4.py\" />\n <option name=\"PARAMETERS\" value=\"\" />\n <option name=\"SHOW_COMMAND_LINE\" value=\"false\" />\n <option name=\"EMULATE_TERMINAL\" value=\"false\" />\n <option name=\"MODULE_MODE\" value=\"false\" />\n <option name=\"REDIRECT_INPUT\" value=\"false\" />\n <option name=\"INPUT_FILE\" value=\"\" />\n <method v=\"2\" />\n </configuration>\n <list>\n <item itemvalue=\"Python.test1\" />\n <item itemvalue=\"Python.test2\" />\n <item itemvalue=\"Python.test3\" />\n <item itemvalue=\"Python.dir_cut\" />\n <item itemvalue=\"Python.test4\" />\n </list>\n <recent_temporary>\n <list>\n <item itemvalue=\"Python.test4\" />\n <item itemvalue=\"Python.dir_cut\" />\n <item itemvalue=\"Python.test1\" />\n <item itemvalue=\"Python.test2\" />\n <item itemvalue=\"Python.test3\" />\n </list>\n </recent_temporary>\n </component>\n <component name=\"SvnConfiguration\">\n <configuration />\n </component>\n <component name=\"TaskManager\">\n <task active=\"true\" id=\"Default\" summary=\"Default task\">\n <changelist id=\"b9acfeb2-5104-4c03-bdda-fe9dd331ff17\" name=\"Default Changelist\" comment=\"\" />\n <created>1539654879943</created>\n <option name=\"number\" value=\"Default\" />\n <option name=\"presentableId\" value=\"Default\" />\n <updated>1539654879943</updated>\n </task>\n <servers />\n </component>\n <component name=\"ToolWindowManager\">\n <frame x=\"-8\" y=\"-8\" width=\"1382\" height=\"744\" extended-state=\"6\" />\n <editor active=\"true\" />\n <layout>\n <window_info content_ui=\"combo\" id=\"Project\" order=\"0\" visible=\"true\" weight",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import asyncio
import logging
import os.path
from serial_asyncio import open_serial_connection
from typing import NewType, cast
# Type annotations and converters
AsciiBytes = NewType('AsciiBytes', bytes)
def to_ascii(s: str) -> AsciiBytes:
if s[-1] != '\n':
s += '\n'
return cast(AsciiBytes, s.encode(encoding='ascii'))
class USBHandler:
"""Reads from and writes to the underlying MDB USB board.
Users can either obtain an asyncio.Queue that the handler will push
messages to using listen(), or it can ask for a one-time read using read().
For sending messages, if no reply is expected or there is a poller waiting
for any response, send() can be used, otherwise sendread() will send the
message and wait for a one-time reply. Having a listener and waiting for a
single message at the same time is an error. See the Sniffer class for an
example of both usages."""
def __init__(self):
self.initialized = False
self.run_task = None
self.waiters = {}
self.queues = {}
self.logger = logging.getLogger('.'.join((__name__,
self.__class__.__name__)))
async def initialize(self, device_path: str) -> None:
assert os.path.exists(device_path)
self.logger.info("Initializing USBReader.")
self.logger.debug("Opening serial connection to device at %s",
device_path)
self.serial_reader, self.serial_writer = \
await open_serial_connection(url=device_path, baudrate=115200)
self.initialized = True
self.logger.debug("Connected to serial device at %s.", device_path)
async def _run(self) -> None:
while True:
message = await self.serial_reader.readuntil(separator=b'\r\n')
stripped_message = message.decode(encoding='ascii').rstrip('\n\r')
self.logger.debug("Read '%s' from MDB board.", stripped_message)
message_type = stripped_message[0]
if message_type in self.waiters:
self.waiters[message_type].set_result(stripped_message)
del self.waiters[message_type]
# Lets the waiter run.
await asyncio.sleep(0)
elif message_type in self.queues:
try:
self.queues[message_type].put_nowait(stripped_message)
except asyncio.QueueFull:
self.logger.warning('Queue for message type %s is full. '
'Scheduling the put in another task.',
message_type)
asyncio.create_task(
self.queues[message_type].put(stripped_message))
else:
self.logger.error("Unhandled message: %s", stripped_message)
async def run(self) -> None:
assert self.initialized
self.logger.info('Starting runner.')
self.run_task = asyncio.create_task(self._run())
try:
await self.run_task
except asyncio.CancelledError:
self.logger.info('Runner cancelled.')
async def send(self, message: AsciiBytes, _drain=True) -> None:
assert self.initialized
self.logger.info("Sending message to MDB board: %s", message)
self.serial_writer.write(message)
if _drain:
await self.serial_writer.drain()
self.logger.info("Sent message to MDB board: %s", message)
def _read_internal(self, prefix: str) -> asyncio.Future:
assert len(prefix) == 1
if prefix in self.queues or prefix in self.waiters:
raise RuntimeError(f"Tried to wait for message type {prefix}"
" when there was already a queue listening to "
"all messages")
fut = asyncio.get_running_loop().create_future()
self.waiters[prefix] = fut
return fut
async def sendread(self, message: AsciiBytes, prefix: str) -> str:
await self.send(message, _drain=False)
fut = self._read_internal(prefix)
self.logger.info("Waiting for a single message of type: %s", prefix)
try:
await self.serial_writer.drain()
self.logger.info("Sent message to MDB board: %s", message)
await fut
except asyncio.CancelledError as e:
self.logger.warning("Got cancelled while sending message %r or "
"waiting on prefix %s", message, prefix,
exc_info=e)
del self.waiters[prefix]
raise
self.logger.info("Got message: %s", fut.result())
return fut.result()
async def read(self, prefix: str) -> str:
fut = self._read_internal(prefix)
self.logger.info("Waiting for a single message of type: %s", prefix)
try:
await fut
except asyncio.CancelledError as e:
self.logger.warning("Got cancelled while waiting for message on "
"%s", prefix, exc_info=e)
del self.waiters[prefix]
raise
self.logger.info("Got message: %s", fut.result())
return fut.result()
def listen(self, prefix: str) -> asyncio.Queue:
assert len(prefix) == 1
if prefix in self.waiters or prefix in self.queues:
            raise RuntimeError("Tried to get a queue for message type "
                               f"{prefix} when there was already someone "
                               "waiting on it.")
self.queues[prefix] = asyncio.Queue()
self.logger.info("Polling for messages of type: %s", prefix)
return self.queues[prefix]
def unlisten(self, prefix: str) -> None:
"""Stops pushing messages with this prefix character to a Queue."""
assert len(prefix) == 1
del self.queues[prefix]
self.logger.info("No longer polling for message type: %s", prefix)
async def shutdown(self):
if not self.initialized:
return
self.logger.info("Shutting down.")
if self.run_task:
self.run_task.cancel()
self.run_task = None
for fut in self.waiters.values():
fut.cancel()
self.serial_writer.close()
await self.serial_writer.wait_closed()
self.logger.info("Shutdown complete.")
self.initialized = False
__all__ = ('USBHandler', 'to_ascii')
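
# Illustrative usage sketch (not part of the original module): it exercises
# the two patterns the class docstring describes: a one-shot sendread() and a
# listen() queue. The device path '/dev/ttyACM0', the 'P1' command and the
# 'p'/'v' message prefixes are assumptions for illustration only.
if __name__ == '__main__':
    async def _demo() -> None:
        handler = USBHandler()
        await handler.initialize('/dev/ttyACM0')
        runner = asyncio.create_task(handler.run())
        try:
            # One-shot: send a command and wait for a single 'p' reply.
            print(await handler.sendread(to_ascii('P1'), 'p'))
            # Queue: consume the next few 'v' messages as they arrive.
            queue = handler.listen('v')
            for _ in range(3):
                print(await queue.get())
            handler.unlisten('v')
        finally:
            await handler.shutdown()
            await runner

    asyncio.run(_demo())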
|
normal
|
{
"blob_id": "50b630b762251f8646044b234ac4b82b8e4b645b",
"index": 8460,
"step-1": "<mask token>\n\n\nclass USBHandler:\n <mask token>\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n <mask token>\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n <mask token>\n\n async def shutdown(self):\n if not 
self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n<mask token>\n",
"step-3": "<mask token>\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n__all__ = USBHandler, to_ascii\n",
"step-4": "import asyncio\nimport logging\nimport os.path\nfrom serial_asyncio import open_serial_connection\nfrom typing import NewType, cast\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) ->AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__, self.__class__.\n __name__)))\n\n async def initialize(self, device_path: str) ->None:\n assert os.path.exists(device_path)\n self.logger.info('Initializing USBReader.')\n self.logger.debug('Opening serial connection to device at %s',\n device_path)\n self.serial_reader, self.serial_writer = await open_serial_connection(\n url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug('Connected to serial device at %s.', device_path)\n\n async def _run(self) ->None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning(\n 'Queue for message type %s is full. 
Scheduling the put in another task.'\n , message_type)\n asyncio.create_task(self.queues[message_type].put(\n stripped_message))\n else:\n self.logger.error('Unhandled message: %s', stripped_message)\n\n async def run(self) ->None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) ->None:\n assert self.initialized\n self.logger.info('Sending message to MDB board: %s', message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n\n def _read_internal(self, prefix: str) ->asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(\n f'Tried to wait for message type {prefix} when there was already a queue listening to all messages'\n )\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) ->str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info('Sent message to MDB board: %s', message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\n 'Got cancelled while sending message %r or waiting on prefix %s'\n , message, prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n async def read(self, prefix: str) ->str:\n fut = self._read_internal(prefix)\n self.logger.info('Waiting for a single message of type: %s', prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning('Got cancelled while waiting for message on %s'\n , prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info('Got message: %s', fut.result())\n return fut.result()\n\n def listen(self, prefix: str) ->asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\n f'Tried to get a queue for message type {prefix} when there was already someonewaiting on it.'\n )\n self.queues[prefix] = asyncio.Queue()\n self.logger.info('Polling for messages of type: %s', prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) ->None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info('No longer polling for message type: %s', prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info('Shutting down.')\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info('Shutdown complete.')\n self.initialized = False\n\n\n__all__ = USBHandler, to_ascii\n",
"step-5": "import asyncio\nimport logging\nimport os.path\nfrom serial_asyncio import open_serial_connection\nfrom typing import NewType, cast\n\n# Type annotations and converters\nAsciiBytes = NewType('AsciiBytes', bytes)\n\n\ndef to_ascii(s: str) -> AsciiBytes:\n if s[-1] != '\\n':\n s += '\\n'\n return cast(AsciiBytes, s.encode(encoding='ascii'))\n\n\nclass USBHandler:\n \"\"\"Reads from and writes to the underlying MDB USB board.\n\n Users can either obtain an asyncio.Queue that the handler will push\n messages to using listen(), or it can ask for a one-time read using read().\n For sending messages, if no reply is expected or there is a poller waiting\n for any response, send() can be used, otherwise sendread() will send the\n message and wait for a one-time reply. Having a listener and waiting for a\n single message at the same time is an error. See the Sniffer class for an\n example of both usages.\"\"\"\n\n def __init__(self):\n self.initialized = False\n self.run_task = None\n self.waiters = {}\n self.queues = {}\n self.logger = logging.getLogger('.'.join((__name__,\n self.__class__.__name__)))\n\n async def initialize(self, device_path: str) -> None:\n assert os.path.exists(device_path)\n self.logger.info(\"Initializing USBReader.\")\n self.logger.debug(\"Opening serial connection to device at %s\",\n device_path)\n self.serial_reader, self.serial_writer = \\\n await open_serial_connection(url=device_path, baudrate=115200)\n self.initialized = True\n self.logger.debug(\"Connected to serial device at %s.\", device_path)\n\n async def _run(self) -> None:\n while True:\n message = await self.serial_reader.readuntil(separator=b'\\r\\n')\n stripped_message = message.decode(encoding='ascii').rstrip('\\n\\r')\n self.logger.debug(\"Read '%s' from MDB board.\", stripped_message)\n message_type = stripped_message[0]\n if message_type in self.waiters:\n self.waiters[message_type].set_result(stripped_message)\n del self.waiters[message_type]\n # Lets the waiter run.\n await asyncio.sleep(0)\n elif message_type in self.queues:\n try:\n self.queues[message_type].put_nowait(stripped_message)\n except asyncio.QueueFull:\n self.logger.warning('Queue for message type %s is full. 
'\n 'Scheduling the put in another task.',\n message_type)\n asyncio.create_task(\n self.queues[message_type].put(stripped_message))\n else:\n self.logger.error(\"Unhandled message: %s\", stripped_message)\n\n async def run(self) -> None:\n assert self.initialized\n self.logger.info('Starting runner.')\n self.run_task = asyncio.create_task(self._run())\n try:\n await self.run_task\n except asyncio.CancelledError:\n self.logger.info('Runner cancelled.')\n\n async def send(self, message: AsciiBytes, _drain=True) -> None:\n assert self.initialized\n self.logger.info(\"Sending message to MDB board: %s\", message)\n self.serial_writer.write(message)\n if _drain:\n await self.serial_writer.drain()\n self.logger.info(\"Sent message to MDB board: %s\", message)\n\n def _read_internal(self, prefix: str) -> asyncio.Future:\n assert len(prefix) == 1\n if prefix in self.queues or prefix in self.waiters:\n raise RuntimeError(f\"Tried to wait for message type {prefix}\"\n \" when there was already a queue listening to \"\n \"all messages\")\n fut = asyncio.get_running_loop().create_future()\n self.waiters[prefix] = fut\n return fut\n\n async def sendread(self, message: AsciiBytes, prefix: str) -> str:\n await self.send(message, _drain=False)\n fut = self._read_internal(prefix)\n self.logger.info(\"Waiting for a single message of type: %s\", prefix)\n try:\n await self.serial_writer.drain()\n self.logger.info(\"Sent message to MDB board: %s\", message)\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\"Got cancelled while sending message %r or \"\n \"waiting on prefix %s\", message, prefix,\n exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info(\"Got message: %s\", fut.result())\n return fut.result()\n\n async def read(self, prefix: str) -> str:\n fut = self._read_internal(prefix)\n self.logger.info(\"Waiting for a single message of type: %s\", prefix)\n try:\n await fut\n except asyncio.CancelledError as e:\n self.logger.warning(\"Got cancelled while waiting for message on \"\n \"%s\", prefix, exc_info=e)\n del self.waiters[prefix]\n raise\n self.logger.info(\"Got message: %s\", fut.result())\n return fut.result()\n\n def listen(self, prefix: str) -> asyncio.Queue:\n assert len(prefix) == 1\n if prefix in self.waiters or prefix in self.queues:\n raise RuntimeError(\"Tried to get a queue for message type \"\n f\"{prefix} when there was already someone\"\n \"waiting on it.\")\n self.queues[prefix] = asyncio.Queue()\n self.logger.info(\"Polling for messages of type: %s\", prefix)\n return self.queues[prefix]\n\n def unlisten(self, prefix: str) -> None:\n \"\"\"Stops pushing messages with this prefix character to a Queue.\"\"\"\n assert len(prefix) == 1\n del self.queues[prefix]\n self.logger.info(\"No longer polling for message type: %s\", prefix)\n\n async def shutdown(self):\n if not self.initialized:\n return\n self.logger.info(\"Shutting down.\")\n if self.run_task:\n self.run_task.cancel()\n self.run_task = None\n for fut in self.waiters.values():\n fut.cancel()\n self.serial_writer.close()\n await self.serial_writer.wait_closed()\n self.logger.info(\"Shutdown complete.\")\n self.initialized = False\n\n\n__all__ = (USBHandler, to_ascii)\n",
"step-ids": [
3,
7,
8,
9,
10
]
}
|
[
3,
7,
8,
9,
10
] |
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
#from pandas import datetime
#from pandas.tseries.t
from sklearn.preprocessing import MinMaxScaler
#from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import Series
data = pd.read_csv(
r'E:\Thesis Content\ukdale\house_1\channel_7.dat',
delimiter=' ',
header=None,
names=['date', 'KWh'],
dtype={'date': np.int64, 'KWh': np.float64},
index_col='date'
) #initially KWh column contains Ws in 6 second interval, later it will be converted to KWh
data.index = pd.to_datetime((data.index.values), unit='s')
#data.head(5)
#before_process = data
after_process=data
#before_process = before_process.resample('d').sum()
#before_process['KWh'] = round(((before_process.KWh * 6) / (1000 * 3600)) , 3)
#before_process.head(5)
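# Drop implausible readings (below 10 W or above 4000 W) as noise before aggregating to daily totals.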
after_process = after_process.drop(after_process[(after_process.KWh < 10) | (after_process.KWh > 4000) ].index)
after_process = after_process.resample('d').sum()
#after_process.head(5)
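# Each raw sample is power in W over a 6 s interval, so the daily sum times 6 gives joules; dividing by 1000 * 3600 then converts joules to kWh.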
after_process['KWh'] = round(((after_process.KWh * 6) / (1000 * 3600)) , 3)
after_process.head(5)
after_process.to_csv(path_or_buf=r'E:\Thesis Content\ukdale CSV\Without Noise\Tvday.csv', sep = ',' , index_label = 'date')
#rcParams['figure.figsize'] = 16, 10
#plt.subplot(2, 1, 1)
#plt.scatter(before_process.index ,before_process['KWh'].values, s=10)
#plt.title('Before and After Pre Processing')
#plt.ylabel('KWh')
#plt.subplot(2, 1, 2)
#plt.scatter(after_process.index ,after_process['KWh'].values, s=10)
#plt.xlabel('Date')
#plt.ylabel('KWh')
#plt.show()
|
normal
|
{
"blob_id": "19c0c3156488ce99316ce40f32e84e476b7afdac",
"index": 2754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-3": "<mask token>\ndata = pd.read_csv('E:\\\\Thesis Content\\\\ukdale\\\\house_1\\\\channel_7.dat',\n delimiter=' ', header=None, names=['date', 'KWh'], dtype={'date': np.\n int64, 'KWh': np.float64}, index_col='date')\ndata.index = pd.to_datetime(data.index.values, unit='s')\nafter_process = data\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) |\n (after_process.KWh > 4000)].index)\nafter_process = after_process.resample('d').sum()\nafter_process['KWh'] = round(after_process.KWh * 6 / (1000 * 3600), 3)\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import rcParams\nfrom sklearn.preprocessing import MinMaxScaler\nfrom pandas import Series\ndata = pd.read_csv('E:\\\\Thesis Content\\\\ukdale\\\\house_1\\\\channel_7.dat',\n delimiter=' ', header=None, names=['date', 'KWh'], dtype={'date': np.\n int64, 'KWh': np.float64}, index_col='date')\ndata.index = pd.to_datetime(data.index.values, unit='s')\nafter_process = data\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) |\n (after_process.KWh > 4000)].index)\nafter_process = after_process.resample('d').sum()\nafter_process['KWh'] = round(after_process.KWh * 6 / (1000 * 3600), 3)\nafter_process.head(5)\nafter_process.to_csv(path_or_buf=\n 'E:\\\\Thesis Content\\\\ukdale CSV\\\\Without Noise\\\\Tvday.csv', sep=',',\n index_label='date')\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import rcParams\n#from pandas import datetime\n#from pandas.tseries.t\nfrom sklearn.preprocessing import MinMaxScaler\n#from statsmodels.tsa.seasonal import seasonal_decompose\nfrom pandas import Series\n\ndata = pd.read_csv(\n r'E:\\Thesis Content\\ukdale\\house_1\\channel_7.dat',\n delimiter=' ',\n header=None,\n names=['date', 'KWh'],\n dtype={'date': np.int64, 'KWh': np.float64},\n index_col='date'\n ) #initially KWh column contains Ws in 6 second interval, later it will be converted to KWh\n\ndata.index = pd.to_datetime((data.index.values), unit='s')\n#data.head(5)\n#before_process = data\nafter_process=data\n#before_process = before_process.resample('d').sum()\n#before_process['KWh'] = round(((before_process.KWh * 6) / (1000 * 3600)) , 3)\n#before_process.head(5)\nafter_process = after_process.drop(after_process[(after_process.KWh < 10) | (after_process.KWh > 4000) ].index)\nafter_process = after_process.resample('d').sum()\n#after_process.head(5)\nafter_process['KWh'] = round(((after_process.KWh * 6) / (1000 * 3600)) , 3)\nafter_process.head(5)\n\nafter_process.to_csv(path_or_buf=r'E:\\Thesis Content\\ukdale CSV\\Without Noise\\Tvday.csv', sep = ',' , index_label = 'date')\n\n\n#rcParams['figure.figsize'] = 16, 10\n#plt.subplot(2, 1, 1)\n#plt.scatter(before_process.index ,before_process['KWh'].values, s=10)\n#plt.title('Before and After Pre Processing')\n#plt.ylabel('KWh')\n#plt.subplot(2, 1, 2)\n#plt.scatter(after_process.index ,after_process['KWh'].values, s=10)\n#plt.xlabel('Date')\n#plt.ylabel('KWh')\n#plt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
letters = ['a', 'b', 'c']
def delete_head(letters):
    del letters[0]
    print(letters)
print(delete_head(letters))
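# Note: delete_head() mutates the list in place and returns None, so the call above prints ['b', 'c'] followed by None.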
|
normal
|
{
"blob_id": "e0c10dfa4074b0de4d78fc78a6f373074ef4dadd",
"index": 3971,
"step-1": "letters = ['a', 'b', 'c']\ndef delete_head(letters):\n\tdel letters[0]\n\tprint letters\nprint delete_head(letters)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')
<|reserved_special_token_1|>
A, B = map(int, input().split())
K = (B ** 2 - A ** 2) / (2 * A - 2 * B)
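# Note: (B**2 - A**2) / (2*A - 2*B) simplifies to -(A + B) / 2, so abs(K) is |A + B| / 2; it is printed only when K is a whole number.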
print(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')
|
flexible
|
{
"blob_id": "36a7d3ed28348e56e54ce4bfa937363a64ee718f",
"index": 6981,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')\n",
"step-3": "A, B = map(int, input().split())\nK = (B ** 2 - A ** 2) / (2 * A - 2 * B)\nprint(int(abs(K))) if K.is_integer() else print('IMPOSSIBLE')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import os
import sys
import click
import logging
from signal import signal, SIGPIPE, SIG_DFL
from ..helpers.file_helpers import return_filehandle
from ..helpers.sequence_helpers import get_seqio_fastq_record
signal(SIGPIPE, SIG_DFL)
def subset_fastq(fastq, subset):
'''Subset FASTQ file. Pick 1/subset reads.
If reverse, fasta <= length
'''
seqio_in = sys.stdin
fh = ''
count = 0
total = 0
if not fastq: # Check STDIN
for record in get_seqio_fastq_record(seqio_in): # get SeqIO record
count += 1
if count == subset:
count = 0
total += 1
sys.stdout.write(record.format('fastq'))
sys.stdout.flush()
else: # Check FASTA
fh = return_filehandle(fastq)
for record in get_seqio_fastq_record(fh): # Get SeqIO record
count += 1
if count == subset:
count = 0
total += 1
sys.stdout.write(record.format('fastq'))
sys.stdout.flush()
return 'Output {} reads'.format(total)
@click.command()
@click.option('--fastq',
help='''FASTQ file to subset, can be compressed''')
@click.option('--subset', metavar = '<INT>',
help='''Take every N reads (default:10)''', default=10)
@click.option('--log_file', metavar = '<FILE>', default='./subset_fastq.log',
help='''File to write log to. (default:./subset_fastq.log)''')
@click.option('--log_level', default='INFO',
help='''Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)''')
def main(fastq, subset, log_file, log_level):
'''Subset FASTQ Files.
cat input*.fastq | subset_fastq.py
or
subset_fastq.py --fastq input.fastq
'''
log_level = getattr(logging, log_level.upper(), logging.INFO)
msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'
logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M',
level=log_level)
log_handler = logging.FileHandler(log_file, mode='w')
formatter = logging.Formatter(msg_format)
log_handler.setFormatter(formatter)
logger = logging.getLogger('subset_fastq')
logger.addHandler(log_handler)
if fastq:
fastq = os.path.abspath(fastq)
logger.info(subset_fastq(fastq, subset))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "873a53983e3aeb66bd290450fb9c15a552bd163c",
"index": 4017,
"step-1": "<mask token>\n\n\[email protected]()\[email protected]('--fastq', help='FASTQ file to subset, can be compressed')\[email protected]('--subset', metavar='<INT>', help=\n 'Take every N reads (default:10)', default=10)\[email protected]('--log_file', metavar='<FILE>', default='./subset_fastq.log',\n help='File to write log to. (default:./subset_fastq.log)')\[email protected]('--log_level', default='INFO', help=\n 'Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)')\ndef main(fastq, subset, log_file, log_level):\n \"\"\"Subset FASTQ Files.\n\n cat input*.fastq | subset_fastq.py\n\n or\n\n subset_fastq.py --fastq input.fastq\n \"\"\"\n log_level = getattr(logging, log_level.upper(), logging.INFO)\n msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'\n logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M', level=\n log_level)\n log_handler = logging.FileHandler(log_file, mode='w')\n formatter = logging.Formatter(msg_format)\n log_handler.setFormatter(formatter)\n logger = logging.getLogger('subset_fastq')\n logger.addHandler(log_handler)\n if fastq:\n fastq = os.path.abspath(fastq)\n logger.info(subset_fastq(fastq, subset))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef subset_fastq(fastq, subset):\n \"\"\"Subset FASTQ file. Pick 1/subset reads.\n\n If reverse, fasta <= length\n \"\"\"\n seqio_in = sys.stdin\n fh = ''\n count = 0\n total = 0\n if not fastq:\n for record in get_seqio_fastq_record(seqio_in):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n else:\n fh = return_filehandle(fastq)\n for record in get_seqio_fastq_record(fh):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n return 'Output {} reads'.format(total)\n\n\[email protected]()\[email protected]('--fastq', help='FASTQ file to subset, can be compressed')\[email protected]('--subset', metavar='<INT>', help=\n 'Take every N reads (default:10)', default=10)\[email protected]('--log_file', metavar='<FILE>', default='./subset_fastq.log',\n help='File to write log to. (default:./subset_fastq.log)')\[email protected]('--log_level', default='INFO', help=\n 'Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)')\ndef main(fastq, subset, log_file, log_level):\n \"\"\"Subset FASTQ Files.\n\n cat input*.fastq | subset_fastq.py\n\n or\n\n subset_fastq.py --fastq input.fastq\n \"\"\"\n log_level = getattr(logging, log_level.upper(), logging.INFO)\n msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'\n logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M', level=\n log_level)\n log_handler = logging.FileHandler(log_file, mode='w')\n formatter = logging.Formatter(msg_format)\n log_handler.setFormatter(formatter)\n logger = logging.getLogger('subset_fastq')\n logger.addHandler(log_handler)\n if fastq:\n fastq = os.path.abspath(fastq)\n logger.info(subset_fastq(fastq, subset))\n\n\n<mask token>\n",
"step-3": "<mask token>\nsignal(SIGPIPE, SIG_DFL)\n\n\ndef subset_fastq(fastq, subset):\n \"\"\"Subset FASTQ file. Pick 1/subset reads.\n\n If reverse, fasta <= length\n \"\"\"\n seqio_in = sys.stdin\n fh = ''\n count = 0\n total = 0\n if not fastq:\n for record in get_seqio_fastq_record(seqio_in):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n else:\n fh = return_filehandle(fastq)\n for record in get_seqio_fastq_record(fh):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n return 'Output {} reads'.format(total)\n\n\[email protected]()\[email protected]('--fastq', help='FASTQ file to subset, can be compressed')\[email protected]('--subset', metavar='<INT>', help=\n 'Take every N reads (default:10)', default=10)\[email protected]('--log_file', metavar='<FILE>', default='./subset_fastq.log',\n help='File to write log to. (default:./subset_fastq.log)')\[email protected]('--log_level', default='INFO', help=\n 'Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)')\ndef main(fastq, subset, log_file, log_level):\n \"\"\"Subset FASTQ Files.\n\n cat input*.fastq | subset_fastq.py\n\n or\n\n subset_fastq.py --fastq input.fastq\n \"\"\"\n log_level = getattr(logging, log_level.upper(), logging.INFO)\n msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'\n logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M', level=\n log_level)\n log_handler = logging.FileHandler(log_file, mode='w')\n formatter = logging.Formatter(msg_format)\n log_handler.setFormatter(formatter)\n logger = logging.getLogger('subset_fastq')\n logger.addHandler(log_handler)\n if fastq:\n fastq = os.path.abspath(fastq)\n logger.info(subset_fastq(fastq, subset))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport click\nimport logging\nfrom signal import signal, SIGPIPE, SIG_DFL\nfrom ..helpers.file_helpers import return_filehandle\nfrom ..helpers.sequence_helpers import get_seqio_fastq_record\nsignal(SIGPIPE, SIG_DFL)\n\n\ndef subset_fastq(fastq, subset):\n \"\"\"Subset FASTQ file. Pick 1/subset reads.\n\n If reverse, fasta <= length\n \"\"\"\n seqio_in = sys.stdin\n fh = ''\n count = 0\n total = 0\n if not fastq:\n for record in get_seqio_fastq_record(seqio_in):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n else:\n fh = return_filehandle(fastq)\n for record in get_seqio_fastq_record(fh):\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n return 'Output {} reads'.format(total)\n\n\[email protected]()\[email protected]('--fastq', help='FASTQ file to subset, can be compressed')\[email protected]('--subset', metavar='<INT>', help=\n 'Take every N reads (default:10)', default=10)\[email protected]('--log_file', metavar='<FILE>', default='./subset_fastq.log',\n help='File to write log to. (default:./subset_fastq.log)')\[email protected]('--log_level', default='INFO', help=\n 'Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)')\ndef main(fastq, subset, log_file, log_level):\n \"\"\"Subset FASTQ Files.\n\n cat input*.fastq | subset_fastq.py\n\n or\n\n subset_fastq.py --fastq input.fastq\n \"\"\"\n log_level = getattr(logging, log_level.upper(), logging.INFO)\n msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'\n logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M', level=\n log_level)\n log_handler = logging.FileHandler(log_file, mode='w')\n formatter = logging.Formatter(msg_format)\n log_handler.setFormatter(formatter)\n logger = logging.getLogger('subset_fastq')\n logger.addHandler(log_handler)\n if fastq:\n fastq = os.path.abspath(fastq)\n logger.info(subset_fastq(fastq, subset))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport click\nimport logging\nfrom signal import signal, SIGPIPE, SIG_DFL\nfrom ..helpers.file_helpers import return_filehandle\nfrom ..helpers.sequence_helpers import get_seqio_fastq_record\n\nsignal(SIGPIPE, SIG_DFL)\n\n\ndef subset_fastq(fastq, subset):\n '''Subset FASTQ file. Pick 1/subset reads.\n\n If reverse, fasta <= length\n '''\n seqio_in = sys.stdin\n fh = ''\n count = 0\n total = 0\n if not fastq: # Check STDIN\n for record in get_seqio_fastq_record(seqio_in): # get SeqIO record\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n else: # Check FASTA\n fh = return_filehandle(fastq)\n for record in get_seqio_fastq_record(fh): # Get SeqIO record\n count += 1\n if count == subset:\n count = 0\n total += 1\n sys.stdout.write(record.format('fastq'))\n sys.stdout.flush()\n return 'Output {} reads'.format(total)\n\n\[email protected]() \[email protected]('--fastq',\n help='''FASTQ file to subset, can be compressed''')\[email protected]('--subset', metavar = '<INT>',\n help='''Take every N reads (default:10)''', default=10)\[email protected]('--log_file', metavar = '<FILE>', default='./subset_fastq.log',\n help='''File to write log to. (default:./subset_fastq.log)''')\[email protected]('--log_level', default='INFO',\n help='''Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default:INFO)''')\ndef main(fastq, subset, log_file, log_level):\n '''Subset FASTQ Files.\n\n cat input*.fastq | subset_fastq.py\n\n or\n\n subset_fastq.py --fastq input.fastq\n '''\n log_level = getattr(logging, log_level.upper(), logging.INFO)\n msg_format = '%(asctime)s|%(name)s|[%(levelname)s]: %(message)s'\n logging.basicConfig(format=msg_format, datefmt='%m-%d %H:%M',\n level=log_level)\n log_handler = logging.FileHandler(log_file, mode='w')\n formatter = logging.Formatter(msg_format)\n log_handler.setFormatter(formatter)\n logger = logging.getLogger('subset_fastq')\n logger.addHandler(log_handler)\n if fastq:\n fastq = os.path.abspath(fastq)\n logger.info(subset_fastq(fastq, subset))\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def auth(role):
from core import admin_view, student_view, teacher_view
def deco(func):
def wrapper(*args, **kwargs):
if role == 'admin':
if admin_view.admin_user == None:
admin_view.login()
else:
res = func(*args, **kwargs)
return res
if role == 'student':
if student_view.student_user == None:
student_view.login()
else:
res = func(*args, **kwargs)
return res
if role == 'teacher':
if teacher_view.teacher_user == None:
teacher_view.login()
else:
res = func(*args, **kwargs)
return res
return wrapper
return deco
<|reserved_special_token_1|>
# Multi-role authentication decorator
def auth(role):
from core import admin_view,student_view,teacher_view
def deco(func):
def wrapper(*args,**kwargs):
if role == 'admin':
if admin_view.admin_user == None:
admin_view.login()
else:
res = func(*args,**kwargs)
return res
if role == 'student':
if student_view.student_user == None:
student_view.login()
else:
res = func(*args,**kwargs)
return res
if role == 'teacher':
if teacher_view.teacher_user == None:
teacher_view.login()
else:
res = func(*args,**kwargs)
return res
return wrapper
return deco
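# Hypothetical usage sketch (the view-function names below are illustrative and
# not taken from the original project): decorating a view with a role forces a
# login for that role before the wrapped function runs.
#
#     @auth('student')
#     def choose_course():
#         ...
#
#     @auth('admin')
#     def create_course():
#         ...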
|
flexible
|
{
"blob_id": "e247ffb5b6e4319ff17d0b8ae9f67e10c282c4ff",
"index": 7348,
"step-1": "<mask token>\n",
"step-2": "def auth(role):\n from core import admin_view, student_view, teacher_view\n\n def deco(func):\n\n def wrapper(*args, **kwargs):\n if role == 'admin':\n if admin_view.admin_user == None:\n admin_view.login()\n else:\n res = func(*args, **kwargs)\n return res\n if role == 'student':\n if student_view.student_user == None:\n student_view.login()\n else:\n res = func(*args, **kwargs)\n return res\n if role == 'teacher':\n if teacher_view.teacher_user == None:\n teacher_view.login()\n else:\n res = func(*args, **kwargs)\n return res\n return wrapper\n return deco\n",
"step-3": "\n# 多角色认证装饰器\n\ndef auth(role):\n\n from core import admin_view,student_view,teacher_view\n def deco(func):\n def wrapper(*args,**kwargs):\n\n if role == 'admin':\n if admin_view.admin_user == None:\n admin_view.login()\n else:\n res = func(*args,**kwargs)\n return res\n\n if role == 'student':\n if student_view.student_user == None:\n student_view.login()\n else:\n res = func(*args,**kwargs)\n return res\n\n\n if role == 'teacher':\n if teacher_view.teacher_user == None:\n teacher_view.login()\n else:\n res = func(*args,**kwargs)\n return res\n\n\n return wrapper\n return deco",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.path.exists(DATA_DIR):
override = input('Data exist, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
<|reserved_special_token_0|>
if parse:
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'
) as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,
'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
did = f'{i + 1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
stream.write('\n'.join(paper))
print('Clean up stuff that might influence XML parsing...')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/\\&amp;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</\\&lt;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/\\&gt;/g"')
print('Create cluster and docsent files...')
os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
print(
'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'
)
print("Currently, it has bug and can't create file")
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
<|reserved_special_token_0|>
if os.path.exists(OUTPUT_DIR):
override = input('Result exist, do you want to re-run? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
<|reserved_special_token_0|>
os.system(
f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DATAPATH = '../../../data/test'
MEAD_DIR = os.path.abspath('mead')
MEAD_DATA_PATH = f'{MEAD_DIR}/data'
MEAD_BIN = f'{MEAD_DIR}/bin'
MEAD_LIB = f'{MEAD_DIR}/lib'
MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'
MEAD_DID = f'{MEAD_DIR}/did'
TARGET = 'MEAD_TEST'
DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)
parse = True
if os.path.exists(DATA_DIR):
override = input('Data exist, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')
config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')
CONFIG = f"""<?xml version='1.0' encoding='utf-8'?>
<MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent">
<FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature">
<FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" />
<FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" />
<FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" />
</FEATURE-SET>
<CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" />
<COMPRESSION BASIS="sentences" PERCENT="1" />
</MEAD-CONFIG>
"""
if parse:
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'
) as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,
'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
did = f'{i + 1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
stream.write('\n'.join(paper))
print('Clean up stuff that might influence XML parsing...')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/\\&amp;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</\\&lt;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/\\&gt;/g"')
print('Create cluster and docsent files...')
os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
print(
'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'
)
print("Currently, it has bug and can't create file")
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
OUTPUT_PATH = '../output'
OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')
if os.path.exists(OUTPUT_DIR):
override = input('Result exist, do you want to re-run? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')
extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')
shared_parameters = f'-sentences -percent {PERCENT}'
os.system(
f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'
)
<|reserved_special_token_1|>
import os
import shutil
from tqdm import tqdm
from pathlib import Path
from eval_mead import PERCENT
DATAPATH = '../../../data/test'
MEAD_DIR = os.path.abspath('mead')
MEAD_DATA_PATH = f'{MEAD_DIR}/data'
MEAD_BIN = f'{MEAD_DIR}/bin'
MEAD_LIB = f'{MEAD_DIR}/lib'
MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'
MEAD_DID = f'{MEAD_DIR}/did'
TARGET = 'MEAD_TEST'
DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)
parse = True
if os.path.exists(DATA_DIR):
override = input('Data exist, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')
config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')
CONFIG = f"""<?xml version='1.0' encoding='utf-8'?>
<MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent">
<FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature">
<FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" />
<FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" />
<FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" />
</FEATURE-SET>
<CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" />
<COMPRESSION BASIS="sentences" PERCENT="1" />
</MEAD-CONFIG>
"""
if parse:
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'
) as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,
'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
print(
'Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
did = f'{i + 1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
stream.write('\n'.join(paper))
print('Clean up stuff that might influence XML parsing...')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/\\&amp;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</\\&lt;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/\\&gt;/g"')
print('Create cluster and docsent files...')
os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
print(
'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'
)
print("Currently, it has bug and can't create file")
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"'
)
OUTPUT_PATH = '../output'
OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')
if os.path.exists(OUTPUT_DIR):
override = input('Result exist, do you want to re-run? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')
extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')
shared_parameters = f'-sentences -percent {PERCENT}'
os.system(
f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'
)
<|reserved_special_token_1|>
import os
import shutil
from tqdm import tqdm
from pathlib import Path
from eval_mead import PERCENT
DATAPATH = '../../../data/test'
# MEAD_DIR = 'mead'
MEAD_DIR = os.path.abspath('mead')
MEAD_DATA_PATH = f'{MEAD_DIR}/data'
MEAD_BIN = f'{MEAD_DIR}/bin'
MEAD_LIB = f'{MEAD_DIR}/lib'
MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'
MEAD_DID = f'{MEAD_DIR}/did'
TARGET = 'MEAD_TEST'
DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)
parse = True
if os.path.exists(DATA_DIR):
override = input('Data exist, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')
config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')
CONFIG = f"""<?xml version='1.0' encoding='utf-8'?>
<MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent">
<FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature">
<FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" />
<FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" />
<FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" />
</FEATURE-SET>
<CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" />
<COMPRESSION BASIS="sentences" PERCENT="1" />
</MEAD-CONFIG>
"""
if parse:
### Get raw text ###
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
# Setting Env. Var.
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream:
print('Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(
MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
print('Make sure you have change the following line to absolute path to',
os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
# Write raw text, cluster file
# This stuff should be generated by text2cluster.pl
# cluster_lines = []
# cluster_lines.append("<?xml version = '1.0' encoding='utf-8'?>\n")
# cluster_lines.append("<CLUSTER LANG='ENG'>\n")
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
# did = f'raw_text_{i+1}.txt'
did = f'{i+1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
# make sure the sent split are the same as our annotation
stream.write('\n'.join(paper))
# delete </ pattern or XML might break
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/<\///g"')
# https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/[><]//g"')
# https://validator.w3.org/feed/docs/error/SAXError.html
# https://www.w3.org/TR/REC-xml/#dt-chardata
print('Clean up stuff that might influence XML parsing...')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/\\&amp;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</\\&lt;/g"')
    os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/\\&gt;/g"')
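    # Note: '&' is escaped before '<' and '>' above so the freshly inserted '&lt;'/'&gt;' entities are not escaped again.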
# cluster_lines.append(f"\t<D DID='{did}' />\n")
# cluster_lines.append('</CLUSTER>\n')
# Get docsent
# with open(cluster_file, 'w') as stream:
# stream.writelines(cluster_lines)
# Path(cluster_file).touch()
print('Create cluster and docsent files...')
os.system(
f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
        print(
            f'MAKE SURE you have changed $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl')
        print("Currently it has a bug and can't create the file")
# Run config
# with open(config_file, 'w') as stream:
# stream.write(CONFIG)
# extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract')
# os.system(
# f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}')
# https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
OUTPUT_PATH = '../output'
OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')
if os.path.exists(OUTPUT_DIR):
override = input('Result exist, do you want to re-run? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')
extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')
# compression basis is "sentence", and give PERCENT% summary
shared_parameters = f'-sentences -percent {PERCENT}'
# os.system(
# f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}')
os.system(
f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')
|
flexible
|
{
"blob_id": "887ae9b7c629be679bf4f5fb4311c31bff605c73",
"index": 8874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\n<mask token>\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</</g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/>/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n<mask token>\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\n<mask token>\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n",
"step-3": "<mask token>\nDATAPATH = '../../../data/test'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</</g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/>/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n 
)\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\nshared_parameters = f'-sentences -percent {PERCENT}'\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n",
"step-4": "import os\nimport shutil\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom eval_mead import PERCENT\nDATAPATH = '../../../data/test'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\nif parse:\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r'\n ) as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(MEAD_FORMATTING_ADDONS,\n 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print(\n 'Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n did = f'{i + 1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n stream.write('\\n'.join(paper))\n print('Clean up stuff that might influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</</g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/>/g\"')\n print('Create cluster and docsent files...')\n os.system(f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl'\n )\n print(\"Currently, it has bug and can't create file\")\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i 
\"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"'\n )\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\nshared_parameters = f'-sentences -percent {PERCENT}'\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}'\n )\n",
"step-5": "import os\nimport shutil\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom eval_mead import PERCENT\n\nDATAPATH = '../../../data/test'\n# MEAD_DIR = 'mead'\nMEAD_DIR = os.path.abspath('mead')\nMEAD_DATA_PATH = f'{MEAD_DIR}/data'\nMEAD_BIN = f'{MEAD_DIR}/bin'\nMEAD_LIB = f'{MEAD_DIR}/lib'\nMEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'\nMEAD_DID = f'{MEAD_DIR}/did'\nTARGET = 'MEAD_TEST'\n\n\nDATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)\nparse = True\nif os.path.exists(DATA_DIR):\n override = input('Data exist, override (delete and re-parse)? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(DATA_DIR)\n else:\n parse = False\nos.makedirs(DATA_DIR, exist_ok=True)\n\ncluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')\nconfig_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')\n\nCONFIG = f\"\"\"<?xml version='1.0' encoding='utf-8'?>\n<MEAD-CONFIG LANG=\"ENG\" TARGET=\"MEAD_TEST\" CLUSTER-PATH=\"{DATA_DIR}\" DOC-DIRECTORY=\"{DATA_DIR}/docsent\">\n<FEATURE-SET BASE-DIRECTORY=\"{DATA_DIR}/feature\">\n<FEATURE NAME=\"Position\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Position.pl\" />\n<FEATURE NAME=\"Length\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Length.pl\" />\n<FEATURE NAME=\"Centroid\" SCRIPT=\"{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG\" />\n</FEATURE-SET>\n<CLASSIFIER COMMAND-LINE=\"{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0\" SYSTEM=\"MEADORIG\" />\n<COMPRESSION BASIS=\"sentences\" PERCENT=\"1\" />\n</MEAD-CONFIG>\n\"\"\"\n\nif parse:\n\n ### Get raw text ###\n\n with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:\n raw_papers = stream.readlines()\n\n papers = [paper.strip().split('##SENT##') for paper in raw_papers]\n\n # Setting Env. Var.\n\n with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream:\n print('Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DID))\n print('line 18 of', os.path.join(\n MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'))\n print(stream.readlines()[17])\n with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:\n print('Make sure you have change the following line to absolute path to',\n os.path.abspath(MEAD_DIR))\n print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))\n print(stream.readlines()[30])\n\n print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))\n os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)\n\n # Write raw text, cluster file\n\n # This stuff should be generated by text2cluster.pl\n # cluster_lines = []\n # cluster_lines.append(\"<?xml version = '1.0' encoding='utf-8'?>\\n\")\n # cluster_lines.append(\"<CLUSTER LANG='ENG'>\\n\")\n\n print('Converting src to raw text...')\n for i, paper in tqdm(enumerate(papers), total=len(papers)):\n\n # did = f'raw_text_{i+1}.txt'\n did = f'{i+1}'\n text_file = os.path.join(DATA_DIR, did)\n with open(text_file, 'w') as stream:\n # make sure the sent split are the same as our annotation\n stream.write('\\n'.join(paper))\n\n # delete </ pattern or XML might break\n # os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/<\\///g\"')\n # https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file\n # os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/[><]//g\"')\n\n # https://validator.w3.org/feed/docs/error/SAXError.html\n # https://www.w3.org/TR/REC-xml/#dt-chardata\n print('Clean up stuff that might 
influence XML parsing...')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/</</g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/&/&/g\"')\n os.system(f'find {DATA_DIR} -type f | xargs sed -i \"s/>/>/g\"')\n\n # cluster_lines.append(f\"\\t<D DID='{did}' />\\n\")\n # cluster_lines.append('</CLUSTER>\\n')\n\n # Get docsent\n\n # with open(cluster_file, 'w') as stream:\n # stream.writelines(cluster_lines)\n\n # Path(cluster_file).touch()\n\n print('Create cluster and docsent files...')\n os.system(\n f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')\n\n if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:\n print(\n 'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl')\n print(\"Currently, it has bug and can't create file\")\n\n # Run config\n\n # with open(config_file, 'w') as stream:\n # stream.write(CONFIG)\n\n # extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract')\n # os.system(\n # f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}')\n\n # https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands\n os.system(\n f'find {DATA_DIR} -name \"*.cluster\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"')\n os.system(\n f'find {DATA_DIR} -name \"*.docsent\" | xargs sed -i \"s/<?xml version=\\'1.0\\'?>/<?xml version=\\'1.0\\' encoding=\\'utf-8\\'?>/g\"')\n\n\nOUTPUT_PATH = '../output'\nOUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')\nif os.path.exists(OUTPUT_DIR):\n override = input('Result exist, do you want to re-run? (Y/n): ')\n if override.lower() == 'y':\n shutil.rmtree(OUTPUT_DIR)\nos.makedirs(OUTPUT_DIR, exist_ok=True)\n\nsummary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')\nextract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')\n# compression basis is \"sentence\", and give PERCENT% summary\nshared_parameters = f'-sentences -percent {PERCENT}'\n\n# os.system(\n# f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}')\nos.system(\n f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def Hello_worlder(x):
a = []
for i in range(x):
a.append('Hello world')
for i in a:
print(i)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def Hello_worlder(x):
a = []
for i in range(x):
a.append('Hello world')
for i in a:
print(i)
Hello_worlder(10)
|
flexible
|
{
"blob_id": "4f116f3eec9198a56a047ab42ed8e018ebb794bb",
"index": 3528,
"step-1": "<mask token>\n",
"step-2": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\n<mask token>\n",
"step-3": "def Hello_worlder(x):\n a = []\n for i in range(x):\n a.append('Hello world')\n for i in a:\n print(i)\n\n\nHello_worlder(10)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys
import math
from random import randrange
from utilities import *
from EffectiveThueLemma import *
def getZ(value):
s = str(value)
p10 = 1
if s[0] != '0':
p10 = 10
for i in range(1, len(s)):
if s[i] == '.':
break
p10 *= 10
z = []
first = int(s[0] == '0')
for i in range(first, len(s)):
if s[i] != '.':
z.append(int(s[i]))
return (p10, z)
def Theorem4_9(n, b, R):
if R >= n:
raise ValueError("r* >= n")
if b < 0 or b >= n:
raise ValueError("b < 0 or b >= n")
r, rr = n, b # r0, r1
s, ss = 1, 0 # s0, s1
t, tt = 0, 1 # t0, t1
if r < R:
return (r, s, t)
if rr < R:
return (rr, ss, tt)
while rr != 0:
        q = r // rr  # integer quotient; plain / would make q (and s, t) floats in Python 3
rrr = r % rr
r, s, t, rr, ss, tt = rr, ss, tt, rrr, (s-ss*q), (t-tt*q)
if rr < R:
return (rr, ss, tt)
return None
def gcd(a, b):
if b == 0:
return a
return gcd(b, a%b)
def RationalReconstruction(value, M = int(1e9)):
# check if value is already an integer
if value.is_integer():
return (value, 1)
# get additional 10^x and z array
p10, z = getZ(value)
print(z)
k = len(z)
# 1. Compute n = 10^k and b = sum(z(i-1) * 10^(k-i)) with i = 1..k
n = pow(10, k)
b = 0
for i in range(1, k+1):
b += z[i-1] * pow(10, k-i)
# make sure 10^k > 2(M^2)
while M >= 10 and 2*(M**2) >= n:
        M //= 10  # keep the bound M an integer
# 2. Run the extended Euclidean algorithm on input n, b to obtain EEA(n, b)
# and then apply Theorem 4.9 with n, b, and r* = t* = M to obtain the values r', s', t'.
EEA(n, b)
print(n, b, M)
rr, ss, tt = Theorem4_9(n, b, M)
# 3. Output the rational number -s'/t'
if tt < 0:
ss, tt = -ss, -tt
ss *= p10
g = gcd(abs(ss), abs(tt))
    ss //= g  # reduce to lowest terms while keeping integer numerator/denominator
    tt //= g
return (-ss, tt)
def main():
if (len(sys.argv) < 2):
return
value = float(sys.argv[1])
M = int(1e9)
if len(sys.argv) > 2:
M = int(sys.argv[2])
p, q = RationalReconstruction(value, M)
print("p = %ld" %(p))
print("q = %ld" %(q))
print("p/q = %.20lf" %(1.0*p/q))
print("val = %.20lf" %(value))
main()
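
# Optional cross-check with the standard library (not part of the Theorem 4.9
# routine above): Fraction.limit_denominator recovers a small fraction from a
# truncated decimal via the same continued-fraction / extended-Euclid idea.
# The sample value below is made up.
def _fraction_cross_check(value=0.6923076923, bound=1000):
    from fractions import Fraction
    return Fraction(value).limit_denominator(bound)  # -> Fraction(9, 13) for the default value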
|
normal
|
{
"blob_id": "2b3a7d0c28d1bf7d4400b0e5558b0527a96af781",
"index": 7658,
"step-1": "<mask token>\n\n\ndef Theorem4_9(n, b, R):\n if R >= n:\n raise ValueError('r* >= n')\n if b < 0 or b >= n:\n raise ValueError('b < 0 or b >= n')\n r, rr = n, b\n s, ss = 1, 0\n t, tt = 0, 1\n if r < R:\n return r, s, t\n if rr < R:\n return rr, ss, tt\n while rr != 0:\n q = r / rr\n rrr = r % rr\n r, s, t, rr, ss, tt = rr, ss, tt, rrr, s - ss * q, t - tt * q\n if rr < R:\n return rr, ss, tt\n return None\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\n<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n return\n value = float(sys.argv[1])\n M = int(1000000000.0)\n if len(sys.argv) > 2:\n M = int(sys.argv[2])\n p, q = RationalReconstruction(value, M)\n print('p = %ld' % p)\n print('q = %ld' % q)\n print('p/q = %.20lf' % (1.0 * p / q))\n print('val = %.20lf' % value)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getZ(value):\n s = str(value)\n p10 = 1\n if s[0] != '0':\n p10 = 10\n for i in range(1, len(s)):\n if s[i] == '.':\n break\n p10 *= 10\n z = []\n first = int(s[0] == '0')\n for i in range(first, len(s)):\n if s[i] != '.':\n z.append(int(s[i]))\n return p10, z\n\n\ndef Theorem4_9(n, b, R):\n if R >= n:\n raise ValueError('r* >= n')\n if b < 0 or b >= n:\n raise ValueError('b < 0 or b >= n')\n r, rr = n, b\n s, ss = 1, 0\n t, tt = 0, 1\n if r < R:\n return r, s, t\n if rr < R:\n return rr, ss, tt\n while rr != 0:\n q = r / rr\n rrr = r % rr\n r, s, t, rr, ss, tt = rr, ss, tt, rrr, s - ss * q, t - tt * q\n if rr < R:\n return rr, ss, tt\n return None\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef RationalReconstruction(value, M=int(1000000000.0)):\n if value.is_integer():\n return value, 1\n p10, z = getZ(value)\n print(z)\n k = len(z)\n n = pow(10, k)\n b = 0\n for i in range(1, k + 1):\n b += z[i - 1] * pow(10, k - i)\n while M >= 10 and 2 * M ** 2 >= n:\n M /= 10\n EEA(n, b)\n print(n, b, M)\n rr, ss, tt = Theorem4_9(n, b, M)\n if tt < 0:\n ss, tt = -ss, -tt\n ss *= p10\n g = gcd(abs(ss), abs(tt))\n ss /= g\n tt /= g\n return -ss, tt\n\n\ndef main():\n if len(sys.argv) < 2:\n return\n value = float(sys.argv[1])\n M = int(1000000000.0)\n if len(sys.argv) > 2:\n M = int(sys.argv[2])\n p, q = RationalReconstruction(value, M)\n print('p = %ld' % p)\n print('q = %ld' % q)\n print('p/q = %.20lf' % (1.0 * p / q))\n print('val = %.20lf' % value)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getZ(value):\n s = str(value)\n p10 = 1\n if s[0] != '0':\n p10 = 10\n for i in range(1, len(s)):\n if s[i] == '.':\n break\n p10 *= 10\n z = []\n first = int(s[0] == '0')\n for i in range(first, len(s)):\n if s[i] != '.':\n z.append(int(s[i]))\n return p10, z\n\n\ndef Theorem4_9(n, b, R):\n if R >= n:\n raise ValueError('r* >= n')\n if b < 0 or b >= n:\n raise ValueError('b < 0 or b >= n')\n r, rr = n, b\n s, ss = 1, 0\n t, tt = 0, 1\n if r < R:\n return r, s, t\n if rr < R:\n return rr, ss, tt\n while rr != 0:\n q = r / rr\n rrr = r % rr\n r, s, t, rr, ss, tt = rr, ss, tt, rrr, s - ss * q, t - tt * q\n if rr < R:\n return rr, ss, tt\n return None\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef RationalReconstruction(value, M=int(1000000000.0)):\n if value.is_integer():\n return value, 1\n p10, z = getZ(value)\n print(z)\n k = len(z)\n n = pow(10, k)\n b = 0\n for i in range(1, k + 1):\n b += z[i - 1] * pow(10, k - i)\n while M >= 10 and 2 * M ** 2 >= n:\n M /= 10\n EEA(n, b)\n print(n, b, M)\n rr, ss, tt = Theorem4_9(n, b, M)\n if tt < 0:\n ss, tt = -ss, -tt\n ss *= p10\n g = gcd(abs(ss), abs(tt))\n ss /= g\n tt /= g\n return -ss, tt\n\n\ndef main():\n if len(sys.argv) < 2:\n return\n value = float(sys.argv[1])\n M = int(1000000000.0)\n if len(sys.argv) > 2:\n M = int(sys.argv[2])\n p, q = RationalReconstruction(value, M)\n print('p = %ld' % p)\n print('q = %ld' % q)\n print('p/q = %.20lf' % (1.0 * p / q))\n print('val = %.20lf' % value)\n\n\nmain()\n",
"step-4": "import sys\nimport math\nfrom random import randrange\nfrom utilities import *\nfrom EffectiveThueLemma import *\n\n\ndef getZ(value):\n s = str(value)\n p10 = 1\n if s[0] != '0':\n p10 = 10\n for i in range(1, len(s)):\n if s[i] == '.':\n break\n p10 *= 10\n z = []\n first = int(s[0] == '0')\n for i in range(first, len(s)):\n if s[i] != '.':\n z.append(int(s[i]))\n return p10, z\n\n\ndef Theorem4_9(n, b, R):\n if R >= n:\n raise ValueError('r* >= n')\n if b < 0 or b >= n:\n raise ValueError('b < 0 or b >= n')\n r, rr = n, b\n s, ss = 1, 0\n t, tt = 0, 1\n if r < R:\n return r, s, t\n if rr < R:\n return rr, ss, tt\n while rr != 0:\n q = r / rr\n rrr = r % rr\n r, s, t, rr, ss, tt = rr, ss, tt, rrr, s - ss * q, t - tt * q\n if rr < R:\n return rr, ss, tt\n return None\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\ndef RationalReconstruction(value, M=int(1000000000.0)):\n if value.is_integer():\n return value, 1\n p10, z = getZ(value)\n print(z)\n k = len(z)\n n = pow(10, k)\n b = 0\n for i in range(1, k + 1):\n b += z[i - 1] * pow(10, k - i)\n while M >= 10 and 2 * M ** 2 >= n:\n M /= 10\n EEA(n, b)\n print(n, b, M)\n rr, ss, tt = Theorem4_9(n, b, M)\n if tt < 0:\n ss, tt = -ss, -tt\n ss *= p10\n g = gcd(abs(ss), abs(tt))\n ss /= g\n tt /= g\n return -ss, tt\n\n\ndef main():\n if len(sys.argv) < 2:\n return\n value = float(sys.argv[1])\n M = int(1000000000.0)\n if len(sys.argv) > 2:\n M = int(sys.argv[2])\n p, q = RationalReconstruction(value, M)\n print('p = %ld' % p)\n print('q = %ld' % q)\n print('p/q = %.20lf' % (1.0 * p / q))\n print('val = %.20lf' % value)\n\n\nmain()\n",
"step-5": "import sys\nimport math\nfrom random import randrange\nfrom utilities import *\nfrom EffectiveThueLemma import *\n\n\ndef getZ(value):\n\ts = str(value)\n\tp10 = 1\n\tif s[0] != '0':\n\t\tp10 = 10\n\tfor i in range(1, len(s)):\n\t\tif s[i] == '.':\n\t\t\tbreak\n\t\tp10 *= 10\n\tz = []\n\tfirst = int(s[0] == '0')\n\tfor i in range(first, len(s)):\n\t\tif s[i] != '.':\n\t\t\tz.append(int(s[i]))\n\treturn (p10, z)\n\n\ndef Theorem4_9(n, b, R):\n\tif R >= n:\n\t\traise ValueError(\"r* >= n\")\n\tif b < 0 or b >= n:\n\t\traise ValueError(\"b < 0 or b >= n\")\n\tr, rr = n, b\t# r0, r1\n\ts, ss = 1, 0\t# s0, s1\n\tt, tt = 0, 1\t# t0, t1\n\tif r < R:\n\t\treturn (r, s, t)\n\tif rr < R:\n\t\treturn (rr, ss, tt)\n\twhile rr != 0:\n\t\tq = r/rr\n\t\trrr = r % rr\n\t\tr, s, t, rr, ss, tt = rr, ss, tt, rrr, (s-ss*q), (t-tt*q)\n\t\tif rr < R:\n\t\t\treturn (rr, ss, tt)\n\treturn None\n\n\ndef gcd(a, b):\n\tif b == 0:\n\t\treturn a\n\treturn gcd(b, a%b)\n\n\ndef RationalReconstruction(value, M = int(1e9)):\n\t# check if value is already an integer\n\tif value.is_integer():\n\t\treturn (value, 1)\n\n\t# get additional 10^x and z array\n\tp10, z = getZ(value)\n\tprint(z)\n\tk = len(z)\n\n\t# 1. Compute n = 10^k and b = sum(z(i-1) * 10^(k-i)) with i = 1..k\n\tn = pow(10, k)\n\tb = 0\n\tfor i in range(1, k+1):\n\t\tb += z[i-1] * pow(10, k-i)\n\n\t# make sure 10^k > 2(M^2)\n\twhile M >= 10 and 2*(M**2) >= n:\n\t\tM /= 10\n\n\t# 2. Run the extended Euclidean algorithm on input n, b to obtain EEA(n, b)\n\t# and then apply Theorem 4.9 with n, b, and r* = t* = M to obtain the values r', s', t'.\n\tEEA(n, b)\n\tprint(n, b, M)\n\trr, ss, tt = Theorem4_9(n, b, M)\n\n\t# 3. Output the rational number -s'/t'\n\tif tt < 0:\n\t\tss, tt = -ss, -tt\n\tss *= p10\n\tg = gcd(abs(ss), abs(tt))\n\tss /= g\n\ttt /= g\n\treturn (-ss, tt)\n\n\ndef main():\n\tif (len(sys.argv) < 2):\n\t\treturn\n\tvalue = float(sys.argv[1])\n\tM = int(1e9)\n\tif len(sys.argv) > 2:\n\t\tM = int(sys.argv[2])\n\tp, q = RationalReconstruction(value, M)\n\tprint(\"p = %ld\" %(p))\n\tprint(\"q = %ld\" %(q))\n\tprint(\"p/q = %.20lf\" %(1.0*p/q))\n\tprint(\"val = %.20lf\" %(value))\n\n\nmain()",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
if button_a.is_pressed():
music.pitch(400, 500)
<|reserved_special_token_1|>
from microbit import *
import music
while True:
if button_a.is_pressed():
music.pitch(400, 500)
|
flexible
|
{
"blob_id": "356c817e254d8885beb447aa10759fff6a45ca25",
"index": 9454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if button_a.is_pressed():\n music.pitch(400, 500)\n",
"step-3": "from microbit import *\nimport music\nwhile True:\n if button_a.is_pressed():\n music.pitch(400, 500)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import load_model
from utils import resize_to_fit, clear_chunks, stack_windows
from imutils import paths
import numpy as np
import imutils
import cv2 as cv2
import pickle
from tqdm import tqdm
c1_correct = 0
c2_correct = 0
c3_correct = 0
c4_correct = 0
c5_correct = 0
total_correct = 0
incorrectly_segmented = 0
correct_guesses_dict = {}
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
CAPTCHA_IMAGE_FOLDER = "test captchas"
# Load up the model labels (so we can translate model predictions to actual letters)
with open(MODEL_LABELS_FILENAME, "rb") as f:
lb = pickle.load(f)
# Load the trained neural network
model = load_model(MODEL_FILENAME)
for root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):
for name in tqdm(files, desc='Solving captchas'):
kernel = (5,5)
#load image
image = cv2.imread(os.path.join(root, name))
image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)
#add padding
image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT, None, 255)
#blur
k = np.ones((5,5),np.float32)/25
image = cv2.filter2D(image,-1,k)
# threshhold image
ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
# clear white dots
clear_chunks(image,0,50)
# erosion
image = cv2.erode(image, kernel, iterations=1)
# get contours
contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
#segment letters
letter_image_regions = [] #(x, y, w ,h)
contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
contours = contours[:5]
for contour in contours:
if cv2.contourArea(contour) < 60:
continue
(x, y, w, h) = cv2.boundingRect(contour)
if w / h > 1.5:
half_width = int(w / 2)
letter_image_regions.append((x, y, half_width, h))
letter_image_regions.append((x + half_width, y, half_width, h))
else:
letter_image_regions.append((x, y, w, h))
if len(letter_image_regions) != 5:
            incorrectly_segmented += 1
            print(f"Found {len(letter_image_regions)} letter regions instead of 5, skipping (the guess would likely be incorrect)")
            continue
letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
chars = []
i=0
for (x,y,w,h) in letter_image_regions:
letter = image[y-2:y+h+2, x-2:x+w+2]
chars.append(letter)
i+=1
predictions = []
for letter in chars:
# Re-size the letter image to 20x20 pixels to match training data
letter = resize_to_fit(letter, 20, 20)
# Turn the single image into a 4d list of images to make Keras happy
letter = np.expand_dims(letter, axis=2)
letter = np.expand_dims(letter, axis=0)
# Ask the neural network to make a prediction
prediction = model.predict(letter)
# Convert the one-hot-encoded prediction back to a normal letter
letter_text = lb.inverse_transform(prediction)[0]
predictions.append(letter_text)
gc1, gc2, gc3, gc4, gc5 = predictions
c1, c2, c3, c4, c5, e1, e2, e3, e4 = name
correct_guesses = 0
if c1 == gc1:
c1_correct += 1
correct_guesses += 1
if c2 == gc2:
c2_correct += 1
correct_guesses += 1
if c3 == gc3:
c3_correct += 1
correct_guesses += 1
if c4 == gc4:
c4_correct += 1
correct_guesses += 1
if c5 == gc5:
c5_correct += 1
correct_guesses += 1
if ''.join(predictions) == ''.join([c1,c2,c3,c4,c5]):
total_correct += 1
n = correct_guesses_dict.get(correct_guesses, 0) + 1
correct_guesses_dict[correct_guesses] = n
print(f"Prediction for {name}: {''.join(predictions)}")
print(f"correct c1: {c1_correct}")
print(f"correct c2: {c2_correct}")
print(f"correct c3: {c3_correct}")
print(f"correct c4: {c4_correct}")
print(f"correct c5: {c5_correct}")
print(f"correct total: {total_correct}")
print(f"correctly segmented: {10000 - incorrectly_segmented}")
print(correct_guesses_dict)
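
# Standalone sketch of the region-splitting heuristic used above: a bounding box
# whose width/height ratio exceeds 1.5 is assumed to hold two touching letters and
# is split into two half-width boxes. The box values in the comment are made up.
def split_wide_regions(boxes, ratio=1.5):
    regions = []
    for (x, y, w, h) in boxes:
        if w / h > ratio:
            half = int(w / 2)
            regions.append((x, y, half, h))
            regions.append((x + half, y, half, h))
        else:
            regions.append((x, y, w, h))
    return regions
# split_wide_regions([(5, 3, 40, 20), (50, 3, 18, 20)])
# -> [(5, 3, 20, 20), (25, 3, 20, 20), (50, 3, 18, 20)]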
|
normal
|
{
"blob_id": "c2ddf31bce4a5f3ae2b0d5455bbc9942f92bff40",
"index": 275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\n<mask token>\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n<mask token>\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\ntotal_correct = 0\nincorrectly_segmented = 0\ncorrect_guesses_dict = {}\nMODEL_FILENAME = 'captcha_model.hdf5'\nMODEL_LABELS_FILENAME = 'model_labels.dat'\nCAPTCHA_IMAGE_FOLDER = 'test captchas'\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\nmodel = load_model(MODEL_FILENAME)\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-4": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom keras.models import load_model\nfrom utils import resize_to_fit, clear_chunks, stack_windows\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2 as cv2\nimport pickle\nfrom tqdm import tqdm\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\ntotal_correct = 0\nincorrectly_segmented = 0\ncorrect_guesses_dict = {}\nMODEL_FILENAME = 'captcha_model.hdf5'\nMODEL_LABELS_FILENAME = 'model_labels.dat'\nCAPTCHA_IMAGE_FOLDER = 'test captchas'\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\nmodel = load_model(MODEL_FILENAME)\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-5": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nfrom keras.models import load_model\nfrom utils import resize_to_fit, clear_chunks, stack_windows\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2 as cv2\nimport pickle\nfrom tqdm import tqdm\n\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\n\ntotal_correct = 0\nincorrectly_segmented = 0\n\ncorrect_guesses_dict = {}\n\nMODEL_FILENAME = \"captcha_model.hdf5\"\nMODEL_LABELS_FILENAME = \"model_labels.dat\"\nCAPTCHA_IMAGE_FOLDER = \"test captchas\"\n\n\n# Load up the model labels (so we can translate model predictions to actual letters)\nwith open(MODEL_LABELS_FILENAME, \"rb\") as f:\n lb = pickle.load(f)\n\n# Load the trained neural network\nmodel = load_model(MODEL_FILENAME)\n\n\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n \n kernel = (5,5)\n\n #load image\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n \n #add padding\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT, None, 255)\n\n #blur\n k = np.ones((5,5),np.float32)/25\n image = cv2.filter2D(image,-1,k)\n\n # threshhold image\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n\n # clear white dots\n clear_chunks(image,0,50)\n\n # erosion\n image = cv2.erode(image, kernel, iterations=1)\n\n # get contours\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n #segment letters\n letter_image_regions = [] #(x, y, w ,h)\n \n \n contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)\n contours = contours[:5]\n \n for contour in contours:\n \n if cv2.contourArea(contour) < 60:\n continue\n\n \n (x, y, w, h) = cv2.boundingRect(contour)\n\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(f\"Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect\")\n \n \n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n\n chars = []\n i=0\n for (x,y,w,h) in letter_image_regions:\n letter = image[y-2:y+h+2, x-2:x+w+2]\n chars.append(letter)\n i+=1\n\n predictions = []\n\n for letter in chars:\n # Re-size the letter image to 20x20 pixels to match training data\n letter = resize_to_fit(letter, 20, 20)\n\n # Turn the single image into a 4d list of images to make Keras happy\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n\n # Ask the neural network to make a prediction\n prediction = model.predict(letter)\n\n # Convert the one-hot-encoded prediction back to a normal letter\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name \n\n correct_guesses = 0\n\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n\n if ''.join(predictions) == ''.join([c1,c2,c3,c4,c5]):\n total_correct += 1\n\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n 
correct_guesses_dict[correct_guesses] = n\n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\n \nprint(f\"correct c1: {c1_correct}\")\nprint(f\"correct c2: {c2_correct}\")\nprint(f\"correct c3: {c3_correct}\")\nprint(f\"correct c4: {c4_correct}\")\nprint(f\"correct c5: {c5_correct}\")\n\nprint(f\"correct total: {total_correct}\")\n\nprint(f\"correctly segmented: {10000 - incorrectly_segmented}\")\n\nprint(correct_guesses_dict)\n \n \n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
#title = title[22:]
#hexa = []
#hexb = []
hexa = title[:2]
hexb = title[2:4]
#title = title[4:]
title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))
#print(title)
#lst.append(title)
os.rename(pathAndFilename,
os.path.join(dir, titlePattern % title + ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename= os.path.basename(pathname)
new_filename= re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(
pathname,
os.path.join(os.path.dirname(pathname), new_filename))
rename(r'C:\test', r'*.jpeg', r'%s')
#print(lst)
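
# Worked example of the renaming scheme above (the filename is hypothetical): the
# first four characters are read as two hex bytes and appended as decimal suffixes.
#   '1a2fholiday.jpeg' -> hexa='1a', hexb='2f', title='holiday'
#   int('1a', 16) == 26, int('2f', 16) == 47
#   result: 'holiday_26_47.jpeg'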
|
normal
|
{
"blob_id": "22aa6042b77c3cfd1f102a0ea22a43223e366d2f",
"index": 1476,
"step-1": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-3": "<mask token>\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-4": "import re, glob, os\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-5": "import re, glob, os\nlst = []\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n #title = title[22:]\n #hexa = []\n #hexb = []\n hexa = title[:2]\n hexb = title[2:4]\n #title = title[4:]\n\n title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))\n \n #print(title)\n #lst.append(title)\n os.rename(pathAndFilename, \n os.path.join(dir, titlePattern % title + ext))\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename= os.path.basename(pathname)\n new_filename= re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(\n pathname,\n os.path.join(os.path.dirname(pathname), new_filename))\n\n\nrename(r'C:\\test', r'*.jpeg', r'%s')\n#print(lst)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def demo(myAPI):
myAPI.setAttr()
video_capture = cv2.VideoCapture(0)
print('Press q to quit: ')
while True:
ret, frame = video_capture.read()
frame = cv2.resize(frame, (320, 240))
key = cv2.waitKey(100) & 255
if key == ord('q'):
break
elif key == ord('r'):
pass
frame = myAPI.simple_demo(frame)
cv2.imshow('Video', frame)
video_capture.release()
cv2.destroyAllWindows()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def demo(myAPI):
myAPI.setAttr()
video_capture = cv2.VideoCapture(0)
print('Press q to quit: ')
while True:
ret, frame = video_capture.read()
frame = cv2.resize(frame, (320, 240))
key = cv2.waitKey(100) & 255
if key == ord('q'):
break
elif key == ord('r'):
pass
frame = myAPI.simple_demo(frame)
cv2.imshow('Video', frame)
video_capture.release()
cv2.destroyAllWindows()
demo(API.FacePlusPlus())
<|reserved_special_token_1|>
import cv2
import sys
import online as API
def demo(myAPI):
myAPI.setAttr()
video_capture = cv2.VideoCapture(0)
print('Press q to quit: ')
while True:
ret, frame = video_capture.read()
frame = cv2.resize(frame, (320, 240))
key = cv2.waitKey(100) & 255
if key == ord('q'):
break
elif key == ord('r'):
pass
frame = myAPI.simple_demo(frame)
cv2.imshow('Video', frame)
video_capture.release()
cv2.destroyAllWindows()
demo(API.FacePlusPlus())
<|reserved_special_token_1|>
import cv2
import sys
import online as API
def demo(myAPI):
myAPI.setAttr()
video_capture = cv2.VideoCapture(0)
print("Press q to quit: ")
while True:
# Capture frame-by-frame
ret, frame = video_capture.read() #np.array
frame = cv2.resize(frame, (320, 240))
key = cv2.waitKey(100) & 0xFF
if key == ord('q'):
break
elif key == ord('r'):
pass
frame = myAPI.simple_demo(frame)
# Display the resulting frame
cv2.imshow('Video', frame)
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
demo(API.FacePlusPlus())
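
# Minimal stand-in for the 'online' module, inferred only from how it is used above
# (setAttr() once, then simple_demo(frame) per frame); the real FacePlusPlus class
# presumably wraps the Face++ web API, which is not reproduced here.
# class FacePlusPlus:
#     def setAttr(self):
#         pass                      # e.g. configure API keys / detection attributes
#     def simple_demo(self, frame):
#         return frame              # e.g. draw detected face boxes on the frame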
|
flexible
|
{
"blob_id": "778ef68b5270657f75185b27dc8219b35847afa1",
"index": 5829,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n",
"step-4": "import cv2\nimport sys\nimport online as API\n\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print('Press q to quit: ')\n while True:\n ret, frame = video_capture.read()\n frame = cv2.resize(frame, (320, 240))\n key = cv2.waitKey(100) & 255\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n cv2.imshow('Video', frame)\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n",
"step-5": "import cv2\nimport sys\nimport online as API\n\ndef demo(myAPI):\n myAPI.setAttr()\n video_capture = cv2.VideoCapture(0)\n print(\"Press q to quit: \")\n while True:\n # Capture frame-by-frame\n ret, frame = video_capture.read() #np.array\n\n frame = cv2.resize(frame, (320, 240))\n\n key = cv2.waitKey(100) & 0xFF\n if key == ord('q'):\n break\n elif key == ord('r'):\n pass\n frame = myAPI.simple_demo(frame)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n # When everything is done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndemo(API.FacePlusPlus())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import importlib
class Scrapper:
    @staticmethod
    def get_pos(str_lf, str_rg, text):
        # index of the first left delimiter and of the last right delimiter in text
        left = text.find(str_lf)
        right = text.rfind(str_rg)
        return left, right

    @staticmethod
    def scrapper(prov):
        # dynamically import scrappers/<prov>.py and run its scrape()
        scrapper = importlib.import_module('scrappers.{}'.format(prov))
        return scrapper.scrape()
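
# Usage sketch (the module name 'books' is hypothetical): Scrapper.scrapper('books')
# imports scrappers/books.py and calls its scrape(); get_pos returns the index of the
# first left delimiter and of the last right delimiter in a string.
#   Scrapper.get_pos('<td>', '</td>', '<td>42</td>')   # -> (0, 6)
#   rows = Scrapper.scrapper('books')                   # runs scrappers.books.scrape()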
|
normal
|
{
"blob_id": "67e06b6dddbd3f26295eaff921d1ad4a8b0e5487",
"index": 5580,
"step-1": "<mask token>\n\n\nclass Scrapper:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Scrapper:\n <mask token>\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-3": "<mask token>\n\n\nclass Scrapper:\n\n def get_pos(str_lf, str_rg, text):\n left = text.find(str_lf)\n right = text.rfind(str_rg)\n return left, right\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-4": "import importlib\n\n\nclass Scrapper:\n\n def get_pos(str_lf, str_rg, text):\n left = text.find(str_lf)\n right = text.rfind(str_rg)\n return left, right\n\n def scrapper(prov):\n scrapper = importlib.import_module('scrappers.{}'.format(prov))\n return scrapper.scrape()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python
'''
Fix a time and then draw the instantaneous geopotential (contours) from
/gws/nopw/j04/ncas_generic/users/renql/ERA5_subdaily/ERA5_NH_z_1989.nc,
the spatially filtered relative vorticity (shaded) from
~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/ERA5_VOR850_1hr_1995_DET_T63filt.nc,
and the identified feature points from
~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/fft_trs_pos.
Loop through the levels (850, 500, 250 hPa).
20211116
'''
import sys
import subprocess
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
import gc #garbage collector
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
import cartopy.crs as ccrs
import cartopy.feature as cfeat
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cmaps
from PIL import Image, ImageDraw, ImageSequence
def calc_frames(new_time):
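    # hour offset of new_time from 23:00 on 30 Nov of the previous year; used as the
    # time index into the filtered vorticity file, which is assumed to be 1-hourly
    # from that reference, e.g. calc_frames(datetime(1995, 1, 1, 0)) -> 745 (31 d + 1 h)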
old_time = datetime(new_time.year-1, 11, 30, 23)
days = (new_time - old_time).days
sec = (new_time - old_time).seconds
hours = days * 24 + sec/3600
return int(hours)
def read_point_fixtime(filname,fixtime,flonl,flonr,flats,flatn):
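    # TRACK-format ASCII: skip the 4 header lines, then read each TRACK_ID block of
    # "date lon lat ..." rows; keep the (lon, lat) of rows whose date string equals
    # fixtime and which fall inside the flonl-flonr / flats-flatn box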
ff = open(filname,"r")
line1 = ff.readline()
line2 = ff.readline()
line3 = ff.readline()
line4 = ff.readline()
plat = []
plon = []
line = ff.readline()
while line:
if line.strip().split(" ")[0] == "TRACK_ID":
num = int(ff.readline().strip().split(" ")[-1])
for nl in range(0,num,1):
data = list(map(float,ff.readline().strip().split(" ")))
if str(int(data[0])) == fixtime and \
data[1]<=flonr and data[1] >= flonl and data[2]<=flatn and data[2]>=flats :
plat.append(data[2])
plon.append(data[1])
line = ff.readline()
ff.close()
print("%s total feature point in %s : %d"%(filname,fixtime,len(plat)))
return plat, plon
lonl=0 #0 #
lonr=150#360#
lats=15 #0 #
latn=70 #90 #
lat_sp = 20
lon_sp = 30
nrow = 3
ncol = 1
bmlo = 0.1
title_font=18
label_font=14
dtime = pd.date_range(start='1995-01-01 00',periods=60, freq='6H',closed=None)
#dtime = pd.date_range(start='1995-01-01 00',end='1995-01-15 00', freq='6H',closed=None)
create_gif = True #False#
nfilt="T63"
lev = [850,500,250]
cnlvl =[[-8 ,1 ]]
cnlvl2 = [30,50,100]
varname = 'z'
path = '/home/users/qd201969/ERA5-1HR-lev/'
datapath = "/gws/nopw/j04/ncas_generic/users/renql/"#t/ERA5_NH_t_1989.nc
figdir = "/home/users/qd201969/uor_track/fig/"
f = xr.open_dataset("%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc"%(datapath,varname,varname,dtime[0].year))
lat = f['latitude'].data
lon = f['longitude'].data
ilon = lon[(lon>=lonl) & (lon<=lonr)]
ilat = lat[(lat>=lats) & (lat<=latn)]
ds = xr.open_dataset("/home/users/qd201969/gtopo30_0.9x1.25.nc")
phis = ds['PHIS'].sel(lon=ilon,lat=ilat,method="nearest").load()
phis = phis/9.8 # transfer from m2/s2 to m
del ds
gc.collect()
nl = 0
fcolors = cmaps.BlueDarkRed18
cnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0]+cnlvl[nl][1]*(fcolors.N-1), cnlvl[nl][1])
norm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N,extend='both')
params = {'legend.fontsize': label_font,
'axes.labelsize': label_font,
'axes.titlesize':label_font,
'xtick.labelsize':label_font,
'ytick.labelsize':label_font}
plt.rcParams.update(params)
for nt in range(len(dtime)):
fig = plt.figure(figsize=(12,12),dpi=100)
ax = fig.subplots(nrow,ncol, subplot_kw=dict(projection=ccrs.PlateCarree())) #sharex=True, sharey=True
for nl in range(len(lev)):
var = f[varname].sel(time=dtime[nt],level=lev[nl],longitude=ilon,latitude=ilat)
var.data = var.data/9.8
path2 = "%sERA5_VOR%d_1hr_%d_DET/"%(path,lev[nl],dtime[nt].year)
plat, plon = read_point_fixtime(path2+"fft_trs_pos",dtime[nt].strftime('%Y%m%d%H'),lonl,lonr,lats,latn)
fvor = xr.open_dataset("%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc"%(path2,lev[nl],dtime[nt].year,nfilt))
var1 = fvor['var'].sel(time=calc_frames(dtime[nt]),level = 1,lon=ilon,lat=ilat,method="nearest").load()
#fvor = xr.open_dataset("%sERA5_VOR_1h_dec_jan/ERA5_VOR%d_1hr_dec-jan%d_DET.nc"%(datapath,lev[nl],dtime[nt].year))
#var1 = fvor['var138'].sel(time=dtime[nt],lev=float(lev[nl]*100),lat=ilat,lon=ilon,method="nearest").load()
var1.values = var1.values*1e5
axe = ax[nl]
axe.add_feature(cfeat.COASTLINE.with_scale('110m'),edgecolor='black', linewidth=0.8, zorder=1)
axe.set_title("%s %dhPa (%d)"%(dtime[nt].strftime('%Y-%m-%d-%H:00'), lev[nl], len(plat)),fontsize=title_font)
shad = axe.contourf(ilon, ilat, var1, cnlevels,
transform=ccrs.PlateCarree(),cmap=fcolors,extend='both',norm=norm)
cont = axe.contour(ilon, ilat, var, np.arange(1000,15000,cnlvl2[nl]),
transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)
#pint = axe.plot(plon,plat,color='darkviolet', marker='o', markersize=12, transform=ccrs.PlateCarree())
pint = axe.scatter(plon,plat,10.0**2,color='k', marker='o', transform=ccrs.PlateCarree())
topo = axe.contour(ilon, ilat, phis, [1500,3000],
transform=ccrs.PlateCarree(),colors='black',linewidths=1.2)
axe.set_yticks(np.arange(lats,latn,lat_sp), crs=ccrs.PlateCarree())
axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
axe.set_xticks(np.arange(lonl,lonr,lon_sp), crs=ccrs.PlateCarree())
axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))
position = fig.add_axes([0.85, bmlo+0.1, 0.015, 0.7]) #left, bottom, width, height
cb = plt.colorbar(shad, cax=position ,orientation='vertical')#, shrink=.9)
cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) #, weight='bold'
plt.tight_layout(rect=(0,bmlo,1,1))
plt.savefig(figdir+"filt_vor_%s.png"%(dtime[nt].strftime('%Y%m%d%H')), bbox_inches='tight',pad_inches=0.01)
if create_gif == True:
figname = figdir+"filt_vor_*.png"
fn_stream = subprocess.check_output("ls "+figname, shell=True).decode('utf-8')
fn_list = fn_stream.split()
print(fn_list[0])
print('filenumber : '+str(len(fn_list)))
gif_name = figname.rsplit("_",1)[0]+".gif"
frames = []
for itm in fn_list:
frame = Image.open(itm)
frames.append(frame)
frames[0].save(gif_name, save_all=True, append_images=frames[1:],\
duration = 1000, loop=0, disposal=1)
subprocess.run('rm -f %s'%(figname),shell=True)
|
normal
|
{
"blob_id": "09a468e11651eb60e0805c151bda270e0ebecca9",
"index": 4853,
"step-1": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\n<mask token>\ndel ds\ngc.collect()\n<mask token>\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, 
save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n",
"step-3": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\nlonl = 0\nlonr = 150\nlats = 15\nlatn = 70\nlat_sp = 20\nlon_sp = 30\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font = 18\nlabel_font = 14\ndtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None\n )\ncreate_gif = True\nnfilt = 'T63'\nlev = [850, 500, 250]\ncnlvl = [[-8, 1]]\ncnlvl2 = [30, 50, 100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = '/gws/nopw/j04/ncas_generic/users/renql/'\nfigdir = '/home/users/qd201969/uor_track/fig/'\nf = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath,\n varname, varname, dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon >= lonl) & (lon <= lonr)]\nilat = lat[(lat >= lats) & (lat <= latn)]\nds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc')\nphis = ds['PHIS'].sel(lon=ilon, lat=ilat, method='nearest').load()\nphis = phis / 9.8\ndel ds\ngc.collect()\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N -\n 1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend=\n 'both')\nparams = {'legend.fontsize': label_font, 'axes.labelsize': label_font,\n 'axes.titlesize': label_font, 'xtick.labelsize': label_font,\n 'ytick.labelsize': label_font}\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n 
pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n",
"step-4": "<mask token>\nimport sys\nimport subprocess\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport gc\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeat\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nimport cmaps\nfrom PIL import Image, ImageDraw, ImageSequence\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\nlonl = 0\nlonr = 150\nlats = 15\nlatn = 70\nlat_sp = 20\nlon_sp = 30\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font = 18\nlabel_font = 14\ndtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None\n )\ncreate_gif = True\nnfilt = 'T63'\nlev = [850, 500, 250]\ncnlvl = [[-8, 1]]\ncnlvl2 = [30, 50, 100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = '/gws/nopw/j04/ncas_generic/users/renql/'\nfigdir = '/home/users/qd201969/uor_track/fig/'\nf = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath,\n varname, varname, dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon >= lonl) & (lon <= lonr)]\nilat = lat[(lat >= lats) & (lat <= latn)]\nds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc')\nphis = ds['PHIS'].sel(lon=ilon, lat=ilat, method='nearest').load()\nphis = phis / 9.8\ndel ds\ngc.collect()\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N -\n 1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend=\n 'both')\nparams = {'legend.fontsize': label_font, 'axes.labelsize': label_font,\n 'axes.titlesize': label_font, 'xtick.labelsize': label_font,\n 'ytick.labelsize': label_font}\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 
'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n",
"step-5": "#!/usr/bin/env python\n'''\nfix a time and then draw the instant geopotential (contour) from \n/gws/nopw/j04/ncas_generic/users/renql/ERA5_subdaily/ERA5_NH_z_1989.nc,\n\nspatial filtered relative vorticity (shaded) from \n~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/ERA5_VOR850_1hr_1995_DET_T63filt.nc\n\nand identified feature points from \n~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/fft_trs_pos\n\nLoop through the height (850, 500, 250)\n\n20211116\n'''\nimport sys\nimport subprocess\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport gc #garbage collector\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeat\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nimport cmaps\nfrom PIL import Image, ImageDraw, ImageSequence\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year-1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec/3600\n return int(hours)\n\ndef read_point_fixtime(filname,fixtime,flonl,flonr,flats,flatn):\n ff = open(filname,\"r\") \n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n \n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(\" \")[0] == \"TRACK_ID\":\n num = int(ff.readline().strip().split(\" \")[-1])\n for nl in range(0,num,1):\n data = list(map(float,ff.readline().strip().split(\" \")))\n if str(int(data[0])) == fixtime and \\\n data[1]<=flonr and data[1] >= flonl and data[2]<=flatn and data[2]>=flats :\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print(\"%s total feature point in %s : %d\"%(filname,fixtime,len(plat)))\n return plat, plon \n\nlonl=0 #0 #\nlonr=150#360#\nlats=15 #0 #\nlatn=70 #90 #\nlat_sp = 20\nlon_sp = 30\n\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font=18\nlabel_font=14\n\ndtime = pd.date_range(start='1995-01-01 00',periods=60, freq='6H',closed=None)\n#dtime = pd.date_range(start='1995-01-01 00',end='1995-01-15 00', freq='6H',closed=None)\ncreate_gif = True #False#\nnfilt=\"T63\"\nlev = [850,500,250]\ncnlvl =[[-8 ,1 ]]\ncnlvl2 = [30,50,100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = \"/gws/nopw/j04/ncas_generic/users/renql/\"#t/ERA5_NH_t_1989.nc\nfigdir = \"/home/users/qd201969/uor_track/fig/\"\n\nf = xr.open_dataset(\"%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc\"%(datapath,varname,varname,dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon>=lonl) & (lon<=lonr)]\nilat = lat[(lat>=lats) & (lat<=latn)]\nds = xr.open_dataset(\"/home/users/qd201969/gtopo30_0.9x1.25.nc\")\nphis = ds['PHIS'].sel(lon=ilon,lat=ilat,method=\"nearest\").load()\nphis = phis/9.8 # transfer from m2/s2 to m\ndel ds\ngc.collect()\n\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0]+cnlvl[nl][1]*(fcolors.N-1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N,extend='both')\n\nparams = {'legend.fontsize': label_font,\n 'axes.labelsize': label_font,\n 'axes.titlesize':label_font,\n 'xtick.labelsize':label_font,\n 'ytick.labelsize':label_font}\nplt.rcParams.update(params)\n\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12,12),dpi=100)\n ax = fig.subplots(nrow,ncol, subplot_kw=dict(projection=ccrs.PlateCarree())) #sharex=True, sharey=True\n for nl in range(len(lev)):\n var = 
f[varname].sel(time=dtime[nt],level=lev[nl],longitude=ilon,latitude=ilat)\n var.data = var.data/9.8\n\n path2 = \"%sERA5_VOR%d_1hr_%d_DET/\"%(path,lev[nl],dtime[nt].year)\n plat, plon = read_point_fixtime(path2+\"fft_trs_pos\",dtime[nt].strftime('%Y%m%d%H'),lonl,lonr,lats,latn)\n \n fvor = xr.open_dataset(\"%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc\"%(path2,lev[nl],dtime[nt].year,nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]),level = 1,lon=ilon,lat=ilat,method=\"nearest\").load()\n #fvor = xr.open_dataset(\"%sERA5_VOR_1h_dec_jan/ERA5_VOR%d_1hr_dec-jan%d_DET.nc\"%(datapath,lev[nl],dtime[nt].year))\n #var1 = fvor['var138'].sel(time=dtime[nt],lev=float(lev[nl]*100),lat=ilat,lon=ilon,method=\"nearest\").load()\n var1.values = var1.values*1e5\n\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'),edgecolor='black', linewidth=0.8, zorder=1) \n axe.set_title(\"%s %dhPa (%d)\"%(dtime[nt].strftime('%Y-%m-%d-%H:00'), lev[nl], len(plat)),fontsize=title_font)\n\n shad = axe.contourf(ilon, ilat, var1, cnlevels,\n transform=ccrs.PlateCarree(),cmap=fcolors,extend='both',norm=norm)\n \n cont = axe.contour(ilon, ilat, var, np.arange(1000,15000,cnlvl2[nl]), \n transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n \n #pint = axe.plot(plon,plat,color='darkviolet', marker='o', markersize=12, transform=ccrs.PlateCarree())\n pint = axe.scatter(plon,plat,10.0**2,color='k', marker='o', transform=ccrs.PlateCarree())\n\n topo = axe.contour(ilon, ilat, phis, [1500,3000],\n transform=ccrs.PlateCarree(),colors='black',linewidths=1.2)\n\n axe.set_yticks(np.arange(lats,latn,lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl,lonr,lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n position = fig.add_axes([0.85, bmlo+0.1, 0.015, 0.7]) #left, bottom, width, height\n cb = plt.colorbar(shad, cax=position ,orientation='vertical')#, shrink=.9)\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) #, weight='bold'\n\n plt.tight_layout(rect=(0,bmlo,1,1))\n plt.savefig(figdir+\"filt_vor_%s.png\"%(dtime[nt].strftime('%Y%m%d%H')), bbox_inches='tight',pad_inches=0.01)\n\nif create_gif == True:\n figname = figdir+\"filt_vor_*.png\"\n fn_stream = subprocess.check_output(\"ls \"+figname, shell=True).decode('utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : '+str(len(fn_list)))\n gif_name = figname.rsplit(\"_\",1)[0]+\".gif\" \n\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\\\n duration = 1000, loop=0, disposal=1)\n subprocess.run('rm -f %s'%(figname),shell=True)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
# ----------------------------------------------------------------------------
# Paths.
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
# experimental - replace Dense layers with TreeConnect
use_treeconnect = False
treeconnect_threshold = 1024
# ----------------------------------------------------------------------------
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {
'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'
],
'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'
],
'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'
],
'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl'
],
}
|
normal
|
{
"blob_id": "cb904408486ad9ea8cc0c8ff2ec393e480309a57",
"index": 2403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-3": "<mask token>\nfrom facegan import ROOT_PATH\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-4": "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\"\"\"Global configuration.\"\"\"\n\n# ----------------------------------------------------------------------------\n# Paths.\nfrom facegan import ROOT_PATH\n\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\n\n# experimental - replace Dense layers with TreeConnect\nuse_treeconnect = False\ntreeconnect_threshold = 1024\n\n# ----------------------------------------------------------------------------\n\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\n\nnetworks_urls = {\n 'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'\n ],\n 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'\n ],\n 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'\n ],\n 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl'\n ],\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
dataset = pd.read_csv('./dataset.csv')
X_train, X_test, y_train, y_test = train_test_split(dataset['text'],
dataset['label'], test_size=0.2, random_state=1, shuffle=True)
baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1,
3))), ('svc', LinearSVC())])
baseline_pipeline.fit(X_train, y_train)
print(classification_report(y_test, baseline_pipeline.predict(X_test),
digits=4))
<|reserved_special_token_1|>
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
if __name__ == '__main__':
dataset = pd.read_csv('./dataset.csv')
X_train, X_test, y_train, y_test = train_test_split(dataset['text'],
dataset['label'], test_size=0.2, random_state=1, shuffle=True)
baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1,
3))), ('svc', LinearSVC())])
baseline_pipeline.fit(X_train, y_train)
print(classification_report(y_test, baseline_pipeline.predict(X_test),
digits=4))
<|reserved_special_token_1|>
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
if __name__ == "__main__":
dataset = pd.read_csv('./dataset.csv')
X_train, X_test, y_train, y_test = train_test_split(
dataset["text"], dataset["label"], test_size=0.2, random_state=1, shuffle=True
)
baseline_pipeline = Pipeline(
[("vect", TfidfVectorizer(ngram_range=(1, 3))), ("svc", LinearSVC())]
)
baseline_pipeline.fit(X_train, y_train)
print(classification_report(y_test, baseline_pipeline.predict(X_test), digits=4))
|
flexible
|
{
"blob_id": "f82c961fc1accd362b34a685bac4cc35d98f44ef",
"index": 6371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n dataset = pd.read_csv('./dataset.csv')\n X_train, X_test, y_train, y_test = train_test_split(dataset['text'],\n dataset['label'], test_size=0.2, random_state=1, shuffle=True)\n baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, \n 3))), ('svc', LinearSVC())])\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test),\n digits=4))\n",
"step-3": "import pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nif __name__ == '__main__':\n dataset = pd.read_csv('./dataset.csv')\n X_train, X_test, y_train, y_test = train_test_split(dataset['text'],\n dataset['label'], test_size=0.2, random_state=1, shuffle=True)\n baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, \n 3))), ('svc', LinearSVC())])\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test),\n digits=4))\n",
"step-4": "import pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nif __name__ == \"__main__\":\n dataset = pd.read_csv('./dataset.csv')\n \n X_train, X_test, y_train, y_test = train_test_split(\n dataset[\"text\"], dataset[\"label\"], test_size=0.2, random_state=1, shuffle=True\n )\n\n baseline_pipeline = Pipeline(\n [(\"vect\", TfidfVectorizer(ngram_range=(1, 3))), (\"svc\", LinearSVC())]\n )\n\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test), digits=4))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def test_config(app):
assert app.testing
<|reserved_special_token_1|>
# testa se uma aplicacao em modo de teste esta sendo construida
def test_config(app):
assert app.testing
|
flexible
|
{
"blob_id": "96d7963faf720a3dc0d96b55ad65ee7ac83c1818",
"index": 5798,
"step-1": "<mask token>\n",
"step-2": "def test_config(app):\n assert app.testing\n",
"step-3": "# testa se uma aplicacao em modo de teste esta sendo construida\ndef test_config(app):\n assert app.testing\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Component:
pass
class Entity:
def __init__(self, id):
self.id = id
self.components = {}
def add_component(self, component):
if type(component) in self.components:
raise Exception("This entity already has a component of that type")
# Since there is only one of each type of component, they are stored by type
self.components[type(component)] = component
def has_component(self, component_type):
return component_type in self.components
def get_component(self, component_type):
return self.components[component_type]
class System:
def __init__(self, *required):
self.required = required
self.entity_ids = set()
def bind_manager(self, manager):
self.manager = manager
def update(self, deltaTime):
self.begin()
for entity_id in self.entity_ids:
entity = self.manager.get_entity_by_id(entity_id)
self.process(entity, deltaTime)
self.end()
    # Overridden in the derived class to specify the functionality of the system
def process(self, entity, deltaTime):
pass
# Can be overridden if you want to do something before the first entity is processed
def begin(self):
pass
# Can be overridden if you want to do something after the last entity is processed
def end(self):
pass
def update_entity_registration(self, entity):
contains = entity.id in self.entity_ids
matches = self.matches(entity)
# Already exists, but no longer matches
if contains and not matches:
self.entity_ids.remove(entity.id)
# Doesn't exist, but does match
elif not contains and matches:
self.entity_ids.add(entity.id)
def matches(self, entity):
for required in self.required:
if not entity.has_component(required):
return False
return True
class Manager:
def __init__(self):
self.entities = {}
self.current_id = 0
self.systems = []
def create_entity(self):
entity = Entity(self.current_id)
self.current_id += 1
self.entities[entity.id] = entity
return entity
def get_entity_by_id(self, id):
return self.entities[id]
    # Use this to add components, not the entity method, so system registration stays up to date. Wish there was a way to enforce that in Python
def add_component_to_entity(self, entity, component):
entity.add_component(component)
self.update_entity_registration(entity)
def add_system(self, system):
system.bind_manager(self)
self.systems.append(system)
def update(self, deltaTime):
for system in self.systems:
system.update(deltaTime)
def update_entity_registration(self, entity):
for system in self.systems:
system.update_entity_registration(entity)
|
normal
|
{
"blob_id": "14f7f31fa64799cdc08b1363b945da50841d16b5",
"index": 3020,
"step-1": "<mask token>\n\n\nclass System:\n <mask token>\n\n def bind_manager(self, manager):\n self.manager = manager\n <mask token>\n\n def process(self, entity, deltaTime):\n pass\n <mask token>\n <mask token>\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-2": "<mask token>\n\n\nclass System:\n <mask token>\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n <mask token>\n <mask token>\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-3": "<mask token>\n\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n\n def begin(self):\n pass\n\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-4": "<mask token>\n\n\nclass Entity:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n\n def begin(self):\n pass\n\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-5": "\nclass Component:\n pass\n\nclass Entity:\n\n def __init__(self, id):\n self.id = id\n self.components = {}\n\n def add_component(self, component):\n if type(component) in self.components:\n raise Exception(\"This entity already has a component of that type\")\n\n # Since there is only one of each type of component, they are stored by type\n self.components[type(component)] = component\n\n def has_component(self, component_type):\n return component_type in self.components\n\n def get_component(self, component_type):\n return self.components[component_type]\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n \n def update(self, deltaTime):\n self.begin()\n\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n \n self.end()\n\n # Overridden in the derived class to specify functionality of system\n def process(self, entity, deltaTime):\n pass\n\n # Can be overridden if you want to do something before the first entity is processed\n def begin(self):\n pass\n\n # Can be overridden if you want to do something after the last entity is processed\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n\n # Already exists, but no longer matches\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n # Doesn't exist, but does match\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n \n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n\n return True\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n\n self.systems = []\n \n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n # Use this to add components, not the entity method!! Wish there was a way to enforce that in python\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n \n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-ids": [
13,
14,
17,
18,
24
]
}
|
[
13,
14,
17,
18,
24
] |
#!/usr/bin/env python3
""" Greets the Pep Boys.
"""
for name in "Manny", "Moe", "Jack":
print("Hi ya", name + '!')
|
normal
|
{
"blob_id": "81ff77064a299b4fcd456f341ecb40ba5afe3295",
"index": 1714,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor name in ('Manny', 'Moe', 'Jack'):\n print('Hi ya', name + '!')\n",
"step-3": "#!/usr/bin/env python3\n\"\"\" Greets the Pep Boys.\n\"\"\"\n\nfor name in \"Manny\", \"Moe\", \"Jack\":\n print(\"Hi ya\", name + '!')\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Converts text to speech in different accents. Requires pip3 install gTTS
from gtts import gTTS
import os
language_code = """
Language Code
-------- ----
Afrikaans af
Albanian sq
Arabic ar
Belarusian be
Bulgarian bg
Catalan ca
Chinese Simplified zh-CN
Chinese Traditional zh-TW
Croatian hr
Czech cs
Danish da
Dutch nl
English en
Estonian et
Filipino tl
Finnish fi
French fr
Galician gl
German de
Greek el
Hebrew iw
Hindi hi
Hungarian hu
Icelandic is
Indonesian id
Irish ga
Italian it
Japanese ja
Korean ko
Latvian lv
Lithuanian lt
Macedonian mk
Malay ms
Maltese mt
Norwegian no
Persian fa
Polish pl
Portuguese pt
Romanian ro
Russian ru
Serbian sr
Slovak sk
Slovenian sl
Spanish es
Swahili sw
Swedish sv
Thai th
Turkish tr
Ukrainian uk
Vietnamese vi
Welsh cy
Yiddish yi
"""
print("We're going to speak anything you type in a different accent")
mytext = input("Please enter some text: ")
print(language_code)
language = input("Please select the accent: ")
# Passing the text and language to the engine
myobj = gTTS(text=mytext, lang=language, slow=True)
# Saving the converted audio in an mp3 file named texty
myobj.save("texty.mp3")
# It does create the file but doesn't play.
# Also, I wanted it to actually translate to a different language, but all it does is say it in a different accent!
os.system("mpg321 texty.mp3")
|
normal
|
{
"blob_id": "545053bc2b7c8687622d747673f2ad37b978014c",
"index": 3403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"We're going to speak anything you type in a different accent\")\n<mask token>\nprint(language_code)\n<mask token>\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-3": "<mask token>\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input('Please enter some text: ')\nprint(language_code)\nlanguage = input('Please select the accent: ')\nmyobj = gTTS(text=mytext, lang=language, slow=True)\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-4": "from gtts import gTTS\nimport os\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input('Please enter some text: ')\nprint(language_code)\nlanguage = input('Please select the accent: ')\nmyobj = gTTS(text=mytext, lang=language, slow=True)\nmyobj.save('texty.mp3')\nos.system('mpg321 texty.mp3')\n",
"step-5": "# Converts text to speech in different accents. Requires pip3 install gTTS\nfrom gtts import gTTS\nimport os\n\nlanguage_code = \"\"\"\nLanguage Code\n-------- ----\nAfrikaans af\nAlbanian sq\nArabic ar\nBelarusian be\nBulgarian bg\nCatalan ca\nChinese Simplified zh-CN\nChinese Traditional zh-TW\nCroatian hr\nCzech cs\nDanish da\nDutch nl\nEnglish en\nEstonian et\nFilipino tl\nFinnish fi\nFrench fr\nGalician gl\nGerman de\nGreek el\nHebrew iw\nHindi hi\nHungarian hu\nIcelandic is\nIndonesian id\nIrish ga\nItalian it\nJapanese ja\nKorean ko\nLatvian lv\nLithuanian lt\nMacedonian mk\nMalay ms\nMaltese mt\nNorwegian no\nPersian fa\nPolish pl\nPortuguese pt\nRomanian ro\nRussian ru\nSerbian sr\nSlovak sk\nSlovenian sl\nSpanish es\nSwahili sw\nSwedish sv\nThai th\nTurkish tr\nUkrainian uk\nVietnamese vi\nWelsh cy\nYiddish yi\n\"\"\"\n\nprint(\"We're going to speak anything you type in a different accent\")\nmytext = input(\"Please enter some text: \")\nprint(language_code)\nlanguage = input(\"Please select the accent: \")\n\n# Passing the text and language to the engine\nmyobj = gTTS(text=mytext, lang=language, slow=True)\n\n# Saving the converted audio in a mp3 file named texty\nmyobj.save(\"texty.mp3\")\n\n# It does create the file but doesnt play. \n# Also, I wanted it to actually translate to a different language, but all it does is say it in a different accent!\nos.system(\"mpg321 texty.mp3\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def calculate(x):
return x * x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate(x):
return x * x
<|reserved_special_token_0|>
plt.plot(inputs, outputs)
plt.savefig('plot.png')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate(x):
return x * x
inputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
outputs = [calculate(x) for x in inputs]
plt.plot(inputs, outputs)
plt.savefig('plot.png')
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
def calculate(x):
return x * x
inputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
outputs = [calculate(x) for x in inputs]
plt.plot(inputs, outputs)
plt.savefig('plot.png')
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
# Function for testing
# Maps x => x*x
def calculate(x):
return x * x
inputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]
outputs = [calculate(x) for x in inputs]
plt.plot(inputs, outputs)
plt.savefig("plot.png")
|
flexible
|
{
"blob_id": "1b3891565f776064cfcca02fb22ea65853f7e66f",
"index": 3629,
"step-1": "<mask token>\n\n\ndef calculate(x):\n return x * x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculate(x):\n return x * x\n\n\n<mask token>\nplt.plot(inputs, outputs)\nplt.savefig('plot.png')\n",
"step-3": "<mask token>\n\n\ndef calculate(x):\n return x * x\n\n\ninputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]\noutputs = [calculate(x) for x in inputs]\nplt.plot(inputs, outputs)\nplt.savefig('plot.png')\n",
"step-4": "from matplotlib import pyplot as plt\n\n\ndef calculate(x):\n return x * x\n\n\ninputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]\noutputs = [calculate(x) for x in inputs]\nplt.plot(inputs, outputs)\nplt.savefig('plot.png')\n",
"step-5": "from matplotlib import pyplot as plt\n\n# Function for testing\n# Maps x => x*x\ndef calculate(x):\n\treturn x * x\n\n\ninputs = [-0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5]\n\noutputs = [calculate(x) for x in inputs]\n\nplt.plot(inputs, outputs)\nplt.savefig(\"plot.png\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.test import TestCase
from student.forms import StudentForm
class ModelTest(TestCase):
def test_expense_form_valid_data(self):
form = StudentForm(data={
'student_id': 500,
'firstName': "Emre",
'lastName': "Tan",
'department': "Panama",
'mathScore': 100,
'physicsScore': 70,
'chemistryScore': 40,
'biologyScore': 10
})
self.assertTrue(form.is_valid())
def test_expense_form_no_data(self):
form = StudentForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 8)
def test_expense_form_invalid_required(self):
form = StudentForm(data={
'student_id': 500,
'firstName': "",
'lastName': "",
'department': "",
'mathScore': 100,
'physicsScore': 70,
'chemistryScore': 40,
'biologyScore': 10
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors, {
'firstName': ['This field is required.'],
'lastName': ['This field is required.'],
'department': ['This field is required.']
})
def test_expense_form_invalid_equal_to_max(self):
form = StudentForm(data={
'student_id': 120000,
'firstName': "Berkay",
'lastName': "Tan",
'department': "Bilisim",
'mathScore': 200,
'physicsScore': 150,
'chemistryScore': 150,
'biologyScore': 101
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 5)
self.assertEqual(form.errors, {
'student_id': ['Ensure this value is less than or equal to 9999.'],
'mathScore': ['Ensure this value is less than or equal to 100.'],
'physicsScore': ['Ensure this value is less than or equal to 100.'],
'chemistryScore': ['Ensure this value is less than or equal to 100.'],
'biologyScore': ['Ensure this value is less than or equal to 100.'],
})
|
normal
|
{
"blob_id": "6dc7c7de972388f3984a1238a2d62e53c60c622e",
"index": 6252,
"step-1": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n <mask token>\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-4": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={'student_id': 500, 'firstName': 'Emre',\n 'lastName': 'Tan', 'department': 'Panama', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={'student_id': 500, 'firstName': '',\n 'lastName': '', 'department': '', 'mathScore': 100,\n 'physicsScore': 70, 'chemistryScore': 40, 'biologyScore': 10})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {'firstName': [\n 'This field is required.'], 'lastName': [\n 'This field is required.'], 'department': [\n 'This field is required.']})\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={'student_id': 120000, 'firstName':\n 'Berkay', 'lastName': 'Tan', 'department': 'Bilisim',\n 'mathScore': 200, 'physicsScore': 150, 'chemistryScore': 150,\n 'biologyScore': 101})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {'student_id': [\n 'Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'\n ], 'physicsScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'chemistryScore': [\n 'Ensure this value is less than or equal to 100.'],\n 'biologyScore': [\n 'Ensure this value is less than or equal to 100.']})\n",
"step-5": "from django.test import TestCase\nfrom student.forms import StudentForm\n\n\nclass ModelTest(TestCase):\n\n def test_expense_form_valid_data(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"Emre\",\n 'lastName': \"Tan\",\n 'department': \"Panama\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n\n self.assertTrue(form.is_valid())\n\n def test_expense_form_no_data(self):\n form = StudentForm(data={})\n\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 8)\n\n def test_expense_form_invalid_required(self):\n form = StudentForm(data={\n 'student_id': 500,\n 'firstName': \"\",\n 'lastName': \"\",\n 'department': \"\",\n 'mathScore': 100,\n 'physicsScore': 70,\n 'chemistryScore': 40,\n 'biologyScore': 10\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 3)\n self.assertEqual(form.errors, {\n 'firstName': ['This field is required.'],\n 'lastName': ['This field is required.'],\n 'department': ['This field is required.']\n })\n\n def test_expense_form_invalid_equal_to_max(self):\n form = StudentForm(data={\n 'student_id': 120000,\n 'firstName': \"Berkay\",\n 'lastName': \"Tan\",\n 'department': \"Bilisim\",\n 'mathScore': 200,\n 'physicsScore': 150,\n 'chemistryScore': 150,\n 'biologyScore': 101\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 5)\n self.assertEqual(form.errors, {\n 'student_id': ['Ensure this value is less than or equal to 9999.'],\n 'mathScore': ['Ensure this value is less than or equal to 100.'],\n 'physicsScore': ['Ensure this value is less than or equal to 100.'],\n 'chemistryScore': ['Ensure this value is less than or equal to 100.'],\n 'biologyScore': ['Ensure this value is less than or equal to 100.'],\n })\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import json
import os
from django.conf import settings
from django.db import models
from jsonfield import JSONField
class Word(models.Model):
value = models.CharField(
max_length=50,
verbose_name='Слово'
)
spelling = models.CharField(
max_length=250,
verbose_name='Транскрипция'
)
raw_od_article = JSONField(
verbose_name='Сырые данные с OD'
)
is_active = models.BooleanField(
default=True,
verbose_name='Используется'
)
def __str__(self):
return self.value
class Meta:
ordering = ["value"]
verbose_name = "Слово"
verbose_name_plural = "Слова"
class Meaning(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
value = models.TextField(
verbose_name='Значение'
)
order = models.PositiveIntegerField(
verbose_name="Порядок",
default=0
)
examples = JSONField(
null=True,
blank=True
)
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ["order"]
verbose_name = "Доп. значение"
verbose_name_plural = "Доп. значения"
class Pronunciation(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
audio = models.FileField(
upload_to='media/audio',
verbose_name='Произношение'
)
raw_od_data = JSONField(
verbose_name='Сырые данные с OD',
blank=True,
null=True
)
is_active = models.BooleanField(
default=True,
verbose_name='Используется'
)
def __str__(self):
return "Произношение {}".format(self.word)
class Meta:
verbose_name = "Произношение"
verbose_name_plural = "Произношения"
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
user = models.ForeignKey(
"auth.User",
on_delete=models.CASCADE,
verbose_name='Пользователь'
)
is_user_know_meaning = models.BooleanField(
default=False,
verbose_name='Выучил значение'
)
is_user_know_pronunciation = models.BooleanField(
default=False,
verbose_name='Выучил произношение'
)
usage_count = models.PositiveIntegerField(
default=0,
verbose_name='Количество показов'
)
last_usage_date = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата последнего показа'
)
preferred_pronunciation = models.PositiveIntegerField(
default=0,
verbose_name='forvo id препочтительного произношения',
)
training_session = models.BooleanField(
default=False,
blank=False,
verbose_name='Сеанс обучения'
)
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(
settings.BASE_DIR, 'media', 'forvo', '{}.json'.format(word_str)
)
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds', word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds', word_str, item))
return ret
def get_pronunciations(self):
word = self.word
forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in forvo_meta.get('items') or []:
if item.get('code', '') != 'en' or item.get(
'country', '') != 'United States':
continue
if ct > slen-1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({
'id': item['id'],
'by': item['username'],
'sex': item['sex'],
'src': sound_file,
'best': is_best
})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return "Статистика слова {}".format(self.word)
class Meta:
verbose_name = "Статистика"
verbose_name_plural = "Статистика"
|
normal
|
{
"blob_id": "067e0129b1a9084bbcee28d1973504299b89afdb",
"index": 8911,
"step-1": "<mask token>\n\n\nclass Meaning(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-2": "<mask token>\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 
'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-3": "<mask token>\n\n\nclass Word(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['value']\n verbose_name = 'Слово'\n verbose_name_plural = 'Слова'\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 
1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-4": "<mask token>\n\n\nclass Word(models.Model):\n value = models.CharField(max_length=50, verbose_name='Слово')\n spelling = models.CharField(max_length=250, verbose_name='Транскрипция')\n raw_od_article = JSONField(verbose_name='Сырые данные с OD')\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return self.value\n\n\n class Meta:\n ordering = ['value']\n verbose_name = 'Слово'\n verbose_name_plural = 'Слова'\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n 
break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-5": "import json\nimport os\n\nfrom django.conf import settings\nfrom django.db import models\nfrom jsonfield import JSONField\n\n\nclass Word(models.Model):\n value = models.CharField(\n max_length=50,\n verbose_name='Слово'\n )\n spelling = models.CharField(\n max_length=250,\n verbose_name='Транскрипция'\n )\n raw_od_article = JSONField(\n verbose_name='Сырые данные с OD'\n )\n\n is_active = models.BooleanField(\n default=True,\n verbose_name='Используется'\n )\n\n def __str__(self):\n return self.value\n\n class Meta:\n ordering = [\"value\"]\n verbose_name = \"Слово\"\n verbose_name_plural = \"Слова\"\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n value = models.TextField(\n verbose_name='Значение'\n )\n order = models.PositiveIntegerField(\n verbose_name=\"Порядок\",\n default=0\n )\n examples = JSONField(\n null=True,\n blank=True\n )\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n class Meta:\n ordering = [\"order\"]\n verbose_name = \"Доп. значение\"\n verbose_name_plural = \"Доп. значения\"\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n audio = models.FileField(\n upload_to='media/audio',\n verbose_name='Произношение'\n )\n raw_od_data = JSONField(\n verbose_name='Сырые данные с OD',\n blank=True,\n null=True\n )\n is_active = models.BooleanField(\n default=True,\n verbose_name='Используется'\n )\n\n def __str__(self):\n return \"Произношение {}\".format(self.word)\n\n class Meta:\n verbose_name = \"Произношение\"\n verbose_name_plural = \"Произношения\"\n\n\nclass PronunciationMeta(object):\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n user = models.ForeignKey(\n \"auth.User\",\n on_delete=models.CASCADE,\n verbose_name='Пользователь'\n )\n is_user_know_meaning = models.BooleanField(\n default=False,\n verbose_name='Выучил значение'\n )\n is_user_know_pronunciation = models.BooleanField(\n default=False,\n verbose_name='Выучил произношение'\n )\n usage_count = models.PositiveIntegerField(\n default=0,\n verbose_name='Количество показов'\n )\n last_usage_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата последнего показа'\n )\n preferred_pronunciation = models.PositiveIntegerField(\n default=0,\n verbose_name='forvo id препочтительного произношения',\n )\n training_session = models.BooleanField(\n default=False,\n blank=False,\n verbose_name='Сеанс обучения'\n )\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(\n settings.BASE_DIR, 'media', 'forvo', '{}.json'.format(word_str)\n )\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds', word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds', word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n\n ret = []\n ct 
= 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in forvo_meta.get('items') or []:\n\n if item.get('code', '') != 'en' or item.get(\n 'country', '') != 'United States':\n continue\n\n if ct > slen-1:\n break\n\n sound_file = sounds[ct]\n\n is_best = self.preferred_pronunciation == item['id']\n\n if is_best:\n prefered_detected = True\n\n ret.append({\n 'id': item['id'],\n 'by': item['username'],\n 'sex': item['sex'],\n 'src': sound_file,\n 'best': is_best\n })\n\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return \"Статистика слова {}\".format(self.word)\n\n class Meta:\n verbose_name = \"Статистика\"\n verbose_name_plural = \"Статистика\"\n",
"step-ids": [
13,
14,
15,
17,
19
]
}
|
[
13,
14,
15,
17,
19
] |
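The get_pronunciations method in the record above keeps only US-English forvo entries, pairs them in order with the locally stored mp3 files, stops after four, and falls back to marking the first entry as preferred when none matches preferred_pronunciation. A minimal standalone sketch of that selection step, using plain dicts instead of Django models (all names here are illustrative, not part of the original project):

```
# Illustrative sketch only: mirrors the filtering in get_pronunciations above,
# without Django models or the filesystem/JSON lookups.
def pick_pronunciations(forvo_items, sounds, preferred_id=0):
    ret, ct = [], 0
    for item in forvo_items:
        # keep only US-English recordings, like the record's code/country check
        if item.get('code') != 'en' or item.get('country') != 'United States':
            continue
        if ct > len(sounds) - 1:   # no more local mp3 files to pair with
            break
        is_best = preferred_id == item['id']
        ret.append({'id': item['id'], 'src': sounds[ct], 'best': is_best})
        ct += 1
        if ct == 4:                # at most four pronunciations
            break
    if ret and not any(r['best'] for r in ret):
        ret[0]['best'] = True      # default to the first entry as preferred
    return ret


print(pick_pronunciations(
    [{'id': 1, 'code': 'en', 'country': 'United States'},
     {'id': 2, 'code': 'en', 'country': 'United Kingdom'},
     {'id': 3, 'code': 'en', 'country': 'United States'}],
    ['a.mp3', 'b.mp3'],
))
# keeps ids 1 and 3, and marks the first one as best
```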
def filtra_acima(wires, origem):
return [wire for wire in wires if wire[0] > origem ]
def filtra_abaixo(wires, destino):
return [wire for wire in wires if wire[1] < destino ]
def calculate(wires):
count = 0
for i in xrange(len(wires)):
wires_acima = filtra_acima(wires, wires[i][0])
wires_abaixo = filtra_abaixo(wires_acima, wires[i][1])
count += len(wires_abaixo)
return count
#print calculate([(1,3), (2,5), (4,1), (6,7)])
#print calculate([(1,10), (5,5), (7,7)])
#print calculate([(1,1), (2,2)])
def read_input(n):
wires = []
for i in xrange(n):
o, d = map(int, raw_input().split())
wires.append( (o,d) )
return wires
for case_number in xrange(int(raw_input())):
n, = map(int, raw_input().split())
wires = read_input(n)
result = calculate(wires)
print 'Case #%d: %s' % (case_number+1, result)
|
normal
|
{
"blob_id": "fa8d603fbea287161d31499f96a7fe7e56e8eaa1",
"index": 129,
"step-1": "def filtra_acima(wires, origem):\n return [wire for wire in wires if wire[0] > origem ]\n\ndef filtra_abaixo(wires, destino):\n return [wire for wire in wires if wire[1] < destino ]\n\ndef calculate(wires):\n count = 0\n for i in xrange(len(wires)):\n wires_acima = filtra_acima(wires, wires[i][0])\n wires_abaixo = filtra_abaixo(wires_acima, wires[i][1])\n \n count += len(wires_abaixo)\n \n return count\n \n#print calculate([(1,3), (2,5), (4,1), (6,7)])\n#print calculate([(1,10), (5,5), (7,7)])\n#print calculate([(1,1), (2,2)])\n\ndef read_input(n):\n wires = []\n for i in xrange(n):\n o, d = map(int, raw_input().split())\n wires.append( (o,d) )\n \n return wires\n\nfor case_number in xrange(int(raw_input())):\n n, = map(int, raw_input().split())\n wires = read_input(n)\n result = calculate(wires)\n print 'Case #%d: %s' % (case_number+1, result)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
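The wire-crossing counter in the record above is Python 2 code (xrange, raw_input, print statement), so it does not run on a current interpreter as written. A hedged Python 3 sketch of just the counting logic, reusing the sample cases from the commented-out tests:

```
# Python 3 sketch of the O(n^2) crossing count above; the stdin handling is omitted.
def filtra_acima(wires, origem):
    return [wire for wire in wires if wire[0] > origem]


def filtra_abaixo(wires, destino):
    return [wire for wire in wires if wire[1] < destino]


def calculate(wires):
    count = 0
    for origem, destino in wires:
        # a crossing is a wire that starts above this one and ends below it
        count += len(filtra_abaixo(filtra_acima(wires, origem), destino))
    return count


print(calculate([(1, 3), (2, 5), (4, 1), (6, 7)]))  # 2
print(calculate([(1, 10), (5, 5), (7, 7)]))         # 2
print(calculate([(1, 1), (2, 2)]))                  # 0
```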
<|reserved_special_token_0|>
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
<|reserved_special_token_0|>
def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
model_channel_key: str, model_blessing_channel_key: str):
all_models = input_dict[model_channel_key]
all_models.sort(key=lambda a: a.id, reverse=True)
all_model_blessings = input_dict[model_blessing_channel_key]
all_blessed_model_ids = {a.get_int_custom_property(
_CURRENT_MODEL_ID): a for a in all_model_blessings if a.
get_int_custom_property(_BLESSED) == 1}
result = {model_channel_key: [], model_blessing_channel_key: []}
for model in all_models:
if model.id in all_blessed_model_ids:
result[model_channel_key] = [model]
model_blessing = all_blessed_model_ids[model.id]
result[model_blessing_channel_key] = [model_blessing]
break
return result
@doc_controls.do_not_generate_docs
def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict
[str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]
]:
"""Resolves artifacts from channels by querying MLMD.
Args:
store: An MLMD MetadataStore object.
input_dict: The input_dict to resolve from.
Returns:
The latest blessed Model and its corresponding ModelBlessing, respectively
in the same input channel they were contained to.
Raises:
RuntimeError: if input_dict contains unsupported artifact types.
"""
model_channel_key = None
model_blessing_channel_key = None
assert len(input_dict) == 2, 'Expecting 2 input Channels'
for k, artifact_list in input_dict.items():
if not artifact_list:
return {key: [] for key in input_dict}
artifact = artifact_list[0]
if issubclass(type(artifact), standard_artifacts.Model):
model_channel_key = k
elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
model_blessing_channel_key = k
else:
raise RuntimeError(
'Only expecting Model or ModelBlessing, got %s' %
artifact.TYPE_NAME)
assert model_channel_key is not None, 'Expecting Model as input'
assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'
result = self._resolve(input_dict, model_channel_key,
model_blessing_channel_key)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
"""LatestBlessedModelStrategy resolves the latest blessed Model artifact.
Note that this ResolverStrategy is experimental and is subject to change in
terms of both interface and implementation.
Don't construct LatestBlessedModelStrategy directly, example usage:
```
model_resolver = Resolver(
strategy_class=LatestBlessedModelStrategy,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing),
).with_id('latest_blessed_model_resolver')
model_resolver.outputs['model']
```
"""
def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
model_channel_key: str, model_blessing_channel_key: str):
all_models = input_dict[model_channel_key]
all_models.sort(key=lambda a: a.id, reverse=True)
all_model_blessings = input_dict[model_blessing_channel_key]
all_blessed_model_ids = {a.get_int_custom_property(
_CURRENT_MODEL_ID): a for a in all_model_blessings if a.
get_int_custom_property(_BLESSED) == 1}
result = {model_channel_key: [], model_blessing_channel_key: []}
for model in all_models:
if model.id in all_blessed_model_ids:
result[model_channel_key] = [model]
model_blessing = all_blessed_model_ids[model.id]
result[model_blessing_channel_key] = [model_blessing]
break
return result
@doc_controls.do_not_generate_docs
def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict
[str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]
]:
"""Resolves artifacts from channels by querying MLMD.
Args:
store: An MLMD MetadataStore object.
input_dict: The input_dict to resolve from.
Returns:
The latest blessed Model and its corresponding ModelBlessing, respectively
in the same input channel they were contained to.
Raises:
RuntimeError: if input_dict contains unsupported artifact types.
"""
model_channel_key = None
model_blessing_channel_key = None
assert len(input_dict) == 2, 'Expecting 2 input Channels'
for k, artifact_list in input_dict.items():
if not artifact_list:
return {key: [] for key in input_dict}
artifact = artifact_list[0]
if issubclass(type(artifact), standard_artifacts.Model):
model_channel_key = k
elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
model_blessing_channel_key = k
else:
raise RuntimeError(
'Only expecting Model or ModelBlessing, got %s' %
artifact.TYPE_NAME)
assert model_channel_key is not None, 'Expecting Model as input'
assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'
result = self._resolve(input_dict, model_channel_key,
model_blessing_channel_key)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from tfx.components.evaluator import constants as eval_consts
_CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY
_BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY
except ImportError:
_CURRENT_MODEL_ID = 'current_model_id'
_BLESSED = 'blessed'
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
"""LatestBlessedModelStrategy resolves the latest blessed Model artifact.
Note that this ResolverStrategy is experimental and is subject to change in
terms of both interface and implementation.
Don't construct LatestBlessedModelStrategy directly, example usage:
```
model_resolver = Resolver(
strategy_class=LatestBlessedModelStrategy,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing),
).with_id('latest_blessed_model_resolver')
model_resolver.outputs['model']
```
"""
def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
model_channel_key: str, model_blessing_channel_key: str):
all_models = input_dict[model_channel_key]
all_models.sort(key=lambda a: a.id, reverse=True)
all_model_blessings = input_dict[model_blessing_channel_key]
all_blessed_model_ids = {a.get_int_custom_property(
_CURRENT_MODEL_ID): a for a in all_model_blessings if a.
get_int_custom_property(_BLESSED) == 1}
result = {model_channel_key: [], model_blessing_channel_key: []}
for model in all_models:
if model.id in all_blessed_model_ids:
result[model_channel_key] = [model]
model_blessing = all_blessed_model_ids[model.id]
result[model_blessing_channel_key] = [model_blessing]
break
return result
@doc_controls.do_not_generate_docs
def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict
[str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]
]:
"""Resolves artifacts from channels by querying MLMD.
Args:
store: An MLMD MetadataStore object.
input_dict: The input_dict to resolve from.
Returns:
The latest blessed Model and its corresponding ModelBlessing, respectively
in the same input channel they were contained to.
Raises:
RuntimeError: if input_dict contains unsupported artifact types.
"""
model_channel_key = None
model_blessing_channel_key = None
assert len(input_dict) == 2, 'Expecting 2 input Channels'
for k, artifact_list in input_dict.items():
if not artifact_list:
return {key: [] for key in input_dict}
artifact = artifact_list[0]
if issubclass(type(artifact), standard_artifacts.Model):
model_channel_key = k
elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
model_blessing_channel_key = k
else:
raise RuntimeError(
'Only expecting Model or ModelBlessing, got %s' %
artifact.TYPE_NAME)
assert model_channel_key is not None, 'Expecting Model as input'
assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'
result = self._resolve(input_dict, model_channel_key,
model_blessing_channel_key)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Dict, List, Optional
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.types import standard_artifacts
from tfx.utils import doc_controls
import ml_metadata as mlmd
try:
from tfx.components.evaluator import constants as eval_consts
_CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY
_BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY
except ImportError:
_CURRENT_MODEL_ID = 'current_model_id'
_BLESSED = 'blessed'
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
"""LatestBlessedModelStrategy resolves the latest blessed Model artifact.
Note that this ResolverStrategy is experimental and is subject to change in
terms of both interface and implementation.
Don't construct LatestBlessedModelStrategy directly, example usage:
```
model_resolver = Resolver(
strategy_class=LatestBlessedModelStrategy,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing),
).with_id('latest_blessed_model_resolver')
model_resolver.outputs['model']
```
"""
def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
model_channel_key: str, model_blessing_channel_key: str):
all_models = input_dict[model_channel_key]
all_models.sort(key=lambda a: a.id, reverse=True)
all_model_blessings = input_dict[model_blessing_channel_key]
all_blessed_model_ids = {a.get_int_custom_property(
_CURRENT_MODEL_ID): a for a in all_model_blessings if a.
get_int_custom_property(_BLESSED) == 1}
result = {model_channel_key: [], model_blessing_channel_key: []}
for model in all_models:
if model.id in all_blessed_model_ids:
result[model_channel_key] = [model]
model_blessing = all_blessed_model_ids[model.id]
result[model_blessing_channel_key] = [model_blessing]
break
return result
@doc_controls.do_not_generate_docs
def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict
[str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]
]:
"""Resolves artifacts from channels by querying MLMD.
Args:
store: An MLMD MetadataStore object.
input_dict: The input_dict to resolve from.
Returns:
The latest blessed Model and its corresponding ModelBlessing, respectively
in the same input channel they were contained to.
Raises:
RuntimeError: if input_dict contains unsupported artifact types.
"""
model_channel_key = None
model_blessing_channel_key = None
assert len(input_dict) == 2, 'Expecting 2 input Channels'
for k, artifact_list in input_dict.items():
if not artifact_list:
return {key: [] for key in input_dict}
artifact = artifact_list[0]
if issubclass(type(artifact), standard_artifacts.Model):
model_channel_key = k
elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
model_blessing_channel_key = k
else:
raise RuntimeError(
'Only expecting Model or ModelBlessing, got %s' %
artifact.TYPE_NAME)
assert model_channel_key is not None, 'Expecting Model as input'
assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'
result = self._resolve(input_dict, model_channel_key,
model_blessing_channel_key)
return result
<|reserved_special_token_1|>
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental Resolver for getting the latest artifact."""
from typing import Dict, List, Optional
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.types import standard_artifacts
from tfx.utils import doc_controls
import ml_metadata as mlmd
try:
from tfx.components.evaluator import constants as eval_consts # pylint: disable=g-import-not-at-top
_CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY
_BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY
except ImportError:
# ml-pipelines-sdk package doesn't have tfx.components.
_CURRENT_MODEL_ID = 'current_model_id'
_BLESSED = 'blessed'
class LatestBlessedModelStrategy(resolver.ResolverStrategy):
"""LatestBlessedModelStrategy resolves the latest blessed Model artifact.
Note that this ResolverStrategy is experimental and is subject to change in
terms of both interface and implementation.
Don't construct LatestBlessedModelStrategy directly, example usage:
```
model_resolver = Resolver(
strategy_class=LatestBlessedModelStrategy,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing),
).with_id('latest_blessed_model_resolver')
model_resolver.outputs['model']
```
"""
def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
model_channel_key: str, model_blessing_channel_key: str):
all_models = input_dict[model_channel_key]
all_models.sort(key=lambda a: a.id, reverse=True)
all_model_blessings = input_dict[model_blessing_channel_key]
# Makes a dict of {model_id : ModelBlessing artifact} for blessed models.
all_blessed_model_ids = {
a.get_int_custom_property(_CURRENT_MODEL_ID): a
for a in all_model_blessings
if a.get_int_custom_property(_BLESSED) == 1}
result = {model_channel_key: [], model_blessing_channel_key: []}
# Iterates all models, if blessed, set as result. As the model list was
# sorted, it is guaranteed to get the latest blessed model.
for model in all_models:
if model.id in all_blessed_model_ids:
result[model_channel_key] = [model]
model_blessing = all_blessed_model_ids[model.id]
result[model_blessing_channel_key] = [model_blessing]
break
return result
@doc_controls.do_not_generate_docs
def resolve_artifacts(
self, store: mlmd.MetadataStore,
input_dict: Dict[str, List[types.Artifact]]
) -> Optional[Dict[str, List[types.Artifact]]]:
"""Resolves artifacts from channels by querying MLMD.
Args:
store: An MLMD MetadataStore object.
input_dict: The input_dict to resolve from.
Returns:
The latest blessed Model and its corresponding ModelBlessing, respectively
in the same input channel they were contained to.
Raises:
RuntimeError: if input_dict contains unsupported artifact types.
"""
model_channel_key = None
model_blessing_channel_key = None
assert len(input_dict) == 2, 'Expecting 2 input Channels'
for k, artifact_list in input_dict.items():
if not artifact_list:
# If model or model blessing channel has no artifacts, the min_count
# can not be met, short cut to return empty dict here.
return {key: [] for key in input_dict}
artifact = artifact_list[0]
if issubclass(type(artifact), standard_artifacts.Model):
model_channel_key = k
elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
model_blessing_channel_key = k
else:
raise RuntimeError('Only expecting Model or ModelBlessing, got %s' %
artifact.TYPE_NAME)
assert model_channel_key is not None, 'Expecting Model as input'
assert model_blessing_channel_key is not None, ('Expecting ModelBlessing as'
' input')
result = self._resolve(input_dict, model_channel_key,
model_blessing_channel_key)
return result
|
flexible
|
{
"blob_id": "30df17d636c33d2824aad7d7ef6aae7db83615ec",
"index": 8058,
"step-1": "<mask token>\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n <mask token>\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n",
"step-2": "<mask token>\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n",
"step-3": "<mask token>\ntry:\n from tfx.components.evaluator import constants as eval_consts\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n",
"step-4": "<mask token>\nfrom typing import Dict, List, Optional\nfrom tfx import types\nfrom tfx.dsl.components.common import resolver\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import doc_controls\nimport ml_metadata as mlmd\ntry:\n from tfx.components.evaluator import constants as eval_consts\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n",
"step-5": "# Copyright 2021 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental Resolver for getting the latest artifact.\"\"\"\n\nfrom typing import Dict, List, Optional\n\nfrom tfx import types\nfrom tfx.dsl.components.common import resolver\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import doc_controls\n\nimport ml_metadata as mlmd\n\ntry:\n from tfx.components.evaluator import constants as eval_consts # pylint: disable=g-import-not-at-top\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n # ml-pipelines-sdk package doesn't have tfx.components.\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n\n # Makes a dict of {model_id : ModelBlessing artifact} for blessed models.\n all_blessed_model_ids = {\n a.get_int_custom_property(_CURRENT_MODEL_ID): a\n for a in all_model_blessings\n if a.get_int_custom_property(_BLESSED) == 1}\n\n result = {model_channel_key: [], model_blessing_channel_key: []}\n # Iterates all models, if blessed, set as result. 
As the model list was\n # sorted, it is guaranteed to get the latest blessed model.\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(\n self, store: mlmd.MetadataStore,\n input_dict: Dict[str, List[types.Artifact]]\n ) -> Optional[Dict[str, List[types.Artifact]]]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n # If model or model blessing channel has no artifacts, the min_count\n # can not be met, short cut to return empty dict here.\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError('Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, ('Expecting ModelBlessing as'\n ' input')\n\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
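Stripped of the TFX plumbing, the core of LatestBlessedModelStrategy._resolve above is "take the newest model whose id appears in a blessing with blessed == 1". A self-contained sketch of that selection on plain dataclasses (FakeModel and FakeBlessing are made-up stand-ins, not TFX types):

```
from dataclasses import dataclass


@dataclass
class FakeModel:          # stand-in for a Model artifact; only the id matters here
    id: int


@dataclass
class FakeBlessing:       # stand-in for a ModelBlessing artifact
    current_model_id: int
    blessed: int


def latest_blessed(models, blessings):
    # newest first, mirroring all_models.sort(key=lambda a: a.id, reverse=True)
    models = sorted(models, key=lambda m: m.id, reverse=True)
    blessed_ids = {b.current_model_id: b for b in blessings if b.blessed == 1}
    for m in models:
        if m.id in blessed_ids:
            return m, blessed_ids[m.id]
    return None, None


print(latest_blessed(
    [FakeModel(1), FakeModel(2), FakeModel(3)],
    [FakeBlessing(1, 1), FakeBlessing(3, 0), FakeBlessing(2, 1)],
))  # model 3 is unblessed, so model 2 and its blessing are returned
```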
# Identify a vowel
class MainInit(object):
def __init__(self):
self.vowel = str(input("Please type the character: \n"))
if len(self.vowel) > 1:
print("Invalid number of character")
else:
Vowel(self.vowel)
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print("The vowel is ", self.list[j])
else:
continue
MainInit()
#
#
# class MainVowel(object):
# def __init__(self):
# string = str(input("Please type the character: \n"))
# if len(string) > 1:
# print("Invalid number of character")
# else:
# VerifyVowel(string)
#
#
# class VerifyVowel(object):
# def __init__(self, string):
# self.string = string
# if len(string) > 1:
# print("Invalid number of character")
# else:
# if string == 'A' or string == 'a':
# print("The vowel is: ", string)
# elif string == 'E' or string == 'e':
# print("The vowel is: ", string)
# elif string == 'I' or string == 'i':
# print("The vowel is: ", string)
# elif string == 'O' or string == 'o':
# print("The vowel is: ", string)
# elif string == 'U' or string == 'u':
# print("The vowel is: ", string)
# else:
# print("No valid")
#
#
# MainVowel()
|
normal
|
{
"blob_id": "8d9f4bce998857bcc7bc2fda0b519f370bf957fe",
"index": 1497,
"step-1": "<mask token>\n\n\nclass Vowel(object):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\n<mask token>\n",
"step-3": "class MainInit(object):\n <mask token>\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\n<mask token>\n",
"step-4": "class MainInit(object):\n\n def __init__(self):\n self.vowel = str(input('Please type the character: \\n'))\n if len(self.vowel) > 1:\n print('Invalid number of character')\n else:\n Vowel(self.vowel)\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\nMainInit()\n",
"step-5": "# Identify a vowel\r\n\r\n\r\nclass MainInit(object):\r\n def __init__(self):\r\n self.vowel = str(input(\"Please type the character: \\n\"))\r\n if len(self.vowel) > 1:\r\n print(\"Invalid number of character\")\r\n else:\r\n Vowel(self.vowel)\r\n\r\n\r\nclass Vowel(object):\r\n def __init__(self, vowels):\r\n self.vowels = vowels\r\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\r\n for j in range(len(self.list)):\r\n if self.vowels == self.list[j]:\r\n print(\"The vowel is \", self.list[j])\r\n else:\r\n continue\r\n\r\n\r\nMainInit()\r\n\r\n\r\n#\r\n#\r\n# class MainVowel(object):\r\n# def __init__(self):\r\n# string = str(input(\"Please type the character: \\n\"))\r\n# if len(string) > 1:\r\n# print(\"Invalid number of character\")\r\n# else:\r\n# VerifyVowel(string)\r\n#\r\n#\r\n# class VerifyVowel(object):\r\n# def __init__(self, string):\r\n# self.string = string\r\n# if len(string) > 1:\r\n# print(\"Invalid number of character\")\r\n# else:\r\n# if string == 'A' or string == 'a':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'E' or string == 'e':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'I' or string == 'i':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'O' or string == 'o':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'U' or string == 'u':\r\n# print(\"The vowel is: \", string)\r\n# else:\r\n# print(\"No valid\")\r\n#\r\n#\r\n# MainVowel()\r\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
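The Vowel class in the record above walks an index loop over a ten-element list; the same check can be expressed as a set-membership test. A small sketch (the function name is illustrative):

```
VOWELS = set('aeiouAEIOU')


def is_vowel(ch: str) -> bool:
    # exactly one character and present in the vowel set
    return len(ch) == 1 and ch in VOWELS


for ch in ('a', 'E', 'x', 'ab'):
    print(ch, is_vowel(ch))   # a True, E True, x False, ab False
```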
class Node:
def __init__(self, value):
self.value = value
self.next = None
<|reserved_special_token_0|>
def array_from_linked_list(head):
arr = []
cur = head
while cur:
arr.append(cur.value)
cur = cur.next
return arr
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, value):
self.value = value
self.next = None
def linked_list_from_array(arr):
head = Node(arr[0])
cur = head
for i in range(1, len(arr)):
cur.next = Node(arr[i])
cur = cur.next
return head
def array_from_linked_list(head):
arr = []
cur = head
while cur:
arr.append(cur.value)
cur = cur.next
return arr
def reverse_linked_list(head):
prev = None
cur = head
while cur:
next = cur.next
cur.next = prev
prev = cur
cur = next
return prev
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, value):
self.value = value
self.next = None
def linked_list_from_array(arr):
head = Node(arr[0])
cur = head
for i in range(1, len(arr)):
cur.next = Node(arr[i])
cur = cur.next
return head
def array_from_linked_list(head):
arr = []
cur = head
while cur:
arr.append(cur.value)
cur = cur.next
return arr
def reverse_linked_list(head):
prev = None
cur = head
while cur:
next = cur.next
cur.next = prev
prev = cur
cur = next
return prev
<|reserved_special_token_0|>
print(array)
print(rev_array)
def reverse_linked_list_section(head, start, end):
pass
<|reserved_special_token_1|>
class Node:
def __init__(self, value):
self.value = value
self.next = None
def linked_list_from_array(arr):
head = Node(arr[0])
cur = head
for i in range(1, len(arr)):
cur.next = Node(arr[i])
cur = cur.next
return head
def array_from_linked_list(head):
arr = []
cur = head
while cur:
arr.append(cur.value)
cur = cur.next
return arr
def reverse_linked_list(head):
prev = None
cur = head
while cur:
next = cur.next
cur.next = prev
prev = cur
cur = next
return prev
array = [9, 1, 2, 3, 6, 8, 11, 5]
ll = linked_list_from_array(array)
rev_ll = reverse_linked_list(ll)
rev_array = array_from_linked_list(rev_ll)
print(array)
print(rev_array)
def reverse_linked_list_section(head, start, end):
pass
<|reserved_special_token_1|>
class Node():
def __init__(self, value):
self.value = value
self.next = None
def linked_list_from_array(arr):
head = Node(arr[0])
cur = head
for i in range(1, len(arr)):
cur.next = Node(arr[i])
cur = cur.next
return head
def array_from_linked_list(head):
arr = []
cur = head
while cur:
arr.append(cur.value)
cur = cur.next
return arr
def reverse_linked_list(head):
prev = None
cur = head
while cur:
next = cur.next # save
cur.next = prev # assign next to prev
prev = cur
cur = next
return prev
array = [9, 1, 2, 3, 6, 8, 11, 5]
ll = linked_list_from_array(array)
rev_ll = reverse_linked_list(ll)
rev_array = array_from_linked_list(rev_ll)
print(array)
print(rev_array)
def reverse_linked_list_section(head, start, end):
pass
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# (0, 3) => [3, 2, 1, 0, 4, 5, 6, 7, 8, 9]
# (2, 4) => [0, 1, 4, 3, 2, 5, 6, 7, 8, 9]
# (6, 9) => [0, 1, 2, 3, 4, 5, 9, 8, 7, 6]
|
flexible
|
{
"blob_id": "e1eb86480fa4eadabf05f10cc54ff9daa790438c",
"index": 3935,
"step-1": "class Node:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\n<mask token>\n\n\ndef array_from_linked_list(head):\n arr = []\n cur = head\n while cur:\n arr.append(cur.value)\n cur = cur.next\n return arr\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\ndef linked_list_from_array(arr):\n head = Node(arr[0])\n cur = head\n for i in range(1, len(arr)):\n cur.next = Node(arr[i])\n cur = cur.next\n return head\n\n\ndef array_from_linked_list(head):\n arr = []\n cur = head\n while cur:\n arr.append(cur.value)\n cur = cur.next\n return arr\n\n\ndef reverse_linked_list(head):\n prev = None\n cur = head\n while cur:\n next = cur.next\n cur.next = prev\n prev = cur\n cur = next\n return prev\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\ndef linked_list_from_array(arr):\n head = Node(arr[0])\n cur = head\n for i in range(1, len(arr)):\n cur.next = Node(arr[i])\n cur = cur.next\n return head\n\n\ndef array_from_linked_list(head):\n arr = []\n cur = head\n while cur:\n arr.append(cur.value)\n cur = cur.next\n return arr\n\n\ndef reverse_linked_list(head):\n prev = None\n cur = head\n while cur:\n next = cur.next\n cur.next = prev\n prev = cur\n cur = next\n return prev\n\n\n<mask token>\nprint(array)\nprint(rev_array)\n\n\ndef reverse_linked_list_section(head, start, end):\n pass\n",
"step-4": "class Node:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\ndef linked_list_from_array(arr):\n head = Node(arr[0])\n cur = head\n for i in range(1, len(arr)):\n cur.next = Node(arr[i])\n cur = cur.next\n return head\n\n\ndef array_from_linked_list(head):\n arr = []\n cur = head\n while cur:\n arr.append(cur.value)\n cur = cur.next\n return arr\n\n\ndef reverse_linked_list(head):\n prev = None\n cur = head\n while cur:\n next = cur.next\n cur.next = prev\n prev = cur\n cur = next\n return prev\n\n\narray = [9, 1, 2, 3, 6, 8, 11, 5]\nll = linked_list_from_array(array)\nrev_ll = reverse_linked_list(ll)\nrev_array = array_from_linked_list(rev_ll)\nprint(array)\nprint(rev_array)\n\n\ndef reverse_linked_list_section(head, start, end):\n pass\n",
"step-5": "class Node():\n def __init__(self, value):\n self.value = value\n self.next = None\n\ndef linked_list_from_array(arr):\n head = Node(arr[0])\n cur = head\n \n for i in range(1, len(arr)):\n cur.next = Node(arr[i])\n cur = cur.next\n \n return head\n\ndef array_from_linked_list(head):\n arr = []\n cur = head\n\n while cur:\n arr.append(cur.value)\n cur = cur.next\n\n return arr\n\ndef reverse_linked_list(head):\n prev = None\n cur = head\n\n while cur:\n next = cur.next # save\n cur.next = prev # assign next to prev\n prev = cur\n cur = next\n\n return prev\n\narray = [9, 1, 2, 3, 6, 8, 11, 5]\nll = linked_list_from_array(array)\nrev_ll = reverse_linked_list(ll)\nrev_array = array_from_linked_list(rev_ll)\n\nprint(array)\nprint(rev_array)\n\ndef reverse_linked_list_section(head, start, end):\n pass\n\n# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n# (0, 3) => [3, 2, 1, 0, 4, 5, 6, 7, 8, 9]\n# (2, 4) => [0, 1, 4, 3, 2, 5, 6, 7, 8, 9]\n# (6, 9) => [0, 1, 2, 3, 4, 5, 9, 8, 7, 6]\n\n",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
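The record above leaves reverse_linked_list_section as a pass stub, with the intended behaviour only sketched in the trailing comments ((0, 3), (2, 4), (6, 9) on [0..9]). One hedged way to implement a section reversal consistent with those inclusive-index examples:

```
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None


def reverse_linked_list_section(head, start, end):
    # Reverses the nodes at positions start..end (inclusive, 0-based).
    dummy = Node(None)
    dummy.next = head
    before = dummy
    for _ in range(start):            # node just before the section
        before = before.next
    prev, cur = None, before.next
    for _ in range(end - start + 1):  # standard in-place reversal of the section
        nxt = cur.next
        cur.next = prev
        prev = cur
        cur = nxt
    before.next.next = cur            # old section head now links past the section
    before.next = prev                # splice the reversed section back in
    return dummy.next


def from_array(arr):
    head = Node(arr[0])
    cur = head
    for v in arr[1:]:
        cur.next = Node(v)
        cur = cur.next
    return head


def to_array(head):
    out = []
    while head:
        out.append(head.value)
        head = head.next
    return out


print(to_array(reverse_linked_list_section(from_array(list(range(10))), 2, 4)))
# [0, 1, 4, 3, 2, 5, 6, 7, 8, 9], matching the (2, 4) example in the record's comments
```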
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) ->List[str
]:
def convert(word):
table = {}
count, converted = 0, ''
for w in word:
if w in table:
converted += table[w]
else:
converted += str(count)
table[w] = str(count)
count += 1
return converted
p = convert(pattern)
answer = []
for word in words:
if p == convert(word):
answer.append(word)
return answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
def convert(word):
table = {}
count, converted = 0, ''
for w in word:
if w in table:
converted += table[w]
else:
converted += str(count)
table[w] = str(count)
count += 1
return converted
p = convert(pattern)
answer = []
for word in words:
if p == convert(word):
answer.append(word)
return answer
"""
[Fast solution]
- Just compare lengths using zip!!!
class Solution:
def findAndReplacePattern(self, w: List[str], p: str) -> List[str]:
return [i for i in w if len(set(zip(p,i)))==len(set(p))==len(set(i))]
"""
|
flexible
|
{
"blob_id": "e9ea48dec40e75f2fc73f8dcb3b5b975065cf8af",
"index": 5854,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def findAndReplacePattern(self, words: List[str], pattern: str) ->List[str\n ]:\n\n def convert(word):\n table = {}\n count, converted = 0, ''\n for w in word:\n if w in table:\n converted += table[w]\n else:\n converted += str(count)\n table[w] = str(count)\n count += 1\n return converted\n p = convert(pattern)\n answer = []\n for word in words:\n if p == convert(word):\n answer.append(word)\n return answer\n\n\n<mask token>\n",
"step-4": "class Solution:\n def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:\n def convert(word):\n table = {}\n count, converted = 0, ''\n \n for w in word:\n if w in table:\n converted += table[w]\n else:\n converted += str(count)\n table[w] = str(count)\n count += 1\n return converted\n \n p = convert(pattern)\n answer = []\n for word in words:\n if p == convert(word):\n answer.append(word)\n \n return answer\n\n\"\"\"\n[빠른 풀이]\n- zip을 이용해서 길이만 비교!!!\n\nclass Solution:\n def findAndReplacePattern(self, w: List[str], p: str) -> List[str]:\n\t\t\t\t\treturn [i for i in w if len(set(zip(p,i)))==len(set(p))==len(set(i))]\n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
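The findAndReplacePattern solution above normalises each word to a canonical digit string and compares it with the normalised pattern. A self-contained demo of the same idea, runnable outside the LeetCode harness (no List[...] annotation needed; function names are illustrative):

```
def normalize(word):
    # map each distinct character to the order in which it first appears
    table, out = {}, []
    for ch in word:
        table.setdefault(ch, str(len(table)))
        out.append(table[ch])
    return '.'.join(out)   # separator keeps the encoding unambiguous for 10+ symbols


def find_and_replace_pattern(words, pattern):
    p = normalize(pattern)
    return [w for w in words if normalize(w) == p]


print(find_and_replace_pattern(['abc', 'deq', 'mee', 'aqq', 'dkd', 'ccc'], 'abb'))
# ['mee', 'aqq']
```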
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
fs = open('./src/keywords.txt', 'rb')
keywords = fs.read().decode('utf-8').split(',')
fs.close()
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features['contains %s' % word] = word in words
return features
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
regex = re.compile('[一-龥]')
p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',
version='%prog 0.1', prog='url-tagger')
p.add_option('--url', '-u', help='Your url')
p.add_option('--file', '-f', help='Your url file. One line one url')
options, arguments = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print('%s: %s' % (key, value))
if key is 'url':
url_list.append(value)
else:
url_file = open(value, 'rb+')
for line in url_file.readlines():
url_list.append(str(line, encoding='utf-8').strip())
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
html = str(response.content, encoding=response.apparent_encoding,
errors='ignore')
soup = BeautifulSoup(html, 'lxml')
for script in soup(['script', 'style']):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split
(' '))
text = ''.join(chunk for chunk in chunks if chunk)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),
url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
results = [(url, classifier.classify(find_features(jieba.lcut(''.join(
regex.findall(data)))))) for url, data in data_list]
for url, category in results:
print('%s: %s' % (url, category))
<|reserved_special_token_1|>
import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio
if __name__ == '__main__':
fs = open('./src/keywords.txt', 'rb')
keywords = fs.read().decode('utf-8').split(',')
fs.close()
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features['contains %s' % word] = word in words
return features
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
regex = re.compile('[一-龥]')
p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',
version='%prog 0.1', prog='url-tagger')
p.add_option('--url', '-u', help='Your url')
p.add_option('--file', '-f', help='Your url file. One line one url')
options, arguments = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print('%s: %s' % (key, value))
            if key == 'url':
url_list.append(value)
else:
url_file = open(value, 'rb+')
for line in url_file.readlines():
url_list.append(str(line, encoding='utf-8').strip())
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
html = str(response.content, encoding=response.apparent_encoding,
errors='ignore')
soup = BeautifulSoup(html, 'lxml')
for script in soup(['script', 'style']):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split
(' '))
text = ''.join(chunk for chunk in chunks if chunk)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),
url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
results = [(url, classifier.classify(find_features(jieba.lcut(''.join(
regex.findall(data)))))) for url, data in data_list]
for url, category in results:
print('%s: %s' % (url, category))
<|reserved_special_token_1|>
#!/usr/bin/env python3
import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio
if __name__ == '__main__':
    # Load the list of 10,000 keywords
fs = open("./src/keywords.txt", "rb")
keywords = fs.read().decode("utf-8").split(",")
fs.close()
    # Extract the features of a tokenized document
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features["contains %s" % word] = (word in words)
return features
    # Load the pre-built (pickled) nltk classifier
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
    # Regex matching Chinese characters
regex = re.compile("[\u4e00-\u9fa5]")
p = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2", version="%prog 0.1", prog="url-tagger")
p.add_option("--url", "-u", help="Your url")
p.add_option("--file", "-f", help="Your url file. One line one url")
(options, arguments) = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print("%s: %s" % (key, value))
if key is "url":
url_list.append(value)
else:
url_file = open(value, "rb+")
for line in url_file.readlines():
url_list.append(str(line, encoding="utf-8").strip())
    # Issue the HTTP requests asynchronously
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
# print(response.apparent_encoding)
html = str(response.content, encoding=response.apparent_encoding, errors="ignore")
soup = BeautifulSoup(html, "lxml")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "".join(chunk for chunk in chunks if chunk)
# print(text)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
    # Classify each fetched document with the classifier
results = [(url, classifier.classify(find_features(jieba.lcut("".join(regex.findall(data)))))) for (url, data)
in data_list]
    # Print the results
for (url, category) in results:
print("%s: %s" % (url, category))
|
flexible
|
{
"blob_id": "88590aef975f7e473ef964ee0c4004cff7e24b07",
"index": 1049,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, category))\n",
"step-3": "import optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, category))\n",
"step-4": "#!/usr/bin/env python3\n\nimport optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\n\nif __name__ == '__main__':\n\n # 读取10000个关键词\n fs = open(\"./src/keywords.txt\", \"rb\")\n keywords = fs.read().decode(\"utf-8\").split(\",\")\n fs.close()\n\n # 找出特征\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features[\"contains %s\" % word] = (word in words)\n return features\n\n # 读取预先做好的nltk分词器\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n\n # 匹配中文字符\n regex = re.compile(\"[\\u4e00-\\u9fa5]\")\n\n p = optparse.OptionParser(usage=\"usage: %prog [options] arg1 arg2\", version=\"%prog 0.1\", prog=\"url-tagger\")\n p.add_option(\"--url\", \"-u\", help=\"Your url\")\n p.add_option(\"--file\", \"-f\", help=\"Your url file. One line one url\")\n (options, arguments) = p.parse_args()\n\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print(\"%s: %s\" % (key, value))\n if key is \"url\":\n url_list.append(value)\n else:\n url_file = open(value, \"rb+\")\n for line in url_file.readlines():\n url_list.append(str(line, encoding=\"utf-8\").strip())\n\n\n # 异步发起http请求\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n # print(response.apparent_encoding)\n html = str(response.content, encoding=response.apparent_encoding, errors=\"ignore\")\n soup = BeautifulSoup(html, \"lxml\")\n for script in soup([\"script\", \"style\"]):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n text = \"\".join(chunk for chunk in chunks if chunk)\n # print(text)\n return url, text\n\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n\n # 分类器进行分类\n results = [(url, classifier.classify(find_features(jieba.lcut(\"\".join(regex.findall(data)))))) for (url, data)\n in data_list]\n\n # 打印结果\n for (url, category) in results:\n print(\"%s: %s\" % (url, category))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
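The script in the record above presupposes ./src/keywords.txt and a pickled nltk classifier. None of the training side appears in the record; as a hedged illustration only, a classifier compatible with the script's feature names could plausibly be produced along these lines (the keyword vocabulary, the labelled corpus and the use of nltk.NaiveBayesClassifier are assumptions):

# Hedged training-side sketch for ./src/my_classifier.pickle (assumed data and labels).
import pickle
import jieba
import nltk

keywords = ['新闻', '体育', '财经']                 # assumed keyword vocabulary
corpus = [('这是一条体育新闻', 'sports'),            # assumed labelled documents
          ('股票大跌,财经快讯', 'finance')]

def find_features(tokens):
    words = set(tokens)
    return {'contains %s' % w: (w in words) for w in keywords}

train_set = [(find_features(jieba.lcut(text)), label) for text, label in corpus]
classifier = nltk.NaiveBayesClassifier.train(train_set)

with open('./src/my_classifier.pickle', 'wb') as fs:
    pickle.dump(classifier, fs)

The feature function mirrors the one in the script, so the 'contains <word>' keys seen at training time match the ones built at classification time.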
<|reserved_special_token_0|>
class ComplexGrid:
def __init__(self, startFile):
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
def reverse(self):
self.vec = tuple(-x for x in self.vec)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Grid:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ComplexGrid:
def __init__(self, startFile):
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
def reverse(self):
self.vec = tuple(-x for x in self.vec)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Grid:
def __init__(self, startFile):
self.infected = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.infected:
self.infected.remove(self.pos)
self.turnRight()
else:
self.infectionEvents += 1
self.infected.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
class ComplexGrid:
def __init__(self, startFile):
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
def reverse(self):
self.vec = tuple(-x for x in self.vec)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Grid:
def __init__(self, startFile):
self.infected = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.infected:
self.infected.remove(self.pos)
self.turnRight()
else:
self.infectionEvents += 1
self.infected.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
class ComplexGrid:
def __init__(self, startFile):
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) - 1) / 2)
for j, char in enumerate(line):
if char == '#':
self.infected.add((i, j))
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = posx, posy
self.vec = -1, 0
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = 0, -1
elif self.vec == (0, -1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, 1
else:
self.vec = -1, 0
def turnRight(self):
if self.vec == (-1, 0):
self.vec = 0, 1
elif self.vec == (0, 1):
self.vec = 1, 0
elif self.vec == (1, 0):
self.vec = 0, -1
else:
self.vec = -1, 0
def reverse(self):
self.vec = tuple(-x for x in self.vec)
def main():
file = 'day_22_input.txt'
g = Grid(file)
for i in range(10000):
g.update()
print('Part 1: {}'.format(g.infectionEvents))
cg = ComplexGrid(file)
for i in range(10000000):
if i % 500000 == 0:
print(i)
cg.update()
print('Part 2: {}'.format(cg.infectionEvents))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
# USAGE: day_22_01.py
# Michael Chambers, 2017
class Grid:
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.infected = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
if self.pos in self.infected:
self.infected.remove(self.pos)
self.turnRight()
else:
self.infectionEvents += 1
self.infected.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
class ComplexGrid:
# clean : 0
# weakened : 1
# infected : 2
# flagged : 3
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
def reverse(self):
self.vec = tuple(-x for x in self.vec)
def main():
file = "day_22_input.txt"
# file = "day_22_test.txt"
g = Grid(file)
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
for i in range(10000):
g.update()
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
print("Part 1: {}".format(g.infectionEvents))
cg = ComplexGrid(file)
for i in range(10000000):
if i % 500000 == 0:
print(i)
cg.update()
print("Part 2: {}".format(cg.infectionEvents))
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "f840624ec11679d576fbb80f8e753c59663a7ee2",
"index": 9168,
"step-1": "<mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-2": "class Grid:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-3": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-4": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\ndef main():\n file = 'day_22_input.txt'\n g = Grid(file)\n for i in range(10000):\n g.update()\n print('Part 1: {}'.format(g.infectionEvents))\n cg = ComplexGrid(file)\n for i in range(10000000):\n if i % 500000 == 0:\n print(i)\n cg.update()\n print('Part 2: {}'.format(cg.infectionEvents))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n# USAGE: day_22_01.py\n# Michael Chambers, 2017\n\nclass Grid:\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.infected = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.turnRight()\n\t\telse:\n\t\t\tself.infectionEvents += 1\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\nclass ComplexGrid:\n\t# clean : 0\n\t# weakened : 1\n\t# infected : 2\n\t# flagged : 3\n\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.weakened = set()\n\t\tself.infected = set()\n\t\tself.flagged = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.weakened:\n\t\t\tself.weakened.remove(self.pos)\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.infectionEvents += 1\n\t\telif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.flagged.add(self.pos)\n\t\t\tself.turnRight()\n\t\telif self.pos in self.flagged:\n\t\t\tself.flagged.remove(self.pos)\n\t\t\tself.reverse()\n\t\telse:\n\t\t\tself.weakened.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\t\n\n\tdef reverse(self):\n\t\tself.vec = tuple(-x for x in self.vec)\t\n\ndef main():\n\tfile = \"day_22_input.txt\"\n\t# file = \"day_22_test.txt\"\n\tg = Grid(file)\n\t# print(g.infected)\n\t# print(\"Pos {} Vec {}\".format(g.pos, g.vec))\n\tfor i in range(10000):\n\t\tg.update()\n\t\t# print(g.infected)\n\t\t# print(\"Pos {} Vec {}\".format(g.pos, 
g.vec))\n\tprint(\"Part 1: {}\".format(g.infectionEvents))\n\n\tcg = ComplexGrid(file)\n\tfor i in range(10000000):\n\t\tif i % 500000 == 0:\n\t\t\tprint(i)\n\t\tcg.update()\n\tprint(\"Part 2: {}\".format(cg.infectionEvents))\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n",
"step-ids": [
6,
7,
11,
13,
14
]
}
|
[
6,
7,
11,
13,
14
] |
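The record above is an Advent of Code 2017 day-22 solution. A quick usage sketch for its Grid class follows, assuming the class has been pasted or imported into the session; the 3x3 map is the example from the puzzle text, and the expected count of 5587 infections after 10,000 bursts is quoted from memory, so treat it as an assumption rather than a verified figure:

# Hedged usage sketch: run Grid (from the record above) on the puzzle's 3x3 example map.
import os
import tempfile

example = "..#\n#..\n...\n"                      # example map from the puzzle text
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
    fh.write(example)
    path = fh.name

g = Grid(path)                                   # virus carrier starts at the centre, facing up
for _ in range(10000):
    g.update()
print(g.infectionEvents)                         # expected 5587 for this example, if memory serves
os.remove(path)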
clear ;
clc;
%----------------------- Read in the images -------------------------------------%
markbefore=imread('p203.bmp');
markbefore2=rgb2gray(markbefore);
mark=im2bw(markbefore2);
figure(1);
subplot(2,3,1);
imshow(mark),title('Watermark image');
[rm,cm]=size(mark);
cover=imread('pic.bmp');
cover1=imresize(cover,[512,512]);
cover_image=rgb2gray(cover1);
subplot(2,3,2),imshow(cover_image,[]),title('Original image');
before=blkproc(cover_image,[8 8],'dct2');    % split the cover image's gray layer into 8x8 blocks, apply a 2-D DCT to each block, and store the result in matrix "before"
I=mark;
alpha=50;    % scale factor: controls the embedding strength, i.e. how much the frequency-domain coefficients are modified
k1=randn(1,8);  % generate two different random key sequences
k2=randn(1,8);
after=before;    % initialise the result matrix that will hold the watermarked coefficients
for i=1:rm    % embed the watermark in the mid-frequency band
for j=1:cm
x=(i-1)*8;
y=(j-1)*8;
if mark(i,j)==1
k=k1;
else
k=k2;
end;
after(x+1,y+8)=before(x+1,y+8)+alpha*k(1);
after(x+2,y+7)=before(x+2,y+7)+alpha*k(2);
after(x+3,y+6)=before(x+3,y+6)+alpha*k(3);
after(x+4,y+5)=before(x+4,y+5)+alpha*k(4);
after(x+5,y+4)=before(x+5,y+4)+alpha*k(5);
after(x+6,y+3)=before(x+6,y+3)+alpha*k(6);
after(x+7,y+2)=before(x+7,y+2)+alpha*k(7);
after(x+8,y+1)=before(x+8,y+1)+alpha*k(8);
end;
end;
result=blkproc(after,[8 8],'idct2');   % split the processed coefficients into 8x8 blocks and apply the inverse 2-D DCT to each
result = uint8(result);
imwrite(result,'watermarked.bmp','bmp');   % save the watermarked image as watermarked.bmp
subplot(2,3,3),imshow(result,[]),title('Watermarked image');
subplot(2,3,4);
imshow(result,[]);
 title('Watermark image');
withmark=result;
subplot(2,3,4);
imshow(result,[]);
 title('Image');
withmark=result;
%------------------------ Watermark extraction -----------------------------%
%
after_2=blkproc(withmark,[8,8],'dct2');   % start of extraction: split the gray layer into blocks and apply the DCT
p=zeros(1,8);    % initialise the vector used to collect the extracted coefficients
mark_2 = zeros(rm,cm);
for i=1:rm
for j=1:cm
x=(i-1)*8;y=(j-1)*8;
        p(1)=after_2(x+1,y+8);   % read back the coefficients that were modified during embedding
p(2)=after_2(x+2,y+7);
p(3)=after_2(x+3,y+6);
p(4)=after_2(x+4,y+5);
p(5)=after_2(x+5,y+4);
p(6)=after_2(x+6,y+3);
p(7)=after_2(x+7,y+2);
p(8)=after_2(x+8,y+1);
        if corr2(p,k1)>corr2(p,k2)   % corr2 measures the similarity of two matrices; the closer to 1, the more similar
            mark_2(i,j)=1;           % compare the extracted values against k1 and k2 to recover the watermark pattern
else
mark_2(i,j)=0;
end
end
end
subplot(2,3,5);
mark_2 = uint8(mark_2);
imshow(mark_2,[]),title('Extracted watermark');
subplot(2,3,6);
imshow(mark),title('Original watermark');
|
normal
|
{
"blob_id": "56d3e59e3e077b1febb834668aba44ce8dba13ae",
"index": 635,
"step-1": "clear ;\nclc;\n \n%-----------------------读入图像-------------------------------------%\nmarkbefore=imread('p203.bmp');\nmarkbefore2=rgb2gray(markbefore);\nmark=im2bw(markbefore2); \nfigure(1); \nsubplot(2,3,1); \nimshow(mark),title('水印图像'); \n[rm,cm]=size(mark); \ncover=imread('pic.bmp');\ncover1=imresize(cover,[512,512]);\ncover_image=rgb2gray(cover1);\nsubplot(2,3,2),imshow(cover_image,[]),title('原始图像'); \n \nbefore=blkproc(cover_image,[8 8],'dct2'); %将载体图像的灰度层分为8×8的小块,每一块内做二维DCT变换,结果记入矩阵before\nI=mark;\nalpha=50; %尺度因子,控制水印添加的强度,决定了频域系数被修改的幅度\nk1=randn(1,8); %产生两个不同的随机序列\nk2=randn(1,8);\nafter=before; %初始化载入水印的结果矩阵\nfor i=1:rm %在中频段嵌入水印\n for j=1:cm\n x=(i-1)*8;\n y=(j-1)*8;\n if mark(i,j)==1\n k=k1;\n else\n k=k2;\n end;\n after(x+1,y+8)=before(x+1,y+8)+alpha*k(1);\n after(x+2,y+7)=before(x+2,y+7)+alpha*k(2);\n after(x+3,y+6)=before(x+3,y+6)+alpha*k(3);\n after(x+4,y+5)=before(x+4,y+5)+alpha*k(4);\n after(x+5,y+4)=before(x+5,y+4)+alpha*k(5);\n after(x+6,y+3)=before(x+6,y+3)+alpha*k(6);\n after(x+7,y+2)=before(x+7,y+2)+alpha*k(7);\n after(x+8,y+1)=before(x+8,y+1)+alpha*k(8);\n end;\nend;\nresult=blkproc(after,[8 8],'idct2'); %将经处理的图像分为8×8的小块,每一块内做二维DCT逆变换\nresult = uint8(result);\nimwrite(result,'watermarked.bmp','bmp'); %隐写图像命名为watermarked.bmp\nsubplot(2,3,3),imshow(result,[]),title('隐写图像'); \n\n\n subplot(2,3,4);\n imshow(result,[]);\n title('水印图像');\n withmark=result;\n subplot(2,3,4);\n imshow(result,[]);\n title('图像');\n withmark=result;\n\n \n%------------------------水印提取-----------------------------%\n%\nafter_2=blkproc(withmark,[8,8],'dct2'); %此步开始提取水印,将灰度层分块进行DCT变换\np=zeros(1,8); %初始化提取数值用的矩阵\nmark_2 = zeros(rm,cm);\nfor i=1:rm\n for j=1:cm\n x=(i-1)*8;y=(j-1)*8;\n p(1)=after_2(x+1,y+8); %将之前改变过数值的点的数值提取出来\n p(2)=after_2(x+2,y+7);\n p(3)=after_2(x+3,y+6);\n p(4)=after_2(x+4,y+5);\n p(5)=after_2(x+5,y+4);\n p(6)=after_2(x+6,y+3);\n p(7)=after_2(x+7,y+2);\n p(8)=after_2(x+8,y+1);\n if corr2(p,k1)>corr2(p,k2) %corr2计算两个矩阵的相似度,越接近1相似度越大\n mark_2(i,j)=1; %比较提取出来的数值与随机频率k1和k2的相似度,还原水印图样\n else\n mark_2(i,j)=0;\n end\n end\nend\nsubplot(2,3,5);\nmark_2 = uint8(mark_2);\nimshow(mark_2,[]),title('提取水印');\nsubplot(2,3,6);\nimshow(mark),title('原水印图像');\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
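The MATLAB record above embeds a binary watermark by adding alpha-scaled key sequences to the anti-diagonal (mid-frequency) DCT coefficients of each 8x8 block, and later recovers each bit by correlating those coefficients with the two candidate keys. For readers following this dump in Python, here is a rough single-block sketch of the same idea; the block contents, the keys, alpha and the scipy-based dct2/idct2 helpers are illustrative assumptions, not a port of the original file:

# Hedged sketch: mid-band DCT watermark embedding and detection for one 8x8 block.
import numpy as np
from scipy.fftpack import dct, idct

def dct2(b):
    return dct(dct(b.T, norm='ortho').T, norm='ortho')

def idct2(b):
    return idct(idct(b.T, norm='ortho').T, norm='ortho')

rng = np.random.default_rng(0)
block = np.outer(np.linspace(100.0, 160.0, 8), np.ones(8))   # smooth stand-in for an image block
k1, k2 = rng.standard_normal(8), rng.standard_normal(8)      # the two key sequences
alpha, bit = 50.0, 1                                         # embed a "1", so use k1

coeffs = dct2(block)
anti = [(i, 7 - i) for i in range(8)]                        # anti-diagonal positions, as in the record
for (i, j), k in zip(anti, k1 if bit else k2):
    coeffs[i, j] += alpha * k
stego = idct2(coeffs)                                        # watermarked block (before rounding)

# Detection: take the DCT again and see which key the anti-diagonal correlates with better.
p = np.array([dct2(stego)[i, j] for i, j in anti])
corr = lambda a, b: np.corrcoef(a, b)[0, 1]
recovered = 1 if corr(p, k1) > corr(p, k2) else 0
print(bit, recovered)                                        # should agree at this alpha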
<|reserved_special_token_0|>
<|reserved_special_token_1|>
ii = [('CookGHP3.py', 2), ('MarrFDI.py', 1), ('GodwWSL2.py', 2), (
'ChanWS.py', 6), ('SadlMLP.py', 1), ('WilbRLW.py', 1), ('AubePRP2.py',
1), ('MartHSI2.py', 1), ('WilbRLW5.py', 1), ('KnowJMM.py', 1), (
'AubePRP.py', 2), ('ChalTPW2.py', 1), ('ClarGE2.py', 2), ('CarlTFR.py',
3), ('SeniNSP.py', 4), ('GrimSLE.py', 1), ('RoscTTI3.py', 1), (
'CookGHP2.py', 1), ('CoolWHM.py', 1), ('DaltJMA.py', 1), ('NewmJLP.py',
1), ('GodwWLN.py', 3), ('MereHHB3.py', 1), ('MartHRW.py', 2), (
'BentJRP.py', 23), ('ThomGLG.py', 1), ('StorJCC.py', 1), ('LewiMJW.py',
1), ('WilbRLW3.py', 1), ('FitzRNS2.py', 1), ('MartHSI.py', 1), (
'EvarJSP.py', 5), ('DwigTHH.py', 4), ('TaylIF.py', 1), ('WordWYR.py', 1
), ('WaylFEP.py', 1)]
|
flexible
|
{
"blob_id": "b80ccee42489aefb2858b8491008b252f6a2b9b7",
"index": 4864,
"step-1": "<mask token>\n",
"step-2": "ii = [('CookGHP3.py', 2), ('MarrFDI.py', 1), ('GodwWSL2.py', 2), (\n 'ChanWS.py', 6), ('SadlMLP.py', 1), ('WilbRLW.py', 1), ('AubePRP2.py', \n 1), ('MartHSI2.py', 1), ('WilbRLW5.py', 1), ('KnowJMM.py', 1), (\n 'AubePRP.py', 2), ('ChalTPW2.py', 1), ('ClarGE2.py', 2), ('CarlTFR.py',\n 3), ('SeniNSP.py', 4), ('GrimSLE.py', 1), ('RoscTTI3.py', 1), (\n 'CookGHP2.py', 1), ('CoolWHM.py', 1), ('DaltJMA.py', 1), ('NewmJLP.py',\n 1), ('GodwWLN.py', 3), ('MereHHB3.py', 1), ('MartHRW.py', 2), (\n 'BentJRP.py', 23), ('ThomGLG.py', 1), ('StorJCC.py', 1), ('LewiMJW.py',\n 1), ('WilbRLW3.py', 1), ('FitzRNS2.py', 1), ('MartHSI.py', 1), (\n 'EvarJSP.py', 5), ('DwigTHH.py', 4), ('TaylIF.py', 1), ('WordWYR.py', 1\n ), ('WaylFEP.py', 1)]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.http import JsonResponse
from django.shortcuts import render
from phone_number_parser.forms import TextForm
import re
def parse_text(request):
###########################################################################
#
# Parse Text is the lone view for this project. A GET request renders a
# form with one textarea field. A POST of this form passes the text via an
# ajax call in the field 'the_text'. The text is parsed using REGEX for
# phone numbers and passed back as a JSON object.
# See main.js for the ajax request and success callback function.
#
###########################################################################
if request.method == 'POST':
text = request.POST.get('the_text')
phone_number_list = []
matches = re.findall(r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})', text)
for match in matches:
phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))
response_data = {'phone_number_list': phone_number_list}
return JsonResponse(response_data)
else:
form = TextForm()
return render(request, 'phone_number_parser/index.html', {'form': form})
|
normal
|
{
"blob_id": "d27a7ca04e12d50aca5a9f9db199102dbeb4e9f1",
"index": 7678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-3": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-4": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n ###########################################################################\n #\n # Parse Text is the lone view for this project. A GET request renders a\n # form with one textarea field. A POST of this form passes the text via an\n # ajax call in the field 'the_text'. The text is parsed using REGEX for\n # phone numbers and passed back as a JSON object.\n # See main.js for the ajax request and success callback function.\n #\n ###########################################################################\n\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(r'\\(?(\\d{3})\\)?[\\.\\-]?\\s*(\\d{3})\\s*[\\.\\-]?\\s*(\\d{4})', text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))\n\n response_data = {'phone_number_list': phone_number_list}\n\n return JsonResponse(response_data)\n\n else:\n form = TextForm()\n\n return render(request, 'phone_number_parser/index.html', {'form': form})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
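The only non-Django logic in the view above is the phone-number regex, so it can be sanity-checked in isolation. A minimal sketch with an assumed sample string:

# Standalone check of the regex used in parse_text (the sample text is an assumption).
import re

PHONE_RE = re.compile(r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})')

text = 'Call (415) 555-0134 or 415.555.0199; fax 4155550123.'
numbers = ['({}) {}-{}'.format(*m) for m in PHONE_RE.findall(text)]
print(numbers)   # ['(415) 555-0134', '(415) 555-0199', '(415) 555-0123']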
<|reserved_special_token_0|>
class Logistic(object):
<|reserved_special_token_0|>
def __init__(self, *args, **kwargs):
"""
Initializing the model parameter
:param args:
:param kwargs:
X_train,
Y_train,
X_test,
Y_test,
num_iterations = 2000,
learning_rate = 0.5
"""
self._x_train = kwargs['X_train']
self._y_train = kwargs['Y_train']
self._x_test = kwargs['X_test']
self._y_test = kwargs['Y_test']
self.num_iteration = kwargs['num_iteration']
self.learning_rate = kwargs['learning_rate']
def fit(self):
"""
function will fit the model with initialized parameter
:return:
costs,
y_prediction_test,
y_prediction_train,
weight,
intercept,
self.learning_rate,
self.num_iteration
"""
weight, intercept = initialize_with_zeros(self._x_train.shape[0])
parameters, grads, costs = optimize(weight, intercept, self.
_x_train, self._y_train, self.num_iteration, self.learning_rate)
weight = parameters['w']
intercept = parameters['b']
y_prediction_test = predict(weight, intercept, self._x_test)
y_prediction_train = predict(weight, intercept, self._x_train)
print('train accuracy: {} %'.format(100 - np.mean(np.abs(
y_prediction_train - self._y_train)) * 100))
print('test accuracy: {} %'.format(100 - np.mean(np.abs(
            y_prediction_test - self._y_test)) * 100))
return {'costs': costs, 'Y_prediction_test': y_prediction_test,
'Y_prediction_train': y_prediction_train, 'w': weight, 'b':
intercept, 'learning_rate': self.learning_rate,
'num_iterations': self.num_iteration}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Logistic(object):
"""
This class provides the flexibility to run
logistic regression to your data set
"""
def __init__(self, *args, **kwargs):
"""
Initializing the model parameter
:param args:
:param kwargs:
X_train,
Y_train,
X_test,
Y_test,
num_iterations = 2000,
learning_rate = 0.5
"""
self._x_train = kwargs['X_train']
self._y_train = kwargs['Y_train']
self._x_test = kwargs['X_test']
self._y_test = kwargs['Y_test']
self.num_iteration = kwargs['num_iteration']
self.learning_rate = kwargs['learning_rate']
def fit(self):
"""
function will fit the model with initialized parameter
:return:
costs,
y_prediction_test,
y_prediction_train,
weight,
intercept,
self.learning_rate,
self.num_iteration
"""
weight, intercept = initialize_with_zeros(self._x_train.shape[0])
parameters, grads, costs = optimize(weight, intercept, self.
_x_train, self._y_train, self.num_iteration, self.learning_rate)
weight = parameters['w']
intercept = parameters['b']
y_prediction_test = predict(weight, intercept, self._x_test)
y_prediction_train = predict(weight, intercept, self._x_train)
print('train accuracy: {} %'.format(100 - np.mean(np.abs(
y_prediction_train - self._y_train)) * 100))
print('test accuracy: {} %'.format(100 - np.mean(np.abs(
            y_prediction_test - self._y_test)) * 100))
return {'costs': costs, 'Y_prediction_test': y_prediction_test,
'Y_prediction_train': y_prediction_train, 'w': weight, 'b':
intercept, 'learning_rate': self.learning_rate,
'num_iterations': self.num_iteration}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def predict(weight, intercept, x_vector):
"""
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
"""
m = x_vector.shape[1]
y_prediction = np.zeros((1, m))
weight = weight.reshape(x_vector.shape[0], 1)
yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)
for i in range(yhat.shape[1]):
if yhat[0][i] > 0.5:
y_prediction[0][i] = 1
else:
y_prediction[0][i] = 0
assert y_prediction.shape == (1, m)
return y_prediction
class Logistic(object):
"""
This class provides the flexibility to run
logistic regression to your data set
"""
def __init__(self, *args, **kwargs):
"""
Initializing the model parameter
:param args:
:param kwargs:
X_train,
Y_train,
X_test,
Y_test,
num_iterations = 2000,
learning_rate = 0.5
"""
self._x_train = kwargs['X_train']
self._y_train = kwargs['Y_train']
self._x_test = kwargs['X_test']
self._y_test = kwargs['Y_test']
self.num_iteration = kwargs['num_iteration']
self.learning_rate = kwargs['learning_rate']
def fit(self):
"""
function will fit the model with initialized parameter
:return:
costs,
y_prediction_test,
y_prediction_train,
weight,
intercept,
self.learning_rate,
self.num_iteration
"""
weight, intercept = initialize_with_zeros(self._x_train.shape[0])
parameters, grads, costs = optimize(weight, intercept, self.
_x_train, self._y_train, self.num_iteration, self.learning_rate)
weight = parameters['w']
intercept = parameters['b']
y_prediction_test = predict(weight, intercept, self._x_test)
y_prediction_train = predict(weight, intercept, self._x_train)
print('train accuracy: {} %'.format(100 - np.mean(np.abs(
y_prediction_train - self._y_train)) * 100))
print('test accuracy: {} %'.format(100 - np.mean(np.abs(
            y_prediction_test - self._y_test)) * 100))
return {'costs': costs, 'Y_prediction_test': y_prediction_test,
'Y_prediction_train': y_prediction_train, 'w': weight, 'b':
intercept, 'learning_rate': self.learning_rate,
'num_iterations': self.num_iteration}
<|reserved_special_token_1|>
from function import *
from .propogation import optimize
from .initialize import initialize_with_zeros
def predict(weight, intercept, x_vector):
"""
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
"""
m = x_vector.shape[1]
y_prediction = np.zeros((1, m))
weight = weight.reshape(x_vector.shape[0], 1)
yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)
for i in range(yhat.shape[1]):
if yhat[0][i] > 0.5:
y_prediction[0][i] = 1
else:
y_prediction[0][i] = 0
assert y_prediction.shape == (1, m)
return y_prediction
class Logistic(object):
"""
This class provides the flexibility to run
logistic regression to your data set
"""
def __init__(self, *args, **kwargs):
"""
Initializing the model parameter
:param args:
:param kwargs:
X_train,
Y_train,
X_test,
Y_test,
num_iterations = 2000,
learning_rate = 0.5
"""
self._x_train = kwargs['X_train']
self._y_train = kwargs['Y_train']
self._x_test = kwargs['X_test']
self._y_test = kwargs['Y_test']
self.num_iteration = kwargs['num_iteration']
self.learning_rate = kwargs['learning_rate']
def fit(self):
"""
function will fit the model with initialized parameter
:return:
costs,
y_prediction_test,
y_prediction_train,
weight,
intercept,
self.learning_rate,
self.num_iteration
"""
weight, intercept = initialize_with_zeros(self._x_train.shape[0])
parameters, grads, costs = optimize(weight, intercept, self.
_x_train, self._y_train, self.num_iteration, self.learning_rate)
weight = parameters['w']
intercept = parameters['b']
y_prediction_test = predict(weight, intercept, self._x_test)
y_prediction_train = predict(weight, intercept, self._x_train)
print('train accuracy: {} %'.format(100 - np.mean(np.abs(
y_prediction_train - self._y_train)) * 100))
print('test accuracy: {} %'.format(100 - np.mean(np.abs(
            y_prediction_test - self._y_test)) * 100))
return {'costs': costs, 'Y_prediction_test': y_prediction_test,
'Y_prediction_train': y_prediction_train, 'w': weight, 'b':
intercept, 'learning_rate': self.learning_rate,
'num_iterations': self.num_iteration}
<|reserved_special_token_1|>
from function import *
from .propogation import optimize
from .initialize import initialize_with_zeros
def predict(weight, intercept, x_vector):
"""
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
"""
m = x_vector.shape[1]
y_prediction = np.zeros((1, m))
weight = weight.reshape(x_vector.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)
for i in range(yhat.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
if yhat[0][i] > 0.5:
y_prediction[0][i] = 1
else:
y_prediction[0][i] = 0
assert (y_prediction.shape == (1, m))
return y_prediction
class Logistic(object):
"""
This class provides the flexibility to run
logistic regression to your data set
"""
def __init__(self, *args, **kwargs):
"""
Initializing the model parameter
:param args:
:param kwargs:
X_train,
Y_train,
X_test,
Y_test,
num_iterations = 2000,
learning_rate = 0.5
"""
# Initializing the test & training set
self._x_train = kwargs['X_train']
self._y_train = kwargs['Y_train']
self._x_test = kwargs['X_test']
self._y_test = kwargs['Y_test']
self.num_iteration = kwargs['num_iteration']
self.learning_rate = kwargs['learning_rate']
def fit(self):
"""
function will fit the model with initialized parameter
:return:
costs,
y_prediction_test,
y_prediction_train,
weight,
intercept,
self.learning_rate,
self.num_iteration
"""
# initialize parameters with zeros (≈ 1 line of code)
weight, intercept = initialize_with_zeros(self._x_train.shape[0])
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(weight,
intercept,
self._x_train,
self._y_train,
self.num_iteration,
self.learning_rate
)
# Retrieve parameters w and b from dictionary "parameters"
weight = parameters["w"]
intercept = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
y_prediction_test = predict(weight, intercept, self._x_test)
y_prediction_train = predict(weight, intercept, self._x_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - self._x_test)) * 100))
return {"costs": costs,
"Y_prediction_test": y_prediction_test,
"Y_prediction_train": y_prediction_train,
"w": weight,
"b": intercept,
"learning_rate": self.learning_rate,
"num_iterations": self.num_iteration}
|
flexible
|
{
"blob_id": "63360ec9693a916375b49d0881008b1d7d4ec953",
"index": 4546,
"step-1": "<mask token>\n\n\nclass Logistic(object):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-2": "<mask token>\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-3": "<mask token>\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n assert y_prediction.shape == (1, m)\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-4": "from function import *\nfrom .propogation import optimize\nfrom .initialize import initialize_with_zeros\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n assert y_prediction.shape == (1, m)\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n parameters, grads, costs = optimize(weight, intercept, self.\n _x_train, self._y_train, self.num_iteration, self.learning_rate)\n weight = parameters['w']\n intercept = parameters['b']\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n print('train accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_train - self._y_train)) * 100))\n print('test accuracy: {} %'.format(100 - np.mean(np.abs(\n y_prediction_test - self._x_test)) * 100))\n return {'costs': costs, 'Y_prediction_test': y_prediction_test,\n 'Y_prediction_train': y_prediction_train, 'w': weight, 'b':\n intercept, 'learning_rate': self.learning_rate,\n 'num_iterations': self.num_iteration}\n",
"step-5": "from function import *\nfrom .propogation import optimize\nfrom .initialize import initialize_with_zeros\n\n\ndef predict(weight, intercept, x_vector):\n \"\"\"\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n\n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n \"\"\"\n\n m = x_vector.shape[1]\n y_prediction = np.zeros((1, m))\n weight = weight.reshape(x_vector.shape[0], 1)\n\n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n yhat = sigmoid(np.dot(weight.T, x_vector) + intercept)\n for i in range(yhat.shape[1]):\n\n # Convert probabilities A[0,i] to actual predictions p[0,i]\n if yhat[0][i] > 0.5:\n y_prediction[0][i] = 1\n else:\n y_prediction[0][i] = 0\n\n assert (y_prediction.shape == (1, m))\n\n return y_prediction\n\n\nclass Logistic(object):\n \"\"\"\n This class provides the flexibility to run\n logistic regression to your data set\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializing the model parameter\n :param args:\n :param kwargs:\n X_train,\n Y_train,\n X_test,\n Y_test,\n num_iterations = 2000,\n learning_rate = 0.5\n \"\"\"\n # Initializing the test & training set\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']\n\n def fit(self):\n \"\"\"\n function will fit the model with initialized parameter\n :return:\n costs,\n y_prediction_test,\n y_prediction_train,\n weight,\n intercept,\n self.learning_rate,\n self.num_iteration\n \"\"\"\n # initialize parameters with zeros (≈ 1 line of code)\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(weight,\n intercept,\n self._x_train,\n self._y_train,\n self.num_iteration,\n self.learning_rate\n )\n\n # Retrieve parameters w and b from dictionary \"parameters\"\n weight = parameters[\"w\"]\n intercept = parameters[\"b\"]\n\n # Predict test/train set examples (≈ 2 lines of code)\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_test - self._x_test)) * 100))\n\n return {\"costs\": costs,\n \"Y_prediction_test\": y_prediction_test,\n \"Y_prediction_train\": y_prediction_train,\n \"w\": weight,\n \"b\": intercept,\n \"learning_rate\": self.learning_rate,\n \"num_iterations\": self.num_iteration}\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
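
# Note on the Logistic.fit() code in the record above: its test-accuracy line
# subtracts the feature matrix (self._x_test) from the predictions; comparing
# against the labels (self._y_test) is presumably what was intended. A minimal
# sketch of that metric (assuming predictions and labels are 0/1 row vectors of
# the same shape):
import numpy as np

def label_accuracy(y_pred, y_true):
    # percentage of positions where the 0/1 labels agree
    return 100 - np.mean(np.abs(y_pred - y_true)) * 100

# label_accuracy(np.array([[1, 0, 1, 1]]), np.array([[1, 0, 0, 1]])) -> 75.0
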
from PenaltyTracker import PenaltyTracker
from DatabaseManager import DatabaseManager
import unittest, os, sys, shutil, filecmp


class TestingPenaltyTracker(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.testPTDatabase = os.path.join(os.getcwd(), "Tests", "test_penalty.db")
        cls.testPenaltyTracker = PenaltyTracker()
        cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)
        cls.testPenaltyTracker.setSeason("PenaltyTracker")
        cls.testPenaltyTracker.createAndSetDatabaseManager()

        controlPath = os.path.join(os.getcwd(), "Tests", "season_test_10-24-16.db")
        cls.controlDatabase = DatabaseManager(controlPath, "PenaltyTracker")

    @classmethod
    def tearDownClass(cls):
        cls.testPenaltyTracker = None
        cls.controlDatabase = None
        os.remove(os.path.join(os.getcwd(), "Tests", "test_penalty.db"))

    def testGameUrls(self):
        self.testPenaltyTracker.setTargetDate("2016-02-26")
        numberOfGames = len(self.testPenaltyTracker.GetGameURLS())
        self.assertEqual(numberOfGames, 5)

    def testSetDBLocation(self):
        self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)

    def testPenaltyProcessing(self):
        # generate the test data
        self.testPenaltyTracker.setTargetDate("2016-10-24")
        self.testPenaltyTracker.run()

        self.assertEqual(self.controlDatabase.getHighestID(), self.testPenaltyTracker.databaseManager.getHighestID())

        getAllCommand = "SELECT * FROM PenaltyTracker"
        controlRows = self.controlDatabase.getData(getAllCommand)
        testRows = self.testPenaltyTracker.databaseManager.getData(getAllCommand)
        self.assertEqual(controlRows, testRows)
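
# A minimal entry point so the suite can be run directly; it assumes the
# Tests/ fixture files referenced above (the control database) and the
# PenaltyTracker/DatabaseManager modules are available on the path:
if __name__ == "__main__":
    unittest.main()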
|
normal
|
{
"blob_id": "607d8bc79caa9d767bdb7e77a5db52295d90236f",
"index": 1759,
"step-1": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n <mask token>\n <mask token>\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-2": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-3": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',\n 'test_penalty.db')\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason('PenaltyTracker')\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n controlPath = os.path.join(os.getcwd(), 'Tests',\n 'season_test_10-24-16.db')\n cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-4": "from PenaltyTracker import PenaltyTracker\nfrom DatabaseManager import DatabaseManager\nimport unittest, os, sys, shutil, filecmp\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',\n 'test_penalty.db')\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason('PenaltyTracker')\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n controlPath = os.path.join(os.getcwd(), 'Tests',\n 'season_test_10-24-16.db')\n cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-5": "from PenaltyTracker import PenaltyTracker\nfrom DatabaseManager import DatabaseManager\nimport unittest,os,sys,shutil, filecmp\n\nclass TestingPenaltyTracker(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join( os.getcwd(), \"Tests\", \"test_penalty.db\")\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason(\"PenaltyTracker\")\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n\n controlPath = os.path.join(os.getcwd(), \"Tests\", \"season_test_10-24-16.db\")\n cls.controlDatabase = DatabaseManager(controlPath, \"PenaltyTracker\")\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove( os.path.join( os.getcwd(), \"Tests\", \"test_penalty.db\") )\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate(\"2016-02-26\")\n numberOfGames = len( self.testPenaltyTracker.GetGameURLS() )\n self.assertEqual( numberOfGames, 5 )\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None )\n\n def testPenaltyProcessing(self):\n # generate the test data\n self.testPenaltyTracker.setTargetDate(\"2016-10-24\") \n self.testPenaltyTracker.run();\n\n self.assertEqual( self.controlDatabase.getHighestID(), self.testPenaltyTracker.databaseManager.getHighestID() )\n \n getAllCommand = \"SELECT * FROM PenaltyTracker\"\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(getAllCommand)\n self.assertEqual(controlRows, testRows)\n\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for lab in labels:
print(lab)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
labels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')
for lab in labels:
print(lab)
<|reserved_special_token_1|>
import numpy as np
labels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')
for lab in labels:
print(lab)
|
flexible
|
{
"blob_id": "a83988e936d9dee4838db61c8eb8ec108f5ecd3f",
"index": 4669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor lab in labels:\n print(lab)\n",
"step-3": "<mask token>\nlabels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')\nfor lab in labels:\n print(lab)\n",
"step-4": "import numpy as np\nlabels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')\nfor lab in labels:\n print(lab)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import wx
from .wx_control import WXControl
from ...components.image_view import AbstractTkImageView
class wxBitmapWidget(wx.Panel):
""" A wx.Panel subclass which paints a provided wx.Bitmap.
This differs from wx.StaticBitmap in that it provides the option to
scale the provided bitmap to the bounds of the widget. If the widget
is set to scale its contents, low quality scaling will occur during
    resize, with a high quality pass performed once resizing has finished.
"""
def __init__(self, parent):
""" Initialize a wxBitmapWidget.
Parameters
----------
parent : wx.Window
The wx.Window object which serves as the widget parent.
"""
super(wxBitmapWidget, self).__init__(parent)
self._bitmap = None
self._scaled_contents = False
self._preserve_aspect_ratio = False
self._allow_upscaling = False
self._resize_timer = None
self._resizing = False
self.Bind(wx.EVT_PAINT, self.OnPaint)
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def OnPaint(self, event):
""" The paint event handler for the widget.
"""
bmp = self._bitmap
if bmp is None:
return
bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()
if bmp_width == 0 or bmp_height == 0:
return
evt_x = 0
evt_y = 0
evt_width, evt_height = self.GetSize().asTuple()
if not self._scaled_contents:
# If the image isn't scaled, it is centered if possible.
# Otherwise, it's painted at the origin and clipped.
paint_x = max(0, int((evt_width / 2. - bmp_width / 2.) + evt_x))
paint_y = max(0, int((evt_height / 2. - bmp_height / 2.) + evt_y))
paint_width = bmp_width
paint_height = bmp_height
else:
            # If the image *is* scaled, its scaled size depends on the
# size of the paint area as well as the other scaling flags.
if self._preserve_aspect_ratio:
bmp_ratio = float(bmp_width) / bmp_height
evt_ratio = float(evt_width) / evt_height
if evt_ratio >= bmp_ratio:
if self._allow_upscaling:
paint_height = evt_height
else:
paint_height = min(bmp_height, evt_height)
paint_width = int(paint_height * bmp_ratio)
else:
if self._allow_upscaling:
paint_width = evt_width
else:
paint_width = min(bmp_width, evt_width)
paint_height = int(paint_width / bmp_ratio)
else:
if self._allow_upscaling:
paint_height = evt_height
paint_width = evt_width
else:
paint_height = min(bmp_height, evt_height)
paint_width = min(bmp_width, evt_width)
# In all cases of scaling, we know that the scaled image is
# no larger than the paint area, and can thus be centered.
paint_x = int((evt_width / 2. - paint_width / 2.) + evt_x)
paint_y = int((evt_height / 2. - paint_height / 2.) + evt_y)
# Scale the bitmap if needed, using a faster method if the
# image is currently being resized
if paint_width != bmp_width or paint_height != bmp_height:
img = bmp.ConvertToImage()
if self._resizing:
quality = wx.IMAGE_QUALITY_NORMAL
else:
quality = wx.IMAGE_QUALITY_HIGH
img.Rescale(paint_width, paint_height, quality)
bmp = wx.BitmapFromImage(img)
# Finally, draw the bitmap into the computed location
dc = wx.PaintDC(self)
dc.DrawBitmap(bmp, paint_x, paint_y)
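
        # Worked example of the scaled, aspect-preserving branch above (the
        # numbers are illustrative only, not from the original code): an
        # 800x600 bitmap painted into a 400x400 widget with
        # _preserve_aspect_ratio and _allow_upscaling set gives
        # bmp_ratio = 800/600 ~ 1.33 and evt_ratio = 1.0, so the else branch
        # runs: paint_width = 400, paint_height = int(400 / bmp_ratio) = 300,
        # and the image is centered at paint_x = 0, paint_y = 50.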
def OnResize(self, event):
""" The resize event handler for the widget.
This method is only bound and called when content scaling is
        enabled. It starts (or restarts) a timer to perform a high quality
scaled repaint when resizing is finished.
"""
self._resizing = True
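        # Start(60, True) arms the timer as a one-shot: a single EVT_TIMER
        # fires ~60 ms after the last resize event, and OnResizeEnd then does
        # one high-quality repaint instead of rescaling on every size change.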
self._resize_timer.Start(60, True)
def OnResizeEnd(self, event):
""" The repaint timer event handler.
This method is only bound and called when content scaling is
enabled and resizing has completed. It triggers a high quality
repaint.
"""
self._resizing = False
self.Refresh()
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def GetBestSize(self):
""" Overridden method to return the size of the bitmap as the
best size for the widget.
"""
bmp = self._bitmap
return wx.Size(bmp.GetWidth(), bmp.GetHeight())
def GetBestSizeTuple(self):
""" Overridden method to return the size of the bitmap as the
best size for the widget.
"""
return self.GetBestSize().asTuple()
    def GetBitmap(self):
""" Get the underlying wx.Bitmap used to paint the control.
Returns
-------
result : wx.Bitmap or None
The bitmap being used to paint the control, or None if
no bitmap has been supplied.
"""
return self._bitmap
def SetBitmap(self, bitmap):
""" Set the underlying wx.Bitmap and refresh the widget.
Parameters
----------
bitmap : wx.Bitmap
The bitmap to paint on the widget.
"""
self._bitmap = bitmap
self.Refresh()
def GetScaledContents(self):
""" Whether or not the bitmap is scaled to fit the bounds.
Returns
-------
result : bool
Whether or not the bitmap is scaled to fit the bounds of
the widget.
"""
return self._scaled_contents
def SetScaledContents(self, scaled):
""" Set whether or not the bitmap should be scaled to fit the
bounds of the widget.
Parameters
----------
scaled : bool
Whether or not to scale the bitmap to fit the bounds of the
widget.
"""
if scaled:
if not self._scaled_contents:
self._scaled_contents = True
self._resize_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnResizeEnd)
self.Bind(wx.EVT_SIZE, self.OnResize)
else:
if self._scaled_contents:
self._scaled_contents = False
                self._resize_timer = None
self.Unbind(wx.EVT_TIMER, handler=self.OnResizeEnd)
self.Unbind(wx.EVT_SIZE, handler=self.OnResize)
self.Refresh()
def GetPreserveAspectRatio(self):
""" Returns whether or not the aspect ratio of the image is
maintained during a resize.
"""
return self._preserve_aspect_ratio
def SetPreserveAspectRatio(self, preserve):
""" Set whether or not to preserve the image aspect ratio.
Parameters
----------
preserve : bool
If True then the aspect ratio of the image will be preserved
if it is scaled to fit. Otherwise, the aspect ratio will be
ignored.
"""
self._preserve_aspect_ratio = preserve
self.Refresh()
def GetAllowUpscaling(self):
""" Returns whether or not the image can be scaled greater than
its natural size.
"""
return self._allow_upscaling
def SetAllowUpscaling(self, allow):
""" Set whether or not to allow the image to be scaled beyond
its natural size.
Parameters
----------
allow : bool
If True, then the image may be scaled larger than its
natural if it is scaled to fit. If False, the image will
never be scaled larger than its natural size. In either
case, the image may be scaled smaller.
"""
self._allow_upscaling = allow
self.Refresh()
class WXImageView(WXControl, AbstractTkImageView):
""" A Wx implementation of ImageView.
"""
#: The internal cached size hint which is used to determine whether
    #: or not a size hint updated event should be emitted when the text
#: in the label changes
_cached_size_hint = None
#--------------------------------------------------------------------------
# Setup methods
#--------------------------------------------------------------------------
def create(self, parent):
""" Creates the underlying wxBitmapWidget control.
"""
self.widget = wxBitmapWidget(parent)
def initialize(self):
""" Initializes the attributes on the underlying control.
"""
super(WXImageView, self).initialize()
shell = self.shell_obj
self.set_image(shell.image)
self.set_scale_to_fit(shell.scale_to_fit)
self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)
self.set_allow_upscaling(shell.allow_upscaling)
#--------------------------------------------------------------------------
# Implementation
#--------------------------------------------------------------------------
def shell_image_changed(self, image):
""" The change handler for the 'image' attribute on the shell
component.
"""
self.set_image(image)
def shell_scale_to_fit_changed(self, scale_to_fit):
""" The change handler for the 'scale_to_fit' attribute on the
shell component.
"""
self.set_scale_to_fit(scale_to_fit)
def shell_preserve_aspect_ratio_changed(self, preserve):
""" The change handler for the 'preserve_aspect_ratio' attribute
on the shell component.
"""
self.set_preserve_aspect_ratio(preserve)
def shell_allow_upscaling_changed(self, allow):
""" The change handler for the 'allow_upscaling' attribute on
the shell component.
"""
self.set_allow_upscaling(allow)
#--------------------------------------------------------------------------
# Widget Update Methods
#--------------------------------------------------------------------------
def set_image(self, image):
""" Sets the image on the underlying wxBitmapWidget.
"""
bmp = image.as_wxBitmap() if image is not None else None
self.widget.SetBitmap(bmp)
# Emit a size hint updated event if the size hint has actually
# changed. This is an optimization so that a constraints update
# only occurs when the size hint has actually changed. This
# logic must be implemented here so that the label has been
# updated before the new size hint is computed. Placing this
# logic on the shell object would not guarantee that the label
# has been updated at the time the change handler is called.
cached = self._cached_size_hint
hint = self._cached_size_hint = self.size_hint()
if cached != hint:
self.shell_obj.size_hint_updated()
def set_scale_to_fit(self, scale_to_fit):
""" Sets whether or not the image scales with the underlying
control.
"""
self.widget.SetScaledContents(scale_to_fit)
def set_preserve_aspect_ratio(self, preserve):
""" Sets whether or not to preserve the aspect ratio of the
image when scaling.
"""
self.widget.SetPreserveAspectRatio(preserve)
def set_allow_upscaling(self, allow):
""" Sets whether or not the image will scale beyond its natural
size.
"""
self.widget.SetAllowUpscaling(allow)
|
normal
|
{
"blob_id": "d4198c2c3706e03ba1bce3e31c5139f01248a184",
"index": 5161,
"step-1": "<mask token>\n\n\nclass wxBitmapWidget(wx.Panel):\n <mask token>\n\n def __init__(self, parent):\n \"\"\" Initialize a wxBitmapWidget.\n\n Parameters\n ----------\n parent : wx.Window\n The wx.Window object which serves as the widget parent.\n \n \"\"\"\n super(wxBitmapWidget, self).__init__(parent)\n self._bitmap = None\n self._scaled_contents = False\n self._preserve_aspect_ratio = False\n self._allow_upscaling = False\n self._resize_timer = None\n self._resizing = False\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n def OnPaint(self, event):\n \"\"\" The paint event handler for the widget.\n\n \"\"\"\n bmp = self._bitmap\n if bmp is None:\n return\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n if not self._scaled_contents:\n paint_x = max(0, int(evt_width / 2.0 - bmp_width / 2.0 + evt_x))\n paint_y = max(0, int(evt_height / 2.0 - bmp_height / 2.0 + evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n elif self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n paint_x = int(evt_width / 2.0 - paint_width / 2.0 + evt_x)\n paint_y = int(evt_height / 2.0 - paint_height / 2.0 + evt_y)\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)\n\n def OnResize(self, event):\n \"\"\" The resize event handler for the widget.\n\n This method is only bound and called when content scaling is\n enabled. It starts(restarts) a timer to perform a high quality\n scaled repaint when resizing is finished.\n\n \"\"\"\n self._resizing = True\n self._resize_timer.Start(60, True)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def SetAllowUpscaling(self, allow):\n \"\"\" Set whether or not to allow the image to be scaled beyond\n its natural size.\n\n Parameters\n ----------\n allow : bool\n If True, then the image may be scaled larger than its \n natural if it is scaled to fit. If False, the image will\n never be scaled larger than its natural size. 
In either\n case, the image may be scaled smaller.\n\n \"\"\"\n self._allow_upscaling = allow\n self.Refresh()\n\n\nclass WXImageView(WXControl, AbstractTkImageView):\n \"\"\" A Wx implementation of ImageView.\n\n \"\"\"\n _cached_size_hint = None\n\n def create(self, parent):\n \"\"\" Creates the underlying wxBitmapWidget control.\n\n \"\"\"\n self.widget = wxBitmapWidget(parent)\n\n def initialize(self):\n \"\"\" Initializes the attributes on the underlying control.\n\n \"\"\"\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)\n\n def shell_image_changed(self, image):\n \"\"\" The change handler for the 'image' attribute on the shell \n component.\n\n \"\"\"\n self.set_image(image)\n\n def shell_scale_to_fit_changed(self, scale_to_fit):\n \"\"\" The change handler for the 'scale_to_fit' attribute on the \n shell component.\n\n \"\"\"\n self.set_scale_to_fit(scale_to_fit)\n\n def shell_preserve_aspect_ratio_changed(self, preserve):\n \"\"\" The change handler for the 'preserve_aspect_ratio' attribute\n on the shell component.\n\n \"\"\"\n self.set_preserve_aspect_ratio(preserve)\n\n def shell_allow_upscaling_changed(self, allow):\n \"\"\" The change handler for the 'allow_upscaling' attribute on \n the shell component.\n\n \"\"\"\n self.set_allow_upscaling(allow)\n\n def set_image(self, image):\n \"\"\" Sets the image on the underlying wxBitmapWidget.\n\n \"\"\"\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()\n\n def set_scale_to_fit(self, scale_to_fit):\n \"\"\" Sets whether or not the image scales with the underlying \n control.\n\n \"\"\"\n self.widget.SetScaledContents(scale_to_fit)\n\n def set_preserve_aspect_ratio(self, preserve):\n \"\"\" Sets whether or not to preserve the aspect ratio of the \n image when scaling.\n\n \"\"\"\n self.widget.SetPreserveAspectRatio(preserve)\n\n def set_allow_upscaling(self, allow):\n \"\"\" Sets whether or not the image will scale beyond its natural\n size.\n\n \"\"\"\n self.widget.SetAllowUpscaling(allow)\n",
"step-2": "<mask token>\n\n\nclass wxBitmapWidget(wx.Panel):\n <mask token>\n\n def __init__(self, parent):\n \"\"\" Initialize a wxBitmapWidget.\n\n Parameters\n ----------\n parent : wx.Window\n The wx.Window object which serves as the widget parent.\n \n \"\"\"\n super(wxBitmapWidget, self).__init__(parent)\n self._bitmap = None\n self._scaled_contents = False\n self._preserve_aspect_ratio = False\n self._allow_upscaling = False\n self._resize_timer = None\n self._resizing = False\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n def OnPaint(self, event):\n \"\"\" The paint event handler for the widget.\n\n \"\"\"\n bmp = self._bitmap\n if bmp is None:\n return\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n if not self._scaled_contents:\n paint_x = max(0, int(evt_width / 2.0 - bmp_width / 2.0 + evt_x))\n paint_y = max(0, int(evt_height / 2.0 - bmp_height / 2.0 + evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n elif self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n paint_x = int(evt_width / 2.0 - paint_width / 2.0 + evt_x)\n paint_y = int(evt_height / 2.0 - paint_height / 2.0 + evt_y)\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)\n\n def OnResize(self, event):\n \"\"\" The resize event handler for the widget.\n\n This method is only bound and called when content scaling is\n enabled. It starts(restarts) a timer to perform a high quality\n scaled repaint when resizing is finished.\n\n \"\"\"\n self._resizing = True\n self._resize_timer.Start(60, True)\n <mask token>\n <mask token>\n <mask token>\n\n def GetBitmap(self, bitmap):\n \"\"\" Get the underlying wx.Bitmap used to paint the control.\n\n Returns\n -------\n result : wx.Bitmap or None\n The bitmap being used to paint the control, or None if\n no bitmap has been supplied.\n\n \"\"\"\n return self._bitmap\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def SetPreserveAspectRatio(self, preserve):\n \"\"\" Set whether or not to preserve the image aspect ratio.\n\n Parameters\n ----------\n preserve : bool\n If True then the aspect ratio of the image will be preserved\n if it is scaled to fit. Otherwise, the aspect ratio will be\n ignored.\n\n \"\"\"\n self._preserve_aspect_ratio = preserve\n self.Refresh()\n <mask token>\n\n def SetAllowUpscaling(self, allow):\n \"\"\" Set whether or not to allow the image to be scaled beyond\n its natural size.\n\n Parameters\n ----------\n allow : bool\n If True, then the image may be scaled larger than its \n natural if it is scaled to fit. 
If False, the image will\n never be scaled larger than its natural size. In either\n case, the image may be scaled smaller.\n\n \"\"\"\n self._allow_upscaling = allow\n self.Refresh()\n\n\nclass WXImageView(WXControl, AbstractTkImageView):\n \"\"\" A Wx implementation of ImageView.\n\n \"\"\"\n _cached_size_hint = None\n\n def create(self, parent):\n \"\"\" Creates the underlying wxBitmapWidget control.\n\n \"\"\"\n self.widget = wxBitmapWidget(parent)\n\n def initialize(self):\n \"\"\" Initializes the attributes on the underlying control.\n\n \"\"\"\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)\n\n def shell_image_changed(self, image):\n \"\"\" The change handler for the 'image' attribute on the shell \n component.\n\n \"\"\"\n self.set_image(image)\n\n def shell_scale_to_fit_changed(self, scale_to_fit):\n \"\"\" The change handler for the 'scale_to_fit' attribute on the \n shell component.\n\n \"\"\"\n self.set_scale_to_fit(scale_to_fit)\n\n def shell_preserve_aspect_ratio_changed(self, preserve):\n \"\"\" The change handler for the 'preserve_aspect_ratio' attribute\n on the shell component.\n\n \"\"\"\n self.set_preserve_aspect_ratio(preserve)\n\n def shell_allow_upscaling_changed(self, allow):\n \"\"\" The change handler for the 'allow_upscaling' attribute on \n the shell component.\n\n \"\"\"\n self.set_allow_upscaling(allow)\n\n def set_image(self, image):\n \"\"\" Sets the image on the underlying wxBitmapWidget.\n\n \"\"\"\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()\n\n def set_scale_to_fit(self, scale_to_fit):\n \"\"\" Sets whether or not the image scales with the underlying \n control.\n\n \"\"\"\n self.widget.SetScaledContents(scale_to_fit)\n\n def set_preserve_aspect_ratio(self, preserve):\n \"\"\" Sets whether or not to preserve the aspect ratio of the \n image when scaling.\n\n \"\"\"\n self.widget.SetPreserveAspectRatio(preserve)\n\n def set_allow_upscaling(self, allow):\n \"\"\" Sets whether or not the image will scale beyond its natural\n size.\n\n \"\"\"\n self.widget.SetAllowUpscaling(allow)\n",
"step-3": "<mask token>\n\n\nclass wxBitmapWidget(wx.Panel):\n <mask token>\n\n def __init__(self, parent):\n \"\"\" Initialize a wxBitmapWidget.\n\n Parameters\n ----------\n parent : wx.Window\n The wx.Window object which serves as the widget parent.\n \n \"\"\"\n super(wxBitmapWidget, self).__init__(parent)\n self._bitmap = None\n self._scaled_contents = False\n self._preserve_aspect_ratio = False\n self._allow_upscaling = False\n self._resize_timer = None\n self._resizing = False\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n def OnPaint(self, event):\n \"\"\" The paint event handler for the widget.\n\n \"\"\"\n bmp = self._bitmap\n if bmp is None:\n return\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n if not self._scaled_contents:\n paint_x = max(0, int(evt_width / 2.0 - bmp_width / 2.0 + evt_x))\n paint_y = max(0, int(evt_height / 2.0 - bmp_height / 2.0 + evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n elif self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n paint_x = int(evt_width / 2.0 - paint_width / 2.0 + evt_x)\n paint_y = int(evt_height / 2.0 - paint_height / 2.0 + evt_y)\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)\n\n def OnResize(self, event):\n \"\"\" The resize event handler for the widget.\n\n This method is only bound and called when content scaling is\n enabled. 
It starts(restarts) a timer to perform a high quality\n scaled repaint when resizing is finished.\n\n \"\"\"\n self._resizing = True\n self._resize_timer.Start(60, True)\n <mask token>\n\n def GetBestSize(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())\n\n def GetBestSizeTuple(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n return self.GetBestSize().asTuple()\n\n def GetBitmap(self, bitmap):\n \"\"\" Get the underlying wx.Bitmap used to paint the control.\n\n Returns\n -------\n result : wx.Bitmap or None\n The bitmap being used to paint the control, or None if\n no bitmap has been supplied.\n\n \"\"\"\n return self._bitmap\n\n def SetBitmap(self, bitmap):\n \"\"\" Set the underlying wx.Bitmap and refresh the widget.\n\n Parameters\n ----------\n bitmap : wx.Bitmap\n The bitmap to paint on the widget.\n \n \"\"\"\n self._bitmap = bitmap\n self.Refresh()\n\n def GetScaledContents(self):\n \"\"\" Whether or not the bitmap is scaled to fit the bounds.\n\n Returns\n -------\n result : bool\n Whether or not the bitmap is scaled to fit the bounds of\n the widget.\n \n \"\"\"\n return self._scaled_contents\n <mask token>\n\n def GetPreserveAspectRatio(self):\n \"\"\" Returns whether or not the aspect ratio of the image is \n maintained during a resize.\n\n \"\"\"\n return self._preserve_aspect_ratio\n\n def SetPreserveAspectRatio(self, preserve):\n \"\"\" Set whether or not to preserve the image aspect ratio.\n\n Parameters\n ----------\n preserve : bool\n If True then the aspect ratio of the image will be preserved\n if it is scaled to fit. Otherwise, the aspect ratio will be\n ignored.\n\n \"\"\"\n self._preserve_aspect_ratio = preserve\n self.Refresh()\n <mask token>\n\n def SetAllowUpscaling(self, allow):\n \"\"\" Set whether or not to allow the image to be scaled beyond\n its natural size.\n\n Parameters\n ----------\n allow : bool\n If True, then the image may be scaled larger than its \n natural if it is scaled to fit. If False, the image will\n never be scaled larger than its natural size. 
In either\n case, the image may be scaled smaller.\n\n \"\"\"\n self._allow_upscaling = allow\n self.Refresh()\n\n\nclass WXImageView(WXControl, AbstractTkImageView):\n \"\"\" A Wx implementation of ImageView.\n\n \"\"\"\n _cached_size_hint = None\n\n def create(self, parent):\n \"\"\" Creates the underlying wxBitmapWidget control.\n\n \"\"\"\n self.widget = wxBitmapWidget(parent)\n\n def initialize(self):\n \"\"\" Initializes the attributes on the underlying control.\n\n \"\"\"\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)\n\n def shell_image_changed(self, image):\n \"\"\" The change handler for the 'image' attribute on the shell \n component.\n\n \"\"\"\n self.set_image(image)\n\n def shell_scale_to_fit_changed(self, scale_to_fit):\n \"\"\" The change handler for the 'scale_to_fit' attribute on the \n shell component.\n\n \"\"\"\n self.set_scale_to_fit(scale_to_fit)\n\n def shell_preserve_aspect_ratio_changed(self, preserve):\n \"\"\" The change handler for the 'preserve_aspect_ratio' attribute\n on the shell component.\n\n \"\"\"\n self.set_preserve_aspect_ratio(preserve)\n\n def shell_allow_upscaling_changed(self, allow):\n \"\"\" The change handler for the 'allow_upscaling' attribute on \n the shell component.\n\n \"\"\"\n self.set_allow_upscaling(allow)\n\n def set_image(self, image):\n \"\"\" Sets the image on the underlying wxBitmapWidget.\n\n \"\"\"\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()\n\n def set_scale_to_fit(self, scale_to_fit):\n \"\"\" Sets whether or not the image scales with the underlying \n control.\n\n \"\"\"\n self.widget.SetScaledContents(scale_to_fit)\n\n def set_preserve_aspect_ratio(self, preserve):\n \"\"\" Sets whether or not to preserve the aspect ratio of the \n image when scaling.\n\n \"\"\"\n self.widget.SetPreserveAspectRatio(preserve)\n\n def set_allow_upscaling(self, allow):\n \"\"\" Sets whether or not the image will scale beyond its natural\n size.\n\n \"\"\"\n self.widget.SetAllowUpscaling(allow)\n",
"step-4": "<mask token>\n\n\nclass wxBitmapWidget(wx.Panel):\n <mask token>\n\n def __init__(self, parent):\n \"\"\" Initialize a wxBitmapWidget.\n\n Parameters\n ----------\n parent : wx.Window\n The wx.Window object which serves as the widget parent.\n \n \"\"\"\n super(wxBitmapWidget, self).__init__(parent)\n self._bitmap = None\n self._scaled_contents = False\n self._preserve_aspect_ratio = False\n self._allow_upscaling = False\n self._resize_timer = None\n self._resizing = False\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n def OnPaint(self, event):\n \"\"\" The paint event handler for the widget.\n\n \"\"\"\n bmp = self._bitmap\n if bmp is None:\n return\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n if not self._scaled_contents:\n paint_x = max(0, int(evt_width / 2.0 - bmp_width / 2.0 + evt_x))\n paint_y = max(0, int(evt_height / 2.0 - bmp_height / 2.0 + evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n elif self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n paint_x = int(evt_width / 2.0 - paint_width / 2.0 + evt_x)\n paint_y = int(evt_height / 2.0 - paint_height / 2.0 + evt_y)\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)\n\n def OnResize(self, event):\n \"\"\" The resize event handler for the widget.\n\n This method is only bound and called when content scaling is\n enabled. It starts(restarts) a timer to perform a high quality\n scaled repaint when resizing is finished.\n\n \"\"\"\n self._resizing = True\n self._resize_timer.Start(60, True)\n\n def OnResizeEnd(self, event):\n \"\"\" The repaint timer event handler.\n\n This method is only bound and called when content scaling is\n enabled and resizing has completed. 
It triggers a high quality\n repaint.\n\n \"\"\"\n self._resizing = False\n self.Refresh()\n\n def GetBestSize(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())\n\n def GetBestSizeTuple(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n return self.GetBestSize().asTuple()\n\n def GetBitmap(self, bitmap):\n \"\"\" Get the underlying wx.Bitmap used to paint the control.\n\n Returns\n -------\n result : wx.Bitmap or None\n The bitmap being used to paint the control, or None if\n no bitmap has been supplied.\n\n \"\"\"\n return self._bitmap\n\n def SetBitmap(self, bitmap):\n \"\"\" Set the underlying wx.Bitmap and refresh the widget.\n\n Parameters\n ----------\n bitmap : wx.Bitmap\n The bitmap to paint on the widget.\n \n \"\"\"\n self._bitmap = bitmap\n self.Refresh()\n\n def GetScaledContents(self):\n \"\"\" Whether or not the bitmap is scaled to fit the bounds.\n\n Returns\n -------\n result : bool\n Whether or not the bitmap is scaled to fit the bounds of\n the widget.\n \n \"\"\"\n return self._scaled_contents\n\n def SetScaledContents(self, scaled):\n \"\"\" Set whether or not the bitmap should be scaled to fit the\n bounds of the widget.\n\n Parameters\n ----------\n scaled : bool\n Whether or not to scale the bitmap to fit the bounds of the\n widget.\n \n \"\"\"\n if scaled:\n if not self._scaled_contents:\n self._scaled_contents = True\n self._resize_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.OnResizeEnd)\n self.Bind(wx.EVT_SIZE, self.OnResize)\n elif self._scaled_contents:\n self._scaled_contents = False\n self._timer = None\n self.Unbind(wx.EVT_TIMER, handler=self.OnResizeEnd)\n self.Unbind(wx.EVT_SIZE, handler=self.OnResize)\n self.Refresh()\n\n def GetPreserveAspectRatio(self):\n \"\"\" Returns whether or not the aspect ratio of the image is \n maintained during a resize.\n\n \"\"\"\n return self._preserve_aspect_ratio\n\n def SetPreserveAspectRatio(self, preserve):\n \"\"\" Set whether or not to preserve the image aspect ratio.\n\n Parameters\n ----------\n preserve : bool\n If True then the aspect ratio of the image will be preserved\n if it is scaled to fit. Otherwise, the aspect ratio will be\n ignored.\n\n \"\"\"\n self._preserve_aspect_ratio = preserve\n self.Refresh()\n\n def GetAllowUpscaling(self):\n \"\"\" Returns whether or not the image can be scaled greater than\n its natural size.\n\n \"\"\"\n return self._allow_upscaling\n\n def SetAllowUpscaling(self, allow):\n \"\"\" Set whether or not to allow the image to be scaled beyond\n its natural size.\n\n Parameters\n ----------\n allow : bool\n If True, then the image may be scaled larger than its \n natural if it is scaled to fit. If False, the image will\n never be scaled larger than its natural size. 
In either\n case, the image may be scaled smaller.\n\n \"\"\"\n self._allow_upscaling = allow\n self.Refresh()\n\n\nclass WXImageView(WXControl, AbstractTkImageView):\n \"\"\" A Wx implementation of ImageView.\n\n \"\"\"\n _cached_size_hint = None\n\n def create(self, parent):\n \"\"\" Creates the underlying wxBitmapWidget control.\n\n \"\"\"\n self.widget = wxBitmapWidget(parent)\n\n def initialize(self):\n \"\"\" Initializes the attributes on the underlying control.\n\n \"\"\"\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)\n\n def shell_image_changed(self, image):\n \"\"\" The change handler for the 'image' attribute on the shell \n component.\n\n \"\"\"\n self.set_image(image)\n\n def shell_scale_to_fit_changed(self, scale_to_fit):\n \"\"\" The change handler for the 'scale_to_fit' attribute on the \n shell component.\n\n \"\"\"\n self.set_scale_to_fit(scale_to_fit)\n\n def shell_preserve_aspect_ratio_changed(self, preserve):\n \"\"\" The change handler for the 'preserve_aspect_ratio' attribute\n on the shell component.\n\n \"\"\"\n self.set_preserve_aspect_ratio(preserve)\n\n def shell_allow_upscaling_changed(self, allow):\n \"\"\" The change handler for the 'allow_upscaling' attribute on \n the shell component.\n\n \"\"\"\n self.set_allow_upscaling(allow)\n\n def set_image(self, image):\n \"\"\" Sets the image on the underlying wxBitmapWidget.\n\n \"\"\"\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()\n\n def set_scale_to_fit(self, scale_to_fit):\n \"\"\" Sets whether or not the image scales with the underlying \n control.\n\n \"\"\"\n self.widget.SetScaledContents(scale_to_fit)\n\n def set_preserve_aspect_ratio(self, preserve):\n \"\"\" Sets whether or not to preserve the aspect ratio of the \n image when scaling.\n\n \"\"\"\n self.widget.SetPreserveAspectRatio(preserve)\n\n def set_allow_upscaling(self, allow):\n \"\"\" Sets whether or not the image will scale beyond its natural\n size.\n\n \"\"\"\n self.widget.SetAllowUpscaling(allow)\n",
"step-5": "#------------------------------------------------------------------------------\n# Copyright (c) 2011, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport wx\n\nfrom .wx_control import WXControl\n\nfrom ...components.image_view import AbstractTkImageView\n\n\nclass wxBitmapWidget(wx.Panel):\n \"\"\" A wx.Panel subclass which paints a provided wx.Bitmap. \n\n This differs from wx.StaticBitmap in that it provides the option to\n scale the provided bitmap to the bounds of the widget. If the widget\n is set to scale its contents, low quality scaling will occur during\n resize, with a high quality pass performed once resizing as finished.\n\n \"\"\"\n def __init__(self, parent):\n \"\"\" Initialize a wxBitmapWidget.\n\n Parameters\n ----------\n parent : wx.Window\n The wx.Window object which serves as the widget parent.\n \n \"\"\"\n super(wxBitmapWidget, self).__init__(parent)\n self._bitmap = None\n self._scaled_contents = False\n self._preserve_aspect_ratio = False\n self._allow_upscaling = False\n self._resize_timer = None\n self._resizing = False\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n\n #--------------------------------------------------------------------------\n # Private API\n #--------------------------------------------------------------------------\n def OnPaint(self, event):\n \"\"\" The paint event handler for the widget.\n\n \"\"\"\n bmp = self._bitmap\n if bmp is None:\n return\n\n bmp_width, bmp_height = bmp.GetWidth(), bmp.GetHeight()\n if bmp_width == 0 or bmp_height == 0:\n return\n\n evt_x = 0\n evt_y = 0\n evt_width, evt_height = self.GetSize().asTuple()\n\n if not self._scaled_contents:\n # If the image isn't scaled, it is centered if possible.\n # Otherwise, it's painted at the origin and clipped.\n paint_x = max(0, int((evt_width / 2. - bmp_width / 2.) + evt_x))\n paint_y = max(0, int((evt_height / 2. - bmp_height / 2.) + evt_y))\n paint_width = bmp_width\n paint_height = bmp_height\n else:\n # If the image *is* scaled, it's scaled size depends on the \n # size of the paint area as well as the other scaling flags.\n if self._preserve_aspect_ratio:\n bmp_ratio = float(bmp_width) / bmp_height\n evt_ratio = float(evt_width) / evt_height\n if evt_ratio >= bmp_ratio:\n if self._allow_upscaling:\n paint_height = evt_height\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = int(paint_height * bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_width = evt_width\n else:\n paint_width = min(bmp_width, evt_width)\n paint_height = int(paint_width / bmp_ratio)\n else:\n if self._allow_upscaling:\n paint_height = evt_height\n paint_width = evt_width\n else:\n paint_height = min(bmp_height, evt_height)\n paint_width = min(bmp_width, evt_width)\n # In all cases of scaling, we know that the scaled image is\n # no larger than the paint area, and can thus be centered.\n paint_x = int((evt_width / 2. - paint_width / 2.) + evt_x)\n paint_y = int((evt_height / 2. - paint_height / 2.) 
+ evt_y)\n\n # Scale the bitmap if needed, using a faster method if the\n # image is currently being resized\n if paint_width != bmp_width or paint_height != bmp_height:\n img = bmp.ConvertToImage()\n if self._resizing:\n quality = wx.IMAGE_QUALITY_NORMAL\n else:\n quality = wx.IMAGE_QUALITY_HIGH\n img.Rescale(paint_width, paint_height, quality)\n bmp = wx.BitmapFromImage(img)\n\n # Finally, draw the bitmap into the computed location\n dc = wx.PaintDC(self)\n dc.DrawBitmap(bmp, paint_x, paint_y)\n\n def OnResize(self, event):\n \"\"\" The resize event handler for the widget.\n\n This method is only bound and called when content scaling is\n enabled. It starts(restarts) a timer to perform a high quality\n scaled repaint when resizing is finished.\n\n \"\"\"\n self._resizing = True\n self._resize_timer.Start(60, True)\n\n def OnResizeEnd(self, event):\n \"\"\" The repaint timer event handler.\n\n This method is only bound and called when content scaling is\n enabled and resizing has completed. It triggers a high quality\n repaint.\n\n \"\"\"\n self._resizing = False\n self.Refresh()\n\n #--------------------------------------------------------------------------\n # Public API\n #--------------------------------------------------------------------------\n def GetBestSize(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())\n\n def GetBestSizeTuple(self):\n \"\"\" Overridden method to return the size of the bitmap as the \n best size for the widget.\n\n \"\"\"\n return self.GetBestSize().asTuple()\n\n def GetBitmap(self, bitmap):\n \"\"\" Get the underlying wx.Bitmap used to paint the control.\n\n Returns\n -------\n result : wx.Bitmap or None\n The bitmap being used to paint the control, or None if\n no bitmap has been supplied.\n\n \"\"\"\n return self._bitmap\n\n def SetBitmap(self, bitmap):\n \"\"\" Set the underlying wx.Bitmap and refresh the widget.\n\n Parameters\n ----------\n bitmap : wx.Bitmap\n The bitmap to paint on the widget.\n \n \"\"\"\n self._bitmap = bitmap\n self.Refresh()\n\n def GetScaledContents(self):\n \"\"\" Whether or not the bitmap is scaled to fit the bounds.\n\n Returns\n -------\n result : bool\n Whether or not the bitmap is scaled to fit the bounds of\n the widget.\n \n \"\"\"\n return self._scaled_contents\n \n def SetScaledContents(self, scaled):\n \"\"\" Set whether or not the bitmap should be scaled to fit the\n bounds of the widget.\n\n Parameters\n ----------\n scaled : bool\n Whether or not to scale the bitmap to fit the bounds of the\n widget.\n \n \"\"\"\n if scaled:\n if not self._scaled_contents:\n self._scaled_contents = True\n self._resize_timer = wx.Timer(self)\n self.Bind(wx.EVT_TIMER, self.OnResizeEnd)\n self.Bind(wx.EVT_SIZE, self.OnResize)\n else:\n if self._scaled_contents:\n self._scaled_contents = False\n self._timer = None\n self.Unbind(wx.EVT_TIMER, handler=self.OnResizeEnd)\n self.Unbind(wx.EVT_SIZE, handler=self.OnResize)\n self.Refresh()\n\n def GetPreserveAspectRatio(self):\n \"\"\" Returns whether or not the aspect ratio of the image is \n maintained during a resize.\n\n \"\"\"\n return self._preserve_aspect_ratio\n\n def SetPreserveAspectRatio(self, preserve):\n \"\"\" Set whether or not to preserve the image aspect ratio.\n\n Parameters\n ----------\n preserve : bool\n If True then the aspect ratio of the image will be preserved\n if it is scaled to fit. 
Otherwise, the aspect ratio will be\n ignored.\n\n \"\"\"\n self._preserve_aspect_ratio = preserve\n self.Refresh()\n \n def GetAllowUpscaling(self):\n \"\"\" Returns whether or not the image can be scaled greater than\n its natural size.\n\n \"\"\"\n return self._allow_upscaling\n\n def SetAllowUpscaling(self, allow):\n \"\"\" Set whether or not to allow the image to be scaled beyond\n its natural size.\n\n Parameters\n ----------\n allow : bool\n If True, then the image may be scaled larger than its \n natural if it is scaled to fit. If False, the image will\n never be scaled larger than its natural size. In either\n case, the image may be scaled smaller.\n\n \"\"\"\n self._allow_upscaling = allow\n self.Refresh()\n\n\nclass WXImageView(WXControl, AbstractTkImageView):\n \"\"\" A Wx implementation of ImageView.\n\n \"\"\"\n #: The internal cached size hint which is used to determine whether\n #: of not a size hint updated event should be emitted when the text\n #: in the label changes\n _cached_size_hint = None\n\n #--------------------------------------------------------------------------\n # Setup methods\n #--------------------------------------------------------------------------\n def create(self, parent):\n \"\"\" Creates the underlying wxBitmapWidget control.\n\n \"\"\"\n self.widget = wxBitmapWidget(parent)\n\n def initialize(self):\n \"\"\" Initializes the attributes on the underlying control.\n\n \"\"\"\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)\n\n #--------------------------------------------------------------------------\n # Implementation\n #--------------------------------------------------------------------------\n def shell_image_changed(self, image):\n \"\"\" The change handler for the 'image' attribute on the shell \n component.\n\n \"\"\"\n self.set_image(image)\n \n def shell_scale_to_fit_changed(self, scale_to_fit):\n \"\"\" The change handler for the 'scale_to_fit' attribute on the \n shell component.\n\n \"\"\"\n self.set_scale_to_fit(scale_to_fit)\n\n def shell_preserve_aspect_ratio_changed(self, preserve):\n \"\"\" The change handler for the 'preserve_aspect_ratio' attribute\n on the shell component.\n\n \"\"\"\n self.set_preserve_aspect_ratio(preserve)\n\n def shell_allow_upscaling_changed(self, allow):\n \"\"\" The change handler for the 'allow_upscaling' attribute on \n the shell component.\n\n \"\"\"\n self.set_allow_upscaling(allow)\n\n #--------------------------------------------------------------------------\n # Widget Update Methods\n #--------------------------------------------------------------------------\n def set_image(self, image):\n \"\"\" Sets the image on the underlying wxBitmapWidget.\n\n \"\"\"\n bmp = image.as_wxBitmap() if image is not None else None\n self.widget.SetBitmap(bmp)\n # Emit a size hint updated event if the size hint has actually\n # changed. This is an optimization so that a constraints update\n # only occurs when the size hint has actually changed. This \n # logic must be implemented here so that the label has been\n # updated before the new size hint is computed. 
Placing this\n # logic on the shell object would not guarantee that the label\n # has been updated at the time the change handler is called.\n cached = self._cached_size_hint\n hint = self._cached_size_hint = self.size_hint()\n if cached != hint:\n self.shell_obj.size_hint_updated()\n \n def set_scale_to_fit(self, scale_to_fit): \n \"\"\" Sets whether or not the image scales with the underlying \n control.\n\n \"\"\"\n self.widget.SetScaledContents(scale_to_fit)\n\n def set_preserve_aspect_ratio(self, preserve):\n \"\"\" Sets whether or not to preserve the aspect ratio of the \n image when scaling.\n\n \"\"\"\n self.widget.SetPreserveAspectRatio(preserve)\n\n def set_allow_upscaling(self, allow):\n \"\"\" Sets whether or not the image will scale beyond its natural\n size.\n\n \"\"\"\n self.widget.SetAllowUpscaling(allow)\n\n",
"step-ids": [
18,
20,
25,
28,
31
]
}
|
[
18,
20,
25,
28,
31
] |
#print pathToConnectionsList(['A','C','B','D','E'])
#['EA','CB','AC','BD', 'DE']
#print independantPathPieces()
#print pathToConnectionsList(pathGenerator())
#print geneFormatToPathSegmentsMini(['CD', 'AB', 'BE', 'EC']) #DA
#print independantPathPieces(['EAC', 'CBD', 'ACB', 'BDE', 'DEA'])
#print greedyCrossover(['EC', 'CD', 'AB', 'BE','DF','FA'],['EC', 'XX', 'XX', 'XX','XX','xx'], 3)
#['ABECD', '', '__', '__']
# def joinPathBits(pathBits):
# index = 0
# for index in range(len(pathBits)):
# # figure out nex and prev point
# while matchFound:
# matchFound = False
# next = pathBits[index][-1]
# prev = pathBits[index][0]
# while True
# index2 = 1
# if next == pathBits[index2][0] and next != '_':
# join one way
# matchFound = True
# elif prev == pathBits[index2][-1] and prev != '_':
# join another
# matchFound = True
# def findpaths(segments):
# path_starts = {} # path_start:path
# path_ends = {} # path_end:path
# starts = {} # start:end of a segment
# #path_prefixes = []
# for segment in segments:
# starts[segment[0]] = segment[1]
# for start in starts:
# next = segment[start]
# if next in starts: # a longer path's been found
def writeToGene(toOrFromPos,whichCodon,whichGene,whatToWrite):
if toOrFromPos == 'to': pos = 1
if toOrFromPos == 'from': pos = 0
#print "which codon: " + str(whichCodon)
#print "postion: " + str(pos)
# check if whichgene[whichcodon is empty]
	if whichCodon == 88: return whichGene # this may be the world's ugliest hack, depending on
	# _ not being a reserved char aka being in the charset but also depending on the num of cities
	# in the prob to be less than 88
spot = whichGene[whichCodon]
val = whichGene[whichCodon][pos]
#print "current value: " + str(val)
if val == whatToWrite: return whichGene
if val == "_":
#spot = ['','']
#print "spot:"
#print spot
spot = list(spot)
spot[pos] = whatToWrite
#print "spot:"
#print spot
#check if val is empty
newGene = whichGene[0:whichCodon] + ["".join(spot)] + whichGene[whichCodon+1:len(whichGene)]
return newGene
return "ERROR, NON CONSISTANT VALUE ALREADY IN POS."
#print writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o')
#writeToGene('to',3,['','','','','','','',''],"x")
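# Hedged illustration (worked example, not from the original author): the call shown
# above, writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o'), writes 'o'
# into the "to" slot (position 1) of codon 2 and returns
# ['__','__','_o','__','__','__','xx','xx']; a codon already holding a different,
# non-'_' value falls through to the ERROR string return instead.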
def tspGeneTemplater(gene,locCodes):
	# assumes that it gets a valid gene which was constructed from common elements in two parents and an additional random element from one parent.
gene = codeBlankSpots(gene)
genecopy = gene
charset = theCharset()
for codonLoc in range(len(gene)):
codon = gene[codonLoc]
if codon !='__':
whereFrom = codon[0]
whereTo = codon[1]
current = locCodes[codonLoc]
whereFromIndex = charset.index(whereFrom)
whereToIndex = charset.index(whereTo)
current = locCodes[codonLoc]
genecopy = writeToGene('from',whereToIndex,genecopy,current)
genecopy = writeToGene('to',whereFromIndex,genecopy,current)
#at this point we should have a template!!!!
# that we can fill in.
return genecopy
#print tspGeneTemplater(['BD', 'CA', '_B', 'A_'], theCharset())
def templateToGene(gene):
# GETS A FULLY TEMPLATED GENE
# MUST NOW FILL UP THE CHARS TO MAKE A VALID GENE! WHAT A DAUNTING TASK!!
# FIRST WE GET THE CHARSETS WE ARE WORKING WITH
# ONE FOR TO AND ONE FOR FROM POSITIONS
#init
chars = theCharset()[0:len(gene)]
toChars = chars
fromChars = chars
# remove already existing chars
for codon in gene:
if codon[0] != "_": fromChars = fromChars.replace(codon[0],'',1)
if codon[1] != "_":
toChars = toChars.replace(codon[1],'',1)
else:
anEmptyToSpot = gene.index(codon)
currentLoc = chars[anEmptyToSpot]
# now we have a list of to and from chars that need to be placed in a valid configuration.
# choose a blank spot to start from (anEmptyTospot)
gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
cont = True
while cont:
toLoc = random.choice(toChars)
toChars = toChars.replace(toLoc,'',1)
gene = writeToGene('from',anEmptyToSpot,gene,currentLoc)
currentLoc = toLoc
writeToGene('to',2,['__','__','x_','__','__','__','xx','xx'],'o')
return connectionList
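# Hedged note on the function above: as written, templateToGene looks unfinished; `cont`
# is never set to False so the while loop cannot terminate, `connectionList` is not
# defined in this scope, and the final writeToGene call on a literal gene discards its
# result. completeTSPGene further below appears to be the working replacement.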
def geneFormatToPathSegments(gene):
charset = theCharset()
segments = []
for i in range(len(gene)):
spot = charset[i]
if gene[i] != '__':
segment = str(gene[i][0]) + str(spot) + str(gene[i][1])
segments.append(segment)
return segments
def indPathPieces(segmentsList):
for thisSegment in segmentsList:
for anotherSegment in segmentsList:
if thisSegment[1:2] == anotherSegment[-2:]:
newSegment = thisSegment
def independantPathPieces(path_segments = []):
# TAKES EDGE SEGMENTS FOR EACH GENE OR SOME SUBSET OF GENES AND MAKES A STRING PATH OF MIN LENGTH
#path_segments = ['LOP','BAC','FYZ','CDF','REX', 'XWL']
#path_segments = ['EAC','CBD']
path_segments = ['EA','CB','AC','BD', 'DE']
# CAREFUL: THERE IS SOME INSANITY LOGIC GOING ON HERE!
#print "path seg: " + str(path_segments)
index = 0
while index < len(path_segments):
next = path_segments[index][-1]
for j in range(len(path_segments)):
prev = path_segments[j][0]
print "next: " + next
print "prev: " + prev
print "index:" + str(index)
print path_segments
if (next == prev) and (next != '_') :
path_segments[index] = path_segments[index] + path_segments[j][1:]
path_segments[j] = '_'
next = path_segments[index][-1]
#index -=1
print path_segments
index +=1
path_segments = [x for x in path_segments if x != '_']
#print "path seg: " + str(path_segments)
return path_segments
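# Hedged walk-through of the loop above with its hard-coded input ['EA','CB','AC','BD','DE']:
# 'EA' absorbs 'AC' to give 'EAC', then 'CB' absorbs 'BD' and 'DE' to give 'CBDE', so the
# function appears to return ['EAC', 'CBDE'] rather than one closed tour, because an index
# is not revisited after later segments grow (the commented-out "index -=1" hints at that).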
def makeTSPGeneX(numLocations):
# this time we are going to do things smarter.
if numLocations < 3 or numLocations > 94:
print "MAX LOCATIONS IS 94, MIN LOCATIONS IS 3."
quit()
	# initialize
locationsCharset = theCharset()[0:numLocations]
path = pathMaker(numLocations)
#fromLocations = locationsCharset
locIndex = dict()
locValue = dict()
# BUILD THE INDEX AND VALUE DICTS
for i in range(numLocations):
locIndex[locationsCharset[i]] = i
locValue[i] = locationsCharset[i]
connectionList = ["" for x in range(numLocations)]
return connectionList
def completeTSPGene(pGene):
# this time we are going to do things smarter.
numLocations = len(pGene)
	# initialize
locationsCharset = theCharset()[0:numLocations]
toLocations = locationsCharset
fromLocations = locationsCharset
locIndex = dict()
locValue = dict()
# BUILD THE INDEX AND VALUE DICTS
for i in range(numLocations):
locIndex[locationsCharset[i]] = i
locValue[i] = locationsCharset[i]
#connectionList = ["__" for x in range(numLocations)]
	# remove existing options from charsets.
for codon in pGene:
if codon[0] != "_": fromLocations = fromLocations.replace(codon[0],'',1)
if codon[1] != "_":
toLocations = toLocations.replace(codon[1],'',1)
else:
# grab details about a codon where the to location is empty.
anEmptyToSpot = pGene.index(codon)
currentLoc = locationsCharset[anEmptyToSpot]
# we define an empty fromLoc, we have a currentLoc, and we get a toLoc!
fromLoc = "_"
#toLoc = random.choice(toLocations)
#toLocations = toLocations.replace(currentLoc, "")
for i in range(numLocations+1):
print len(toLocations)
print len(fromLocations)
print "wherefrom: " + fromLoc
print "currentloc: " + currentLoc
print "to locs options: " + str(toLocations)
print "from locs: " + str(fromLocations)
print pGene
print
#place the from loc in the from position of the current loc
if fromLoc != "_":
pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][1])
fromLocations = fromLocations.replace(fromLoc,'',1)
if len(toLocations) == 0:
pGene[locIndex[currentLoc]] = str(fromLoc[0] ) + str(pGene[locIndex[currentLoc]][1])
return pGene
toLoc = pGene[locIndex[currentLoc]][1]
if toLoc == "_":
# get a to loc only if needed
#if len(toLocations) == 2 and len(fromLocations) == 1 and (fromLocations == toLoc)
toLoc = currentLoc
while (toLoc == currentLoc) or (toLoc == fromLoc) :
if len(toLocations) == 0:
toLoc = locValue[anEmptyToSpot]
else:
toLoc = random.choice(toLocations)
toLocations = toLocations.replace(toLoc, "")
#place it in the to position of the current loc
pGene[locIndex[currentLoc]] = str(pGene[locIndex[currentLoc]][0]) + str(toLoc)
#prepare to move to the new loc!
fromLoc = currentLoc
currentLoc = toLoc
pGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][0])
return pGene
#print completeTSPGene(['__','CD','_B','B_','__','__','AC','FI','HA'])
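# Hedged summary of the gene encoding used throughout this file: codon i of a gene is a
# two-character string holding, for the city theCharset()[i], its predecessor on the tour
# in position 0 (the 'from' slot) and its successor in position 1 (the 'to' slot), with
# '_' marking a slot that has not been filled in yet.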
|
normal
|
{
"blob_id": "b4a96d5df56acd545e9919e202c462ee710a0339",
"index": 5339,
"step-1": "#print pathToConnectionsList(['A','C','B','D','E'])\n#['EA','CB','AC','BD', 'DE']\n#print independantPathPieces()\n#print pathToConnectionsList(pathGenerator())\n#print geneFormatToPathSegmentsMini(['CD', 'AB', 'BE', 'EC']) #DA\n#print independantPathPieces(['EAC', 'CBD', 'ACB', 'BDE', 'DEA'])\n#print greedyCrossover(['EC', 'CD', 'AB', 'BE','DF','FA'],['EC', 'XX', 'XX', 'XX','XX','xx'], 3)\n\n\n#['ABECD', '', '__', '__']\n\n# def joinPathBits(pathBits):\n# \tindex = 0\n# \tfor index in range(len(pathBits)):\n# \t\t# figure out nex and prev point\n\t\t\n# \t\twhile matchFound:\n# \t\t\tmatchFound = False\n# \t\t\tnext = pathBits[index][-1]\n# \t\t\tprev = pathBits[index][0]\n\n# \t\t\twhile True\n# \t\t\tindex2 = 1\t\t\t\t\n# \t\t\tif next == pathBits[index2][0] and next != '_':\n# \t\t\t\tjoin one way\n# \t\t\t\tmatchFound = True\n# \t\t\telif prev == pathBits[index2][-1] and prev != '_':\n# \t\t\t\tjoin another\n# \t\t\t\tmatchFound = True\n\n\n\n# def findpaths(segments):\n# \tpath_starts = {} # path_start:path\n# \tpath_ends = {} # path_end:path\n# \tstarts = {} # start:end of a segment\n# \t#path_prefixes = []\n# \tfor segment in segments:\n# \t\tstarts[segment[0]] = segment[1]\n# \tfor start in starts:\n# \t\tnext = segment[start]\n# \t\tif next in starts: # a longer path's been found\n\ndef writeToGene(toOrFromPos,whichCodon,whichGene,whatToWrite):\n\tif toOrFromPos == 'to': pos = 1\n\tif toOrFromPos == 'from': pos = 0\n\t#print \"which codon: \" + str(whichCodon)\n\t#print \"postion: \" + str(pos) \n\t# check if whichgene[whichcodon is empty]\n\t\n\tif whichCodon == 88: return whichGene # this may be the worlds ugliest hack, depending on\n\t# _ not being a reserved char aka being in the charset but also depending on the num of cities\n\t# in the prob to be less that 88\n\t\n\tspot = whichGene[whichCodon]\n\tval = whichGene[whichCodon][pos]\n\t#print \"current value: \" + str(val)\n\n\tif val == whatToWrite: return whichGene\n\tif val == \"_\":\n\t\t#spot = ['','']\n\t\t#print \"spot:\"\n\t\t#print spot\n\t\tspot = list(spot)\n\t\tspot[pos] = whatToWrite\n\t\t#print \"spot:\"\n\t\t#print spot\n\n\t\t#check if val is empty\n\t\tnewGene = whichGene[0:whichCodon] + [\"\".join(spot)] + whichGene[whichCodon+1:len(whichGene)]\n\t\treturn newGene\n\t\n\treturn \"ERROR, NON CONSISTANT VALUE ALREADY IN POS.\"\n\n#print writeToGene('to',2,['__','__','__','__','__','__','xx','xx'],'o')\n#writeToGene('to',3,['','','','','','','',''],\"x\")\n\n\n\ndef tspGeneTemplater(gene,locCodes):\n\t# assumes that it gets a valid gene which was constructed by common elements in two parents and an additional random element from on parent.\n\tgene = codeBlankSpots(gene)\n\tgenecopy = gene\n\tcharset = theCharset()\n\n\tfor codonLoc in range(len(gene)):\n\t\tcodon = gene[codonLoc]\n\t\tif codon !='__':\n\t\t\twhereFrom = codon[0]\n\t\t\twhereTo = codon[1]\n\t\t\tcurrent = locCodes[codonLoc]\n\n\t\t\twhereFromIndex = charset.index(whereFrom) \n\t\t\twhereToIndex = charset.index(whereTo)\n\t\t\tcurrent = locCodes[codonLoc]\n\n\t\t\tgenecopy = writeToGene('from',whereToIndex,genecopy,current)\n\t\t\tgenecopy = writeToGene('to',whereFromIndex,genecopy,current)\n\n\t#at this point we should have a template!!!!\n\t# that we can fill in.\n\treturn genecopy\n\n#print tspGeneTemplater(['BD', 'CA', '_B', 'A_'], theCharset())\n\ndef templateToGene(gene):\n\t# GETS A FULLY TEMPLATED GENE\n\t# MUST NOW FILL UP THE CHARS TO MAKE A VALID GENE! 
WHAT A DAUNTING TASK!!\n\n\t# FIRST WE GET THE CHARSETS WE ARE WORKING WITH\n\t# ONE FOR TO AND ONE FOR FROM POSITIONS\n\t#init\n\tchars = theCharset()[0:len(gene)]\n\ttoChars = chars\n\tfromChars = chars\n\n\t# remove already existing chars\n\tfor codon in gene:\n\t\tif codon[0] != \"_\": fromChars = fromChars.replace(codon[0],'',1)\n\t\tif codon[1] != \"_\":\n\t\t\ttoChars = toChars.replace(codon[1],'',1)\n\t\telse:\n\t\t\tanEmptyToSpot = gene.index(codon)\n\t\t\tcurrentLoc = chars[anEmptyToSpot]\n\n\t# now we have a list of to and from chars that need to be placed in a valid configuration.\n\t# choose a blank spot to start from (anEmptyTospot)\n\tgene = writeToGene('from',anEmptyToSpot,gene,currentLoc)\n\tcont = True\n\twhile cont:\t\n\t\ttoLoc = random.choice(toChars)\n\t\ttoChars = toChars.replace(toLoc,'',1)\n\t\tgene = writeToGene('from',anEmptyToSpot,gene,currentLoc)\n\n\t\tcurrentLoc = toLoc\n\n\twriteToGene('to',2,['__','__','x_','__','__','__','xx','xx'],'o')\n\treturn connectionList\n\n\ndef geneFormatToPathSegments(gene):\n\tcharset = theCharset()\n\tsegments = []\n\tfor i in range(len(gene)):\n\t\tspot = charset[i]\n\t\tif gene[i] != '__':\n\t\t\tsegment = str(gene[i][0]) + str(spot) + str(gene[i][1])\n\t\t\tsegments.append(segment)\n\treturn segments\n\n\n\ndef indPathPieces(segmentsList):\n\tfor thisSegment in segmentsList:\n\n\t\tfor anotherSegment in segmentsList:\n\t\t\tif thisSegment[1:2] == anotherSegment[-2:]:\n\t\t\t\tnewSegment = thisSegment\n\ndef independantPathPieces(path_segments = []):\n\t# TAKES EDGE SEGMENTS FOR EACH GENE OR SOME SUBSET OF GENES AND MAKES A STRING PATH OF MIN LENGTH\n\t#path_segments = ['LOP','BAC','FYZ','CDF','REX', 'XWL']\n\t#path_segments = ['EAC','CBD']\n\tpath_segments = ['EA','CB','AC','BD', 'DE']\n\t# CAREFUL: THERE IS SOME INSANITY LOGIC GOING ON HERE!\n\t#print \"path seg: \" + str(path_segments)\n\tindex = 0\n\twhile index < len(path_segments):\n\t\tnext = path_segments[index][-1]\n\t\t\n\t\n\t\tfor j in range(len(path_segments)):\n\t\t\tprev = path_segments[j][0]\n\t\t\tprint \"next: \" + next\n\t\t\tprint \"prev: \" + prev\n\t\t\tprint \"index:\" + str(index)\n\t\t\tprint path_segments\n\t\t\tif (next == prev) and (next != '_') :\n\t\t\t\tpath_segments[index] = path_segments[index] + path_segments[j][1:]\n\t\t\t\tpath_segments[j] = '_'\n\t\t\t\tnext = path_segments[index][-1]\n\t\t\t\t#index -=1\n\n\t\t\tprint path_segments\n\t\tindex +=1\n\tpath_segments = [x for x in path_segments if x != '_']\n\t#print \"path seg: \" + str(path_segments)\n\treturn path_segments\n\n\tdef makeTSPGeneX(numLocations):\n\t# this time we are going to do things smarter.\n\tif numLocations < 3 or numLocations > 94:\n\t\tprint \"MAX LOCATIONS IS 94, MIN LOCATIONS IS 3.\"\n\t\tquit()\n\n\t# intialize\n\tlocationsCharset = theCharset()[0:numLocations]\n\tpath = pathMaker(numLocations)\n\t#fromLocations = locationsCharset\n\n\tlocIndex = dict()\n\tlocValue = dict()\n\t\n\t# BUILD THE INDEX AND VALUE DICTS\n\tfor i in range(numLocations):\n\t\tlocIndex[locationsCharset[i]] = i\n\t\tlocValue[i] = locationsCharset[i]\n\t\tconnectionList = [\"\" for x in range(numLocations)]\n\n\treturn connectionList\n\n\ndef completeTSPGene(pGene):\n\t# this time we are going to do things smarter.\n\tnumLocations = len(pGene) \n\n\t# intialize\n\tlocationsCharset = theCharset()[0:numLocations]\n\ttoLocations = locationsCharset\n\tfromLocations = locationsCharset\n\n\tlocIndex = dict()\n\tlocValue = dict()\n\t\n\t# BUILD THE INDEX AND VALUE DICTS\n\tfor i in 
range(numLocations):\n\t\tlocIndex[locationsCharset[i]] = i\n\t\tlocValue[i] = locationsCharset[i]\n\t\t#connectionList = [\"__\" for x in range(numLocations)]\n\n\t# remove existing options from charsrets.\n\tfor codon in pGene:\n\t\tif codon[0] != \"_\": fromLocations = fromLocations.replace(codon[0],'',1)\n\t\tif codon[1] != \"_\":\n\t\t\ttoLocations = toLocations.replace(codon[1],'',1)\n\t\telse:\n\t\t\t# grab details about a codon where the to location is empty. \n\t\t\tanEmptyToSpot = pGene.index(codon)\n\t\t\tcurrentLoc = locationsCharset[anEmptyToSpot]\n\n\t# we define an empty fromLoc, we have a currentLoc, and we get a toLoc!\n\tfromLoc = \"_\"\n\t#toLoc = random.choice(toLocations)\n\t#toLocations = toLocations.replace(currentLoc, \"\")\n\n\t\n\tfor i in range(numLocations+1):\n\t\tprint len(toLocations)\n\t\tprint len(fromLocations)\n\t\tprint \"wherefrom: \" + fromLoc\n\t\tprint \"currentloc: \" + currentLoc\n\t\tprint \"to locs options: \" + str(toLocations)\n\t\tprint \"from locs: \" + str(fromLocations)\n\t\tprint pGene\n\t\tprint \n\t\t#place the from loc in the from position of the current loc\n\t\tif fromLoc != \"_\": \n\t\t\tpGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][1])\n\t\t\tfromLocations = fromLocations.replace(fromLoc,'',1)\n\n\n\t\tif len(toLocations) == 0:\n\t\t\tpGene[locIndex[currentLoc]] = str(fromLoc[0] ) + str(pGene[locIndex[currentLoc]][1])\n\t\t\treturn pGene\n\n\t\ttoLoc = pGene[locIndex[currentLoc]][1]\n\t\tif toLoc == \"_\":\n\t\t\t# get a to loc only if needed\n\t\t\t#if len(toLocations) == 2 and len(fromLocations) == 1 and (fromLocations == toLoc)\n\n\t\t\ttoLoc = currentLoc\n\t\t\twhile (toLoc == currentLoc) or (toLoc == fromLoc) :\n\t\t\t\tif len(toLocations) == 0:\n\t\t\t\t\ttoLoc = locValue[anEmptyToSpot]\n\t\t\t\telse:\t\t\t\n\t\t\t\t\ttoLoc = random.choice(toLocations)\n\t\t\ttoLocations = toLocations.replace(toLoc, \"\")\n\n\t\t#place it in the to position of the current loc\n\t\tpGene[locIndex[currentLoc]] = str(pGene[locIndex[currentLoc]][0]) + str(toLoc)\n\n\t\t#prepare to move to the new loc!\n\t\tfromLoc = currentLoc\n\t\tcurrentLoc = toLoc\n\n\tpGene[locIndex[currentLoc]] = str(fromLoc) + str(pGene[locIndex[currentLoc]][0])\n\treturn pGene\n\n#print completeTSPGene(['__','CD','_B','B_','__','__','AC','FI','HA'])",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('gestionadmin', '0133_auto_20200618_1339')]
operations = [migrations.RemoveField(model_name='comprasenc', name=
'empleado')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('gestionadmin', '0133_auto_20200618_1339')]
operations = [migrations.RemoveField(model_name='comprasenc', name=
'empleado')]
<|reserved_special_token_1|>
# Generated by Django 2.2.6 on 2020-06-18 14:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gestionadmin', '0133_auto_20200618_1339'),
]
operations = [
migrations.RemoveField(
model_name='comprasenc',
name='empleado',
),
]
|
flexible
|
{
"blob_id": "f96a7bef48e7df2899343029a2fae9697125a5b2",
"index": 5203,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('gestionadmin', '0133_auto_20200618_1339')]\n operations = [migrations.RemoveField(model_name='comprasenc', name=\n 'empleado')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('gestionadmin', '0133_auto_20200618_1339')]\n operations = [migrations.RemoveField(model_name='comprasenc', name=\n 'empleado')]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-06-18 14:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gestionadmin', '0133_auto_20200618_1339'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='comprasenc',\n name='empleado',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/local/bin/python3.3
'''
http://projecteuler.net/problem=127()
abc-hits
Problem 127
The radical of n, rad(n), is the product of distinct prime factors of n. For example, 504 = 23 × 32 × 7, so rad(504) = 2 × 3 × 7 = 42.
We shall define the triplet of positive integers (a, b, c) to be an abc-hit if:
GCD(a, b) = GCD(a, c) = GCD(b, c) = 1
a < b
a + b = c
rad(abc) < c
For example, (5, 27, 32) is an abc-hit, because:
GCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1
5 < 27
5 + 27 = 32
rad(4320) = 30 < 32
It turns out that abc-hits are quite rare and there are only thirty-one abc-hits for c < 1000, with ∑c = 12523.
Find ∑c for c < 120000.
'''
'''
Notes on problem 127():
Very slow
'''
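# Illustrative sketch only, not used by the solution below: a direct trial-division
# computation of rad(n) as defined in the problem statement, for example
# rad_example(504) == 42 since 504 = 2**3 * 3**2 * 7.
def rad_example(n):
    r, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            r *= p
            while n % p == 0:
                n //= p
        p += 1
    if n > 1:
        r *= n
    return r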
from PE_factors import genFactors
from PE_basic import product
def problem127():
GOAL = 120000
rad = {} # rad[6] = {2,3}, radn[8] = {2}
for primes in genFactors(GOAL):
rad[product(primes)] = (set(primes), product(set(primes)))
def relprime(s, t):
return s & t == set()
found = 0
total = 0
for b in range(1, GOAL):
for a in range(1, min(b, GOAL - b)):
c = a + b
x, y, z = rad[a], rad[b], rad[c]
if x[0] & y[0] != set():
continue
if x[1] * y[1] * z[1] < c:
found += 1
total += c
return total
if __name__ == "__main__":
print(problem127() == 18407904)
|
normal
|
{
"blob_id": "646f6a0afc3dc129250c26270dda4355b8cea080",
"index": 1003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == '__main__':\n print(problem127() == 18407904)\n",
"step-4": "<mask token>\nfrom PE_factors import genFactors\nfrom PE_basic import product\n\n\ndef problem127():\n GOAL = 120000\n rad = {}\n for primes in genFactors(GOAL):\n rad[product(primes)] = set(primes), product(set(primes))\n\n def relprime(s, t):\n return s & t == set()\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == '__main__':\n print(problem127() == 18407904)\n",
"step-5": "#!/usr/local/bin/python3.3\n\n'''\nhttp://projecteuler.net/problem=127()\nabc-hits\nProblem 127\nThe radical of n, rad(n), is the product of distinct prime factors of n. For example, 504 = 23 × 32 × 7, so rad(504) = 2 × 3 × 7 = 42.\n\nWe shall define the triplet of positive integers (a, b, c) to be an abc-hit if:\n\nGCD(a, b) = GCD(a, c) = GCD(b, c) = 1\na < b\na + b = c\nrad(abc) < c\nFor example, (5, 27, 32) is an abc-hit, because:\n\nGCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1\n5 < 27\n5 + 27 = 32\nrad(4320) = 30 < 32\nIt turns out that abc-hits are quite rare and there are only thirty-one abc-hits for c < 1000, with ∑c = 12523.\n\nFind ∑c for c < 120000.\n'''\n\n'''\nNotes on problem 127():\nVery slow\n'''\n\nfrom PE_factors import genFactors\nfrom PE_basic import product\n\ndef problem127():\n GOAL = 120000\n\n rad = {} # rad[6] = {2,3}, radn[8] = {2}\n for primes in genFactors(GOAL):\n rad[product(primes)] = (set(primes), product(set(primes)))\n\n def relprime(s, t):\n return s & t == set()\n\n found = 0\n total = 0\n for b in range(1, GOAL):\n for a in range(1, min(b, GOAL - b)):\n c = a + b\n x, y, z = rad[a], rad[b], rad[c]\n if x[0] & y[0] != set():\n continue\n if x[1] * y[1] * z[1] < c:\n found += 1\n total += c\n return total\n\n\nif __name__ == \"__main__\":\n print(problem127() == 18407904)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
message += """
Enter 'quit' to stop entering toppings"""
<|reserved_special_token_0|>
while True:
pizza = input(message1)
topping = input(message)
if topping == 'quit':
break
else:
pizzas[pizza] = topping
print(pizzas)
<|reserved_special_token_1|>
message1 = 'What Pizza do you want?'
message = 'What type of Pizza topping do you want?'
message += """
Enter 'quit' to stop entering toppings"""
pizzas = {}
while True:
pizza = input(message1)
topping = input(message)
if topping == 'quit':
break
else:
pizzas[pizza] = topping
print(pizzas)
<|reserved_special_token_1|>
#def pizzaTopping():
message1 = "What Pizza do you want?"
message = "What type of Pizza topping do you want?"
message += "\n Enter 'quit' to stop entering toppings"
pizzas = {}
while True:
pizza = input(message1)
topping = input(message)
if topping == "quit":
break
else:
pizzas[pizza] = topping
#toppings.append(topping)
#return toppings
#print(pizzaTopping())
#print('We will add the following toppings: ' + str(toppings))
print(pizzas)
|
flexible
|
{
"blob_id": "bb3cba9847f2318a5043975e4b659265a7442177",
"index": 6309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmessage += \"\"\"\n Enter 'quit' to stop entering toppings\"\"\"\n<mask token>\nwhile True:\n pizza = input(message1)\n topping = input(message)\n if topping == 'quit':\n break\n else:\n pizzas[pizza] = topping\nprint(pizzas)\n",
"step-3": "message1 = 'What Pizza do you want?'\nmessage = 'What type of Pizza topping do you want?'\nmessage += \"\"\"\n Enter 'quit' to stop entering toppings\"\"\"\npizzas = {}\nwhile True:\n pizza = input(message1)\n topping = input(message)\n if topping == 'quit':\n break\n else:\n pizzas[pizza] = topping\nprint(pizzas)\n",
"step-4": "#def pizzaTopping():\nmessage1 = \"What Pizza do you want?\"\nmessage = \"What type of Pizza topping do you want?\"\nmessage += \"\\n Enter 'quit' to stop entering toppings\" \n\npizzas = {}\n\n\nwhile True:\n pizza = input(message1)\n topping = input(message)\n\n if topping == \"quit\":\n break\n else:\n pizzas[pizza] = topping\n #toppings.append(topping)\n\n\n#return toppings\n#print(pizzaTopping())\n#print('We will add the following toppings: ' + str(toppings))\nprint(pizzas)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def format_amount(a):
return a.replace(',', '').strip().replace('%', '').replace('$', '')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def format_amount(a):
return a.replace(',', '').strip().replace('%', '').replace('$', '')
def create_json(gdp, coords):
line_list = gdp.split('\n')
column_list = [x.split('\t') for x in line_list if x != '']
line_list = coords.split('\n')
coord_list = [x.split(',') for x in line_list if x != '']
coord_dict = {}
for i in coord_list:
coord_dict[format_amount(i[0])] = i[1:]
out = """// This file is automatically generated by game-statics/utils/countryRON.py.
// Please do not edit."""
out += '\n['
for index in range(len(column_list)):
coords = coord_dict[format_amount(column_list[index][1])]
print(coords)
out += '('
out += 'name:"' + format_amount(column_list[index][1]) + '",'
out += 'gdp:' + format_amount(column_list[index][2]) + ','
out += 'population:' + format_amount(column_list[index][5]) + ','
out += 'lat:' + format_amount(coords[1]) + ','
out += 'long:' + format_amount(coords[2]) + ''
out += ')'
if index != len(column_list) - 1:
out += ','
out += ']'
return out
def create_file():
data = create_json(d, coords)
file = open('../assets/Countries.ron', 'w', encoding='utf8')
file.write(data)
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def format_amount(a):
return a.replace(',', '').strip().replace('%', '').replace('$', '')
def create_json(gdp, coords):
line_list = gdp.split('\n')
column_list = [x.split('\t') for x in line_list if x != '']
line_list = coords.split('\n')
coord_list = [x.split(',') for x in line_list if x != '']
coord_dict = {}
for i in coord_list:
coord_dict[format_amount(i[0])] = i[1:]
out = """// This file is automatically generated by game-statics/utils/countryRON.py.
// Please do not edit."""
out += '\n['
for index in range(len(column_list)):
coords = coord_dict[format_amount(column_list[index][1])]
print(coords)
out += '('
out += 'name:"' + format_amount(column_list[index][1]) + '",'
out += 'gdp:' + format_amount(column_list[index][2]) + ','
out += 'population:' + format_amount(column_list[index][5]) + ','
out += 'lat:' + format_amount(coords[1]) + ','
out += 'long:' + format_amount(coords[2]) + ''
out += ')'
if index != len(column_list) - 1:
out += ','
out += ']'
return out
def create_file():
data = create_json(d, coords)
file = open('../assets/Countries.ron', 'w', encoding='utf8')
file.write(data)
file.close()
<|reserved_special_token_0|>
create_file()
<|reserved_special_token_1|>
def format_amount(a):
return a.replace(',', '').strip().replace('%', '').replace('$', '')
def create_json(gdp, coords):
line_list = gdp.split('\n')
column_list = [x.split('\t') for x in line_list if x != '']
line_list = coords.split('\n')
coord_list = [x.split(',') for x in line_list if x != '']
coord_dict = {}
for i in coord_list:
coord_dict[format_amount(i[0])] = i[1:]
out = """// This file is automatically generated by game-statics/utils/countryRON.py.
// Please do not edit."""
out += '\n['
for index in range(len(column_list)):
coords = coord_dict[format_amount(column_list[index][1])]
print(coords)
out += '('
out += 'name:"' + format_amount(column_list[index][1]) + '",'
out += 'gdp:' + format_amount(column_list[index][2]) + ','
out += 'population:' + format_amount(column_list[index][5]) + ','
out += 'lat:' + format_amount(coords[1]) + ','
out += 'long:' + format_amount(coords[2]) + ''
out += ')'
if index != len(column_list) - 1:
out += ','
out += ']'
return out
def create_file():
data = create_json(d, coords)
file = open('../assets/Countries.ron', 'w', encoding='utf8')
file.write(data)
file.close()
d = """
1 United States $19,485,394,000,000 $19.485 trillion 2.27% 325,084,756 $59,939 24.08%
2 China $12,237,700,479,375 $12.238 trillion 6.90% 1,421,021,791 $8,612 15.12%
3 Japan $4,872,415,104,315 $4.872 trillion 1.71% 127,502,725 $38,214 6.02%
4 Germany $3,693,204,332,230 $3.693 trillion 2.22% 82,658,409 $44,680 4.56%
5 India $2,650,725,335,364 $2.651 trillion 6.68% 1,338,676,785 $1,980 3.28%
6 United Kingdom $2,637,866,340,434 $2.638 trillion 1.79% 66,727,461 $39,532 3.26%
7 France $2,582,501,307,216 $2.583 trillion 1.82% 64,842,509 $39,827 3.19%
8 Brazil $2,053,594,877,013 $2.054 trillion 0.98% 207,833,823 $9,881 2.54%
9 Italy $1,943,835,376,342 $1.944 trillion 1.50% 60,673,701 $32,038 2.40%
10 Canada $1,647,120,175,449 $1.647 trillion 3.05% 36,732,095 $44,841 2.04%
11 Russia $1,578,417,211,937 $1.578 trillion 1.55% 145,530,082 $10,846 1.95%
12 South Korea $1,530,750,923,149 $1.531 trillion 3.06% 51,096,415 $29,958 1.89%
13 Australia $1,323,421,072,479 $1.323 trillion 1.96% 24,584,620 $53,831 1.64%
14 Spain $1,314,314,164,402 $1.314 trillion 3.05% 46,647,428 $28,175 1.62%
15 Mexico $1,150,887,823,404 $1.151 trillion 2.04% 124,777,324 $9,224 1.42%
16 Indonesia $1,015,420,587,285 $1.015 trillion 5.07% 264,650,963 $3,837 1.25%
17 Turkey $851,549,299,635 $852 billion 7.44% 81,116,450 $10,498 1.05%
18 Netherlands $830,572,618,850 $831 billion 3.16% 17,021,347 $48,796 1.03%
19 Saudi Arabia $686,738,400,000 $687 billion -0.86% 33,101,179 $20,747 0.85%
20 Switzerland $678,965,423,322 $679 billion 1.09% 8,455,804 $80,296 0.84%
21 Argentina $637,430,331,479 $637 billion 2.85% 43,937,140 $14,508 0.79%
22 Sweden $535,607,385,506 $536 billion 2.29% 9,904,896 $54,075 0.66%
23 Poland $526,465,839,003 $526 billion 4.81% 37,953,180 $13,871 0.65%
24 Belgium $494,763,551,891 $495 billion 1.73% 11,419,748 $43,325 0.61%
25 Thailand $455,302,682,986 $455 billion 3.91% 69,209,810 $6,579 0.56%
26 Iran $454,012,768,724 $454 billion 3.76% 80,673,883 $5,628 0.56%
27 Austria $416,835,975,862 $417 billion 3.04% 8,819,901 $47,261 0.52%
28 Norway $399,488,897,844 $399 billion 1.92% 5,296,326 $75,428 0.49%
29 United Arab Emirates $382,575,085,092 $383 billion 0.79% 9,487,203 $40,325 0.47%
30 Nigeria $375,745,486,521 $376 billion 0.81% 190,873,244 $1,969 0.46%
31 Israel $353,268,411,919 $353 billion 3.33% 8,243,848 $42,852 0.44%
32 South Africa $348,871,647,960 $349 billion 1.32% 57,009,756 $6,120 0.43%
33 Hong Kong $341,449,340,451 $341 billion 3.79% 7,306,322 $46,733 0.42%
34 Ireland $331,430,014,003 $331 billion 7.80% 4,753,279 $69,727 0.41%
35 Denmark $329,865,537,183 $330 billion 2.24% 5,732,274 $57,545 0.41%
36 Singapore $323,907,234,412 $324 billion 3.62% 5,708,041 $56,746 0.40%
37 Malaysia $314,710,259,511 $315 billion 5.90% 31,104,646 $10,118 0.39%
38 Colombia $314,457,601,860 $314 billion 1.79% 48,909,839 $6,429 0.39%
39 Philippines $313,595,208,737 $314 billion 6.68% 105,172,925 $2,982 0.39%
40 Pakistan $304,951,818,494 $305 billion 5.70% 207,906,209 $1,467 0.38%
41 Chile $277,075,944,402 $277 billion 1.49% 18,470,439 $15,001 0.34%
42 Finland $252,301,837,573 $252 billion 2.63% 5,511,371 $45,778 0.31%
43 Bangladesh $249,723,862,487 $250 billion 7.28% 159,685,424 $1,564 0.31%
44 Egypt $235,369,129,338 $235 billion 4.18% 96,442,591 $2,441 0.29%
45 Vietnam $223,779,865,815 $224 billion 6.81% 94,600,648 $2,366 0.28%
46 Portugal $219,308,128,887 $219 billion 2.68% 10,288,527 $21,316 0.27%
47 Czech Republic $215,913,545,038 $216 billion 4.29% 10,641,034 $20,291 0.27%
48 Romania $211,883,923,504 $212 billion 7.26% 19,653,969 $10,781 0.26%
49 Peru $211,389,272,242 $211 billion 2.53% 31,444,298 $6,723 0.26%
50 New Zealand $204,139,049,909 $204 billion 3.03% 4,702,034 $43,415 0.25%
51 Greece $203,085,551,429 $203 billion 1.35% 10,569,450 $19,214 0.25%
52 Iraq $192,060,810,811 $192 billion -2.07% 37,552,781 $5,114 0.24%
53 Algeria $167,555,280,113 $168 billion 1.60% 41,389,189 $4,048 0.21%
54 Qatar $166,928,571,429 $167 billion 1.58% 2,724,728 $61,264 0.21%
55 Kazakhstan $162,886,867,832 $163 billion 4.10% 18,080,019 $9,009 0.20%
56 Hungary $139,761,138,103 $140 billion 3.99% 9,729,823 $14,364 0.17%
57 Angola $122,123,822,334 $122 billion -0.15% 29,816,766 $4,096 0.15%
58 Kuwait $120,126,277,613 $120 billion -2.87% 4,056,099 $29,616 0.15%
59 Sudan $117,487,857,143 $117 billion 4.28% 40,813,397 $2,879 0.15%
60 Ukraine $112,154,185,121 $112 billion 2.52% 44,487,709 $2,521 0.14%
61 Morocco $109,708,728,849 $110 billion 4.09% 35,581,255 $3,083 0.14%
62 Ecuador $104,295,862,000 $104 billion 2.37% 16,785,361 $6,214 0.13%
63 Cuba $96,851,000,000 $96.85 billion 1.78% 11,339,254 $8,541 0.12%
64 Slovakia $95,617,670,260 $95.62 billion 3.40% 5,447,900 $17,551 0.12%
65 Sri Lanka $87,357,205,923 $87.36 billion 3.31% 21,128,032 $4,135 0.11%
66 Ethiopia $80,561,496,134 $80.56 billion 10.25% 106,399,924 $757 0.10%
67 Kenya $79,263,075,749 $79.26 billion 4.87% 50,221,142 $1,578 0.10%
68 Dominican Republic $75,931,656,815 $75.93 billion 4.55% 10,513,104 $7,223 0.09%
69 Guatemala $75,620,095,538 $75.62 billion 2.76% 16,914,970 $4,471 0.09%
70 Oman $70,783,875,163 $70.78 billion -0.27% 4,665,928 $15,170 0.09%
71 Myanmar $67,068,745,521 $67.07 billion 6.76% 53,382,523 $1,256 0.08%
72 Luxembourg $62,316,359,824 $62.32 billion 2.30% 591,910 $105,280 0.08%
73 Panama $62,283,756,584 $62.28 billion 5.32% 4,106,769 $15,166 0.08%
74 Ghana $58,996,776,238 $59.00 billion 8.14% 29,121,465 $2,026 0.07%
75 Bulgaria $58,220,973,783 $58.22 billion 3.81% 7,102,444 $8,197 0.07%
76 Costa Rica $57,285,984,448 $57.29 billion 3.28% 4,949,954 $11,573 0.07%
77 Uruguay $56,156,972,158 $56.16 billion 2.66% 3,436,641 $16,341 0.07%
78 Croatia $55,213,087,271 $55.21 billion 2.92% 4,182,857 $13,200 0.07%
79 Belarus $54,456,465,473 $54.46 billion 2.42% 9,450,231 $5,762 0.07%
80 Lebanon $53,576,985,687 $53.58 billion 1.53% 6,819,373 $7,857 0.07%
81 Tanzania $53,320,625,959 $53.32 billion 7.10% 54,660,339 $975 0.07%
82 Macau $50,361,201,096 $50.36 billion 9.10% 622,585 $80,890 0.06%
83 Uzbekistan $49,677,172,714 $49.68 billion 5.30% 31,959,785 $1,554 0.06%
84 Slovenia $48,769,655,479 $48.77 billion 5.00% 2,076,394 $23,488 0.06%
85 Lithuania $47,544,459,559 $47.54 billion 3.83% 2,845,414 $16,709 0.06%
86 Serbia $41,431,648,801 $41.43 billion 1.87% 8,829,628 $4,692 0.05%
87 Azerbaijan $40,747,792,238 $40.75 billion 0.10% 9,845,320 $4,139 0.05%
88 Jordan $40,068,308,451 $40.07 billion 1.97% 9,785,843 $4,095 0.05%
89 Tunisia $39,952,095,561 $39.95 billion 1.96% 11,433,443 $3,494 0.05%
90 Paraguay $39,667,400,816 $39.67 billion 5.21% 6,867,061 $5,776 0.05%
91 Libya $38,107,728,083 $38.11 billion 26.68% 6,580,724 $5,791 0.05%
92 Turkmenistan $37,926,285,714 $37.93 billion 6.50% 5,757,667 $6,587 0.05%
93 DR Congo $37,642,482,562 $37.64 billion 3.70% 81,398,764 $462 0.05%
94 Bolivia $37,508,642,113 $37.51 billion 4.20% 11,192,855 $3,351 0.05%
95 Côte d'Ivoire $37,353,276,059 $37.35 billion 7.70% 24,437,470 $1,529 0.05%
96 Bahrain $35,432,686,170 $35.43 billion 3.88% 1,494,076 $23,715 0.04%
97 Cameroon $34,922,782,311 $34.92 billion 3.55% 24,566,073 $1,422 0.04%
98 Yemen $31,267,675,216 $31.27 billion -5.94% 27,834,819 $1,123 0.04%
99 Latvia $30,463,302,414 $30.46 billion 4.55% 1,951,097 $15,613 0.04%
100 Estonia $26,611,651,599 $26.61 billion 4.85% 1,319,390 $20,170 0.03%
101 Uganda $25,995,031,850 $26.00 billion 3.86% 41,166,588 $631 0.03%
102 Zambia $25,868,142,073 $25.87 billion 3.40% 16,853,599 $1,535 0.03%
103 Nepal $24,880,266,905 $24.88 billion 7.91% 27,632,681 $900 0.03%
104 El Salvador $24,805,439,600 $24.81 billion 2.32% 6,388,126 $3,883 0.03%
105 Iceland $24,488,467,010 $24.49 billion 3.64% 334,393 $73,233 0.03%
106 Honduras $22,978,532,897 $22.98 billion 4.79% 9,429,013 $2,437 0.03%
107 Cambodia $22,158,209,503 $22.16 billion 7.10% 16,009,409 $1,384 0.03%
108 Trinidad and Tobago $22,079,017,627 $22.08 billion -2.34% 1,384,059 $15,952 0.03%
109 Cyprus $22,054,225,828 $22.05 billion 4.23% 1,179,678 $18,695 0.03%
110 Zimbabwe $22,040,902,300 $22.04 billion 4.70% 14,236,595 $1,548 0.03%
111 Senegal $21,070,225,735 $21.07 billion 7.15% 15,419,355 $1,366 0.03%
112 Papua New Guinea $20,536,314,601 $20.54 billion 2.55% 8,438,036 $2,434 0.03%
113 Afghanistan $19,543,976,895 $19.54 billion 2.67% 36,296,113 $538 0.02%
114 Bosnia and Herzegovina $18,054,854,789 $18.05 billion 3.19% 3,351,525 $5,387 0.02%
115 Botswana $17,406,565,823 $17.41 billion 2.36% 2,205,080 $7,894 0.02%
116 Laos $16,853,087,485 $16.85 billion 6.89% 6,953,035 $2,424 0.02%
117 Mali $15,334,336,144 $15.33 billion 5.40% 18,512,430 $828 0.02%
118 Georgia $15,081,338,092 $15.08 billion 4.83% 4,008,716 $3,762 0.02%
119 Gabon $15,013,950,984 $15.01 billion 0.50% 2,064,823 $7,271 0.02%
120 Jamaica $14,781,107,822 $14.78 billion 0.98% 2,920,848 $5,061 0.02%
121 Palestine $14,498,100,000 $14.50 billion 3.14% 4,747,227 $3,054 0.02%
122 Nicaragua $13,814,261,536 $13.81 billion 4.86% 6,384,846 $2,164 0.02%
123 Mauritius $13,266,427,697 $13.27 billion 3.82% 1,264,499 $10,491 0.02%
124 Namibia $13,253,698,015 $13.25 billion -0.95% 2,402,633 $5,516 0.02%
125 Albania $13,038,538,300 $13.04 billion 3.84% 2,884,169 $4,521 0.02%
126 Mozambique $12,645,508,634 $12.65 billion 3.74% 28,649,018 $441 0.02%
127 Malta $12,518,134,319 $12.52 billion 6.42% 437,933 $28,585 0.02%
128 Burkina Faso $12,322,864,245 $12.32 billion 6.30% 19,193,234 $642 0.02%
129 Equatorial Guinea $12,293,579,173 $12.29 billion -4.92% 1,262,002 $9,741 0.02%
130 Bahamas $12,162,100,000 $12.16 billion 1.44% 381,755 $31,858 0.02%
131 Brunei $12,128,089,002 $12.13 billion 1.33% 424,473 $28,572 0.01%
132 Armenia $11,536,590,636 $11.54 billion 7.50% 2,944,791 $3,918 0.01%
133 Madagascar $11,499,803,807 $11.50 billion 4.17% 25,570,512 $450 0.01%
134 Mongolia $11,433,635,876 $11.43 billion 5.30% 3,113,786 $3,672 0.01%
135 North Macedonia $11,279,509,014 $11.28 billion 0.24% 2,081,996 $5,418 0.01%
136 Guinea $10,472,514,515 $10.47 billion 10.60% 12,067,519 $868 0.01%
137 Chad $9,871,247,732 $9.87 billion -2.95% 15,016,753 $657 0.01%
138 Benin $9,246,696,924 $9.25 billion 5.84% 11,175,198 $827 0.01%
139 Rwanda $9,135,454,442 $9.14 billion 6.06% 11,980,961 $762 0.01%
140 Congo $8,701,334,800 $8.70 billion -3.10% 5,110,695 $1,703 0.01%
141 Haiti $8,408,150,518 $8.41 billion 1.17% 10,982,366 $766 0.01%
142 Moldova $8,128,493,432 $8.13 billion 4.50% 4,059,684 $2,002 0.01%
143 Niger $8,119,710,126 $8.12 billion 4.89% 21,602,382 $376 0.01%
144 Kyrgyzstan $7,564,738,836 $7.56 billion 4.58% 6,189,733 $1,222 0.01%
145 Tajikistan $7,146,449,583 $7.15 billion 7.62% 8,880,268 $805 0.01%
146 Malawi $6,303,292,264 $6.30 billion 4.00% 17,670,196 $357 0.01%
147 Guam $5,859,000,000 $5.86 billion 0.19% 164,281 $35,665 0.01%
148 Fiji $5,061,202,767 $5.06 billion 3.80% 877,459 $5,768 0.01%
149 Mauritania $5,024,708,656 $5.02 billion 3.50% 4,282,570 $1,173 0.01%
150 Maldives $4,865,546,027 $4.87 billion 6.91% 496,402 $9,802 0.01%
151 Montenegro $4,844,592,067 $4.84 billion 4.70% 627,563 $7,720 0.01%
152 Togo $4,757,776,485 $4.76 billion 4.40% 7,698,474 $618 0.01%
153 Barbados $4,673,500,000 $4.67 billion 1.00% 286,232 $16,328 0.01%
154 Eswatini $4,433,664,364 $4.43 billion 1.87% 1,124,805 $3,942 0.01%
155 Sierra Leone $3,775,047,334 $3.78 billion 4.21% 7,488,423 $504 0.00%
156 Guyana $3,621,046,005 $3.62 billion 2.92% 775,222 $4,671 0.00%
157 Liberia $3,285,455,000 $3.29 billion 2.47% 4,702,226 $699 0.00%
158 Burundi $3,172,416,146 $3.17 billion 0.50% 10,827,019 $293 0.00%
159 Andorra $3,012,914,131 $3.01 billion 1.87% 77,001 $39,128 0.00%
160 Suriname $2,995,827,901 $3.00 billion 1.69% 570,496 $5,251 0.00%
161 Timor-Leste $2,954,621,000 $2.95 billion -8.00% 1,243,258 $2,377 0.00%
162 Aruba $2,700,558,659 $2.70 billion 1.33% 105,366 $25,630 0.00%
163 Lesotho $2,578,265,358 $2.58 billion -2.29% 2,091,534 $1,233 0.00%
164 Bhutan $2,528,007,911 $2.53 billion 4.63% 745,563 $3,391 0.00%
165 Central African Republic $1,949,411,659 $1.95 billion 4.30% 4,596,023 $424 0.00%
166 Belize $1,862,614,800 $1.86 billion 1.44% 375,769 $4,957 0.00%
167 Cape Verde $1,772,706,451 $1.77 billion 4.01% 537,498 $3,298 0.00%
168 Saint Lucia $1,737,504,296 $1.74 billion 3.82% 180,954 $9,602 0.00%
169 San Marino $1,632,860,041 $1.63 billion 1.50% 33,671 $48,495 0.00%
170 Northern Mariana Islands $1,593,000,000 $1.59 billion 25.14% 56,562 $28,164 0.00%
171 Antigua and Barbuda $1,510,084,751 $1.51 billion 3.03% 95,426 $15,825 0.00%
172 Seychelles $1,497,959,569 $1.50 billion 5.28% 96,418 $15,536 0.00%
173 Gambia $1,489,464,788 $1.49 billion 4.56% 2,213,889 $673 0.00%
174 Guinea-Bissau $1,346,841,897 $1.35 billion 5.92% 1,828,145 $737 0.00%
175 Solomon Islands $1,303,453,622 $1.30 billion 3.24% 636,039 $2,049 0.00%
176 Grenada $1,126,882,296 $1.13 billion 5.06% 110,874 $10,164 0.00%
177 Comoros $1,068,124,330 $1.07 billion 2.71% 813,892 $1,312 0.00%
178 Saint Kitts and Nevis $992,007,403 $992 million 1.17% 52,045 $19,061 0.00%
179 Vanuatu $862,879,789 $863 million 4.50% 285,510 $3,022 0.00%
180 Samoa $840,927,997 $841 million 2.70% 195,352 $4,305 0.00%
181 Saint Vincent and the Grenadines $785,222,509 $785 million 0.86% 109,827 $7,150 0.00%
182 American Samoa $634,000,000 $634 million -5.38% 55,620 $11,399 0.00%
183 Dominica $496,727,000 $497 million -9.53% 71,458 $6,951 0.00%
184 Tonga $427,659,795 $428 million 2.70% 101,998 $4,193 0.00%
185 São Tomé and Príncipe $392,570,293 $393 million 3.87% 207,089 $1,896 0.00%
186 Micronesia $336,427,500 $336 million 3.20% 532,899 $631 0.00%
187 Palau $289,823,500 $290 million -3.57% 17,808 $16,275 0.00%
188 Marshall Islands $204,173,430 $204 million 3.60% 58,058 $3,517 0.00%
189 Kiribati $185,572,502 $186 million 0.33% 114,158 $1,626 0.00%
190 Tuvalu $39,731,317 $40 million 3.24% 11,370 $3,494 0.00%"""
coords = """Abkhazia,Sukhumi,43.001525,41.023415
Afghanistan,Kabul,34.575503,69.240073
Aland Islands,Mariehamn,60.1,19.933333
Albania,Tirana,41.327546,19.818698
Algeria,Algiers,36.752887,3.042048
American Samoa,Pago Pago,-14.275632,-170.702036
Andorra,Andorra la Vella,42.506317,1.521835
Angola,Luanda,-8.839988,13.289437
Anguilla,The Valley,18.214813,-63.057441
Antarctica,South Pole,-90,0
Antigua and Barbuda,Saint John's,17.12741,-61.846772
Argentina,Buenos Aires,-34.603684,-58.381559
Armenia,Yerevan,40.179186,44.499103
Aruba,Oranjestad,12.509204,-70.008631
Australia,Canberra,-35.282,149.128684
Austria,Vienna,48.208174,16.373819
Azerbaijan,Baku,40.409262,49.867092
Bahamas,Nassau,25.047984,-77.355413
Bahrain,Manama,26.228516,50.58605
Bangladesh,Dhaka,23.810332,90.412518
Barbados,Bridgetown,13.113222,-59.598809
Belarus,Minsk,53.90454,27.561524
Belgium,Brussels,50.85034,4.35171
Belize,Belmopan,17.251011,-88.75902
Benin,Porto-Novo,6.496857,2.628852
Bermuda,Hamilton,32.294816,-64.781375
Bhutan,Thimphu,27.472792,89.639286
Bolivia,La Paz,-16.489689,-68.119294
Bosnia and Herzegovina,Sarajevo,43.856259,18.413076
Botswana,Gaborone,-24.628208,25.923147
Bouvet Island,Bouvet Island,-54.43,3.38
Brazil,Brasília,-15.794229,-47.882166
British Indian Ocean Territory,Camp Justice,21.3419,55.4778
British Virgin Islands,Road Town,18.428612,-64.618466
Brunei,Bandar Seri Begawan,4.903052,114.939821
Bulgaria,Sofia,42.697708,23.321868
Burkina Faso,Ouagadougou,12.371428,-1.51966
Burundi,Bujumbura,-3.361378,29.359878
Cambodia,Phnom Penh,11.544873,104.892167
Cameroon,Yaoundé,3.848033,11.502075
Canada,Ottawa,45.42153,-75.697193
Cape Verde,Praia,14.93305,-23.513327
Cayman Islands,George Town,19.286932,-81.367439
Central African Republic,Bangui,4.394674,18.55819
Chad,N'Djamena,12.134846,15.055742
Chile,Santiago,-33.44889,-70.669265
China,Beijing,39.904211,116.407395
Christmas Island,Flying Fish Cove,-10.420686,105.679379
Cocos (Keeling) Islands,West Island,-12.188834,96.829316
Colombia,Bogotá,4.710989,-74.072092
Comoros,Moroni,-11.717216,43.247315
DR Congo,Kinshasa,-4.441931,15.266293
Congo,Brazzaville,-4.26336,15.242885
Cook Islands,Avarua,-21.212901,-159.782306
Costa Rica,San José,9.928069,-84.090725
Côte d'Ivoire,Yamoussoukro,6.827623,-5.289343
Croatia,Zagreb ,45.815011,15.981919
Cuba,Havana,23.05407,-82.345189
Curaçao,Willemstad,12.122422,-68.882423
Cyprus,Nicosia,35.185566,33.382276
Czech Republic,Prague,50.075538,14.4378
Denmark,Copenhagen,55.676097,12.568337
Djibouti,Djibouti,11.572077,43.145647
Dominica,Roseau,15.309168,-61.379355
Dominican Republic,Santo Domingo,18.486058,-69.931212
Ecuador,Quito,-0.180653,-78.467838
Egypt,Cairo,30.04442,31.235712
El Salvador,San Salvador,13.69294,-89.218191
Equatorial Guinea,Malabo,3.750412,8.737104
Eritrea,Asmara,15.322877,38.925052
Estonia,Tallinn,59.436961,24.753575
Ethiopia,Addis Ababa,8.980603,38.757761
Falkland Islands (Islas Malvinas),Stanley,-51.697713,-57.851663
Faroe Islands,Tórshavn,62.007864,-6.790982
Fiji,Suva,-18.124809,178.450079
Finland,Helsinki,60.173324,24.941025
France,Paris,48.856614,2.352222
French Guiana,Cayenne,4.92242,-52.313453
French Polynesia,Papeete,-17.551625,-149.558476
French Southern Territories,Saint-Pierre ,-21.3419,55.4778
Gabon,Libreville,0.416198,9.467268
Gambia,Banjul,13.454876,-16.579032
Georgia,Tbilisi,41.715138,44.827096
Germany,Berlin,52.520007,13.404954
Ghana,Accra,5.603717,-0.186964
Gibraltar,Gibraltar,36.140773,-5.353599
Greece,Athens,37.983917,23.72936
Greenland,Nuuk,64.18141,-51.694138
Grenada,Saint George's,12.056098,-61.7488
Guadeloupe,Basse-Terre,16.014453,-61.706411
Guam,Hagåtña,13.470891,144.751278
Guatemala,Guatemala City,14.634915,-90.506882
Guernsey,Saint Peter Port,49.455443,-2.536871
Guinea,Conakry,9.641185,-13.578401
Guinea-Bissau,Bissau,11.881655,-15.617794
Guyana,Georgetown,6.801279,-58.155125
Haiti,Port-au-Prince,18.594395,-72.307433
Honduras,Tegucigalpa,14.072275,-87.192136
Hong Kong,Hong Kong,22.396428,114.109497
Hungary,Budapest,47.497912,19.040235
Iceland,Reykjavík,64.126521,-21.817439
India,New Delhi,28.613939,77.209021
Indonesia,Jakarta,-6.208763,106.845599
Iran,Tehran,35.689198,51.388974
Iraq,Baghdad,33.312806,44.361488
Ireland,Dublin,53.349805,-6.26031
Isle of Man,Douglas,54.152337,-4.486123
Israel,Tel Aviv,32.0853,34.781768
Italy,Rome,41.902784,12.496366
Jamaica,Kingston,18.042327,-76.802893
Japan,Tokyo,35.709026,139.731992
Jersey,Saint Helier,49.186823,-2.106568
Jordan,Amman,31.956578,35.945695
Kazakhstan,Astana,51.160523,71.470356
Kenya,Nairobi,-1.292066,36.821946
Kiribati,Tarawa Atoll,1.451817,172.971662
Kosovo,Pristina,42.662914,21.165503
Kuwait,Kuwait City,29.375859,47.977405
Kyrgyzstan,Bishkek,42.874621,74.569762
Laos,Vientiane,17.975706,102.633104
Latvia,Riga,56.949649,24.105186
Lebanon,Beirut,33.888629,35.495479
Lesotho,Maseru,-29.363219,27.51436
Liberia,Monrovia,6.290743,-10.760524
Libya,Tripoli,32.887209,13.191338
Liechtenstein,Vaduz,47.14103,9.520928
Lithuania,Vilnius,54.687156,25.279651
Luxembourg,Luxembourg,49.611621,6.131935
Macau,Macau,22.166667,113.55
North Macedonia,Skopje,41.997346,21.427996
Madagascar,Antananarivo,-18.87919,47.507905
Malawi,Lilongwe,-13.962612,33.774119
Malaysia,Kuala Lumpur,3.139003,101.686855
Maldives,Malé,4.175496,73.509347
Mali,Bamako,12.639232,-8.002889
Malta,Valletta,35.898909,14.514553
Marshall Islands,Majuro,7.116421,171.185774
Martinique,Fort-de-France,14.616065,-61.05878
Mauritania,Nouakchott,18.07353,-15.958237
Mauritius,Port Louis,-20.166896,57.502332
Mayotte,Mamoudzou,-12.780949,45.227872
Mexico,Mexico City,19.432608,-99.133208
Micronesia,Palikir,6.914712,158.161027
Moldova,Chisinau,47.010453,28.86381
Monaco,Monaco,43.737411,7.420816
Mongolia,Ulaanbaatar,47.886399,106.905744
Montenegro,Podgorica,42.43042,19.259364
Montserrat,Plymouth,16.706523,-62.215738
Morocco,Rabat,33.97159,-6.849813
Mozambique,Maputo,-25.891968,32.605135
Myanmar,Naypyidaw,19.763306,96.07851
Nagorno-Karabakh Republic,Stepanakert,39.826385,46.763595
Namibia,Windhoek,-22.560881,17.065755
Nauru,Yaren,-0.546686,166.921091
Nepal,Kathmandu,27.717245,85.323961
Netherlands,Amsterdam,52.370216,4.895168
Netherlands Antilles,Willemstad ,12.1091242,-68.9316546
New Caledonia,Nouméa,-22.255823,166.450524
New Zealand,Wellington,-41.28646,174.776236
Nicaragua,Managua,12.114993,-86.236174
Niger,Niamey,13.511596,2.125385
Nigeria,Abuja,9.076479,7.398574
Niue,Alofi,-19.055371,-169.917871
Norfolk Island,Kingston,-29.056394,167.959588
North Korea,Pyongyang,39.039219,125.762524
Northern Cyprus,Nicosia,35.185566,33.382276
Northern Mariana Islands,Saipan,15.177801,145.750967
Norway,Oslo,59.913869,10.752245
Oman,Muscat,23.58589,58.405923
Pakistan,Islamabad,33.729388,73.093146
Palau,Ngerulmud,7.500384,134.624289
Palestine,Ramallah,31.9073509,35.5354719
Panama,Panama City,9.101179,-79.402864
Papua New Guinea,Port Moresby,-9.4438,147.180267
Paraguay,Asuncion,-25.26374,-57.575926
Peru,Lima,-12.046374,-77.042793
Philippines,Manila,14.599512,120.98422
Pitcairn Islands,Adamstown,-25.06629,-130.100464
Poland,Warsaw,52.229676,21.012229
Portugal,Lisbon,38.722252,-9.139337
Puerto Rico,San Juan,18.466334,-66.105722
Qatar,Doha,25.285447,51.53104
Réunion,Saint-Denis,-20.882057,55.450675
Romania,Bucharest,44.426767,26.102538
Russia,Moscow,55.755826,37.6173
Rwanda,Kigali,-1.957875,30.112735
Saint Pierre and Miquelon,Saint Pierre,46.775846,-56.180636
Saint Vincent and the Grenadines,Kingstown,13.160025,-61.224816
Samoa,Apia,-13.850696,-171.751355
San Marino,San Marino,43.935591,12.447281
São Tomé and Príncipe,São Tomé,0.330192,6.733343
Saudi Arabia,Riyadh,24.749403,46.902838
Senegal,Dakar,14.764504,-17.366029
Serbia,Belgrade,44.786568,20.448922
Seychelles,Victoria,-4.619143,55.451315
Sierra Leone,Freetown,8.465677,-13.231722
Singapore,Singapore,1.280095,103.850949
Slovakia,Bratislava,48.145892,17.107137
Slovenia,Ljubljana,46.056947,14.505751
Solomon Islands,Honiara,-9.445638,159.9729
Somalia,Mogadishu,2.046934,45.318162
South Africa,Pretoria,-25.747868,28.229271
South Georgia and the South Sandwich Islands,King Edward Point,-54.28325,-36.493735
South Korea,Seoul,37.566535,126.977969
South Ossetia,Tskhinvali,42.22146,43.964405
South Sudan,Juba,4.859363,31.57125
Spain,Madrid,40.416775,-3.70379
Sri Lanka,Sri Jayawardenepura Kotte,6.89407,79.902478
Saint Barthélemy,Gustavia,17.896435,-62.852201
Saint Kitts and Nevis,Basseterre,17.302606,-62.717692
Saint Lucia,Castries,14.010109,-60.987469
Saint Martin,Marigot,18.067519,-63.082466
Sudan,Khartoum,15.500654,32.559899
Suriname,Paramaribo,5.852036,-55.203828
Svalbard and Jan Mayen,Longyearbyen ,78.062,22.055
Eswatini,Mbabane,-26.305448,31.136672
Sweden,Stockholm,59.329323,18.068581
Switzerland,Bern,46.947974,7.447447
Syria,Damascus,33.513807,36.276528
Taiwan,Taipei,25.032969,121.565418
Tajikistan,Dushanbe,38.559772,68.787038
Tanzania,Dodoma,-6.162959,35.751607
Thailand,Bangkok,13.756331,100.501765
Timor-Leste,Dili,-8.556856,125.560314
Togo,Lomé,6.172497,1.231362
Tokelau,Nukunonu,-9.2005,-171.848
Tonga,Nukuʻalofa,-21.139342,-175.204947
Transnistria,Tiraspol,46.848185,29.596805
Trinidad and Tobago,Port of Spain,10.654901,-61.501926
Tristan da Cunha,Edinburgh of the Seven Seas,-37.068042,-12.311315
Tunisia,Tunis,36.806495,10.181532
Turkey,Ankara,39.933364,32.859742
Turkmenistan,Ashgabat,37.960077,58.326063
Turks and Caicos Islands,Cockburn Town,21.467458,-71.13891
Tuvalu,Funafuti,-8.520066,179.198128
U.S. Virgin Islands,Charlotte Amalie,18.3419,-64.930701
Uganda,Kampala,0.347596,32.58252
Ukraine,Kiev,50.4501,30.5234
United Arab Emirates,Abu Dhabi,24.299174,54.697277
United Kingdom,London,51.507351,-0.127758
United States,Washington,38.907192,-77.036871
Uruguay,Montevideo,-34.901113,-56.164531
Uzbekistan,Tashkent,41.299496,69.240073
Vanuatu,Port Vila,-17.733251,168.327325
Vatican City,Vatican City,41.902179,12.453601
Venezuela,Caracas,10.480594,-66.903606
Vietnam,Hanoi,21.027764,105.83416
Wallis and Futuna,Mata-Utu,-13.282509,-176.176447
Western Sahara,El Aaiún,27.125287,-13.1625
Yemen,Sana'a,15.369445,44.191007
Zambia,Lusaka,-15.387526,28.322817
Zimbabwe,Harare,-17.825166,31.03351"""
create_file()
def format_amount(a):
    # Drop thousands separators, surrounding whitespace and the '%'/'$' symbols.
    return a.replace(",", "").strip().replace("%", "").replace("$", "")

def create_json(gdp, coords):
    # ------------ Split gdp data ------------ #
    line_list = gdp.split('\n')
    column_list = [x.split('\t') for x in line_list if x != ""]

    # ------------ Split coord data ------------ #
    line_list = coords.split('\n')
    coord_list = [x.split(',') for x in line_list if x != ""]
    coord_dict = {}
    for i in coord_list:
        # country name -> [capital, latitude, longitude]
        coord_dict[format_amount(i[0])] = i[1:]

    # ------------ Begin File ------------ #
    out = "// This file is automatically generated by game-statics/utils/countryRON.py.\n// Please do not edit."
    out += "\n["

    # -------- Add country list -------- #
    for index in range(len(column_list)):
        # Look up the coordinates for this GDP row (this reuses the name 'coords',
        # shadowing the parameter, which is no longer needed at this point).
        coords = coord_dict[format_amount(column_list[index][1])]
        print(coords)  # debug output
        out += "("
        out += 'name:"' + format_amount(column_list[index][1]) + '",'
        out += 'gdp:' + format_amount(column_list[index][2]) + ','
        out += 'population:' + format_amount(column_list[index][5]) + ','
        out += 'lat:' + format_amount(coords[1]) + ','
        out += 'long:' + format_amount(coords[2])
        out += ")"
        if index != len(column_list) - 1:
            out += ','

    # ----------- End File ----------- #
    out += "]"
    return out

def create_file():
    data = create_json(d, coords)
    with open("../assets/Countries.ron", "w", encoding='utf8') as file:
        file.write(data)
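# Illustrative only, not part of the original script: given the first worldometers
# row below ("1\tUnited States\t$19,485,394,000,000\t...\t325,084,756\t...") and the
# "United States,Washington,38.907192,-77.036871" line in the coordinate list,
#   format_amount(' $19,485,394,000,000 ')  ->  '19485394000000'
# and the entry create_json emits for that row is expected to be
#   (name:"United States",gdp:19485394000000,population:325084756,lat:38.907192,long:-77.036871)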
# Copied from https://www.worldometers.info/gdp/gdp-by-country/
# Columns: Rank, Country, GDP, GDP formatted, GDP change, Population, GDP per capita, Share of world GDP
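# After split('\t') in create_json, [0] is the rank, [1] the country name, [2] the
# raw GDP figure and [5] the population; only [1], [2] and [5] are used when
# building the RON entries, the remaining columns are ignored.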
d='''
1 United States $19,485,394,000,000 $19.485 trillion 2.27% 325,084,756 $59,939 24.08%
2 China $12,237,700,479,375 $12.238 trillion 6.90% 1,421,021,791 $8,612 15.12%
3 Japan $4,872,415,104,315 $4.872 trillion 1.71% 127,502,725 $38,214 6.02%
4 Germany $3,693,204,332,230 $3.693 trillion 2.22% 82,658,409 $44,680 4.56%
5 India $2,650,725,335,364 $2.651 trillion 6.68% 1,338,676,785 $1,980 3.28%
6 United Kingdom $2,637,866,340,434 $2.638 trillion 1.79% 66,727,461 $39,532 3.26%
7 France $2,582,501,307,216 $2.583 trillion 1.82% 64,842,509 $39,827 3.19%
8 Brazil $2,053,594,877,013 $2.054 trillion 0.98% 207,833,823 $9,881 2.54%
9 Italy $1,943,835,376,342 $1.944 trillion 1.50% 60,673,701 $32,038 2.40%
10 Canada $1,647,120,175,449 $1.647 trillion 3.05% 36,732,095 $44,841 2.04%
11 Russia $1,578,417,211,937 $1.578 trillion 1.55% 145,530,082 $10,846 1.95%
12 South Korea $1,530,750,923,149 $1.531 trillion 3.06% 51,096,415 $29,958 1.89%
13 Australia $1,323,421,072,479 $1.323 trillion 1.96% 24,584,620 $53,831 1.64%
14 Spain $1,314,314,164,402 $1.314 trillion 3.05% 46,647,428 $28,175 1.62%
15 Mexico $1,150,887,823,404 $1.151 trillion 2.04% 124,777,324 $9,224 1.42%
16 Indonesia $1,015,420,587,285 $1.015 trillion 5.07% 264,650,963 $3,837 1.25%
17 Turkey $851,549,299,635 $852 billion 7.44% 81,116,450 $10,498 1.05%
18 Netherlands $830,572,618,850 $831 billion 3.16% 17,021,347 $48,796 1.03%
19 Saudi Arabia $686,738,400,000 $687 billion -0.86% 33,101,179 $20,747 0.85%
20 Switzerland $678,965,423,322 $679 billion 1.09% 8,455,804 $80,296 0.84%
21 Argentina $637,430,331,479 $637 billion 2.85% 43,937,140 $14,508 0.79%
22 Sweden $535,607,385,506 $536 billion 2.29% 9,904,896 $54,075 0.66%
23 Poland $526,465,839,003 $526 billion 4.81% 37,953,180 $13,871 0.65%
24 Belgium $494,763,551,891 $495 billion 1.73% 11,419,748 $43,325 0.61%
25 Thailand $455,302,682,986 $455 billion 3.91% 69,209,810 $6,579 0.56%
26 Iran $454,012,768,724 $454 billion 3.76% 80,673,883 $5,628 0.56%
27 Austria $416,835,975,862 $417 billion 3.04% 8,819,901 $47,261 0.52%
28 Norway $399,488,897,844 $399 billion 1.92% 5,296,326 $75,428 0.49%
29 United Arab Emirates $382,575,085,092 $383 billion 0.79% 9,487,203 $40,325 0.47%
30 Nigeria $375,745,486,521 $376 billion 0.81% 190,873,244 $1,969 0.46%
31 Israel $353,268,411,919 $353 billion 3.33% 8,243,848 $42,852 0.44%
32 South Africa $348,871,647,960 $349 billion 1.32% 57,009,756 $6,120 0.43%
33 Hong Kong $341,449,340,451 $341 billion 3.79% 7,306,322 $46,733 0.42%
34 Ireland $331,430,014,003 $331 billion 7.80% 4,753,279 $69,727 0.41%
35 Denmark $329,865,537,183 $330 billion 2.24% 5,732,274 $57,545 0.41%
36 Singapore $323,907,234,412 $324 billion 3.62% 5,708,041 $56,746 0.40%
37 Malaysia $314,710,259,511 $315 billion 5.90% 31,104,646 $10,118 0.39%
38 Colombia $314,457,601,860 $314 billion 1.79% 48,909,839 $6,429 0.39%
39 Philippines $313,595,208,737 $314 billion 6.68% 105,172,925 $2,982 0.39%
40 Pakistan $304,951,818,494 $305 billion 5.70% 207,906,209 $1,467 0.38%
41 Chile $277,075,944,402 $277 billion 1.49% 18,470,439 $15,001 0.34%
42 Finland $252,301,837,573 $252 billion 2.63% 5,511,371 $45,778 0.31%
43 Bangladesh $249,723,862,487 $250 billion 7.28% 159,685,424 $1,564 0.31%
44 Egypt $235,369,129,338 $235 billion 4.18% 96,442,591 $2,441 0.29%
45 Vietnam $223,779,865,815 $224 billion 6.81% 94,600,648 $2,366 0.28%
46 Portugal $219,308,128,887 $219 billion 2.68% 10,288,527 $21,316 0.27%
47 Czech Republic $215,913,545,038 $216 billion 4.29% 10,641,034 $20,291 0.27%
48 Romania $211,883,923,504 $212 billion 7.26% 19,653,969 $10,781 0.26%
49 Peru $211,389,272,242 $211 billion 2.53% 31,444,298 $6,723 0.26%
50 New Zealand $204,139,049,909 $204 billion 3.03% 4,702,034 $43,415 0.25%
51 Greece $203,085,551,429 $203 billion 1.35% 10,569,450 $19,214 0.25%
52 Iraq $192,060,810,811 $192 billion -2.07% 37,552,781 $5,114 0.24%
53 Algeria $167,555,280,113 $168 billion 1.60% 41,389,189 $4,048 0.21%
54 Qatar $166,928,571,429 $167 billion 1.58% 2,724,728 $61,264 0.21%
55 Kazakhstan $162,886,867,832 $163 billion 4.10% 18,080,019 $9,009 0.20%
56 Hungary $139,761,138,103 $140 billion 3.99% 9,729,823 $14,364 0.17%
57 Angola $122,123,822,334 $122 billion -0.15% 29,816,766 $4,096 0.15%
58 Kuwait $120,126,277,613 $120 billion -2.87% 4,056,099 $29,616 0.15%
59 Sudan $117,487,857,143 $117 billion 4.28% 40,813,397 $2,879 0.15%
60 Ukraine $112,154,185,121 $112 billion 2.52% 44,487,709 $2,521 0.14%
61 Morocco $109,708,728,849 $110 billion 4.09% 35,581,255 $3,083 0.14%
62 Ecuador $104,295,862,000 $104 billion 2.37% 16,785,361 $6,214 0.13%
63 Cuba $96,851,000,000 $96.85 billion 1.78% 11,339,254 $8,541 0.12%
64 Slovakia $95,617,670,260 $95.62 billion 3.40% 5,447,900 $17,551 0.12%
65 Sri Lanka $87,357,205,923 $87.36 billion 3.31% 21,128,032 $4,135 0.11%
66 Ethiopia $80,561,496,134 $80.56 billion 10.25% 106,399,924 $757 0.10%
67 Kenya $79,263,075,749 $79.26 billion 4.87% 50,221,142 $1,578 0.10%
68 Dominican Republic $75,931,656,815 $75.93 billion 4.55% 10,513,104 $7,223 0.09%
69 Guatemala $75,620,095,538 $75.62 billion 2.76% 16,914,970 $4,471 0.09%
70 Oman $70,783,875,163 $70.78 billion -0.27% 4,665,928 $15,170 0.09%
71 Myanmar $67,068,745,521 $67.07 billion 6.76% 53,382,523 $1,256 0.08%
72 Luxembourg $62,316,359,824 $62.32 billion 2.30% 591,910 $105,280 0.08%
73 Panama $62,283,756,584 $62.28 billion 5.32% 4,106,769 $15,166 0.08%
74 Ghana $58,996,776,238 $59.00 billion 8.14% 29,121,465 $2,026 0.07%
75 Bulgaria $58,220,973,783 $58.22 billion 3.81% 7,102,444 $8,197 0.07%
76 Costa Rica $57,285,984,448 $57.29 billion 3.28% 4,949,954 $11,573 0.07%
77 Uruguay $56,156,972,158 $56.16 billion 2.66% 3,436,641 $16,341 0.07%
78 Croatia $55,213,087,271 $55.21 billion 2.92% 4,182,857 $13,200 0.07%
79 Belarus $54,456,465,473 $54.46 billion 2.42% 9,450,231 $5,762 0.07%
80 Lebanon $53,576,985,687 $53.58 billion 1.53% 6,819,373 $7,857 0.07%
81 Tanzania $53,320,625,959 $53.32 billion 7.10% 54,660,339 $975 0.07%
82 Macau $50,361,201,096 $50.36 billion 9.10% 622,585 $80,890 0.06%
83 Uzbekistan $49,677,172,714 $49.68 billion 5.30% 31,959,785 $1,554 0.06%
84 Slovenia $48,769,655,479 $48.77 billion 5.00% 2,076,394 $23,488 0.06%
85 Lithuania $47,544,459,559 $47.54 billion 3.83% 2,845,414 $16,709 0.06%
86 Serbia $41,431,648,801 $41.43 billion 1.87% 8,829,628 $4,692 0.05%
87 Azerbaijan $40,747,792,238 $40.75 billion 0.10% 9,845,320 $4,139 0.05%
88 Jordan $40,068,308,451 $40.07 billion 1.97% 9,785,843 $4,095 0.05%
89 Tunisia $39,952,095,561 $39.95 billion 1.96% 11,433,443 $3,494 0.05%
90 Paraguay $39,667,400,816 $39.67 billion 5.21% 6,867,061 $5,776 0.05%
91 Libya $38,107,728,083 $38.11 billion 26.68% 6,580,724 $5,791 0.05%
92 Turkmenistan $37,926,285,714 $37.93 billion 6.50% 5,757,667 $6,587 0.05%
93 DR Congo $37,642,482,562 $37.64 billion 3.70% 81,398,764 $462 0.05%
94 Bolivia $37,508,642,113 $37.51 billion 4.20% 11,192,855 $3,351 0.05%
95 Côte d'Ivoire $37,353,276,059 $37.35 billion 7.70% 24,437,470 $1,529 0.05%
96 Bahrain $35,432,686,170 $35.43 billion 3.88% 1,494,076 $23,715 0.04%
97 Cameroon $34,922,782,311 $34.92 billion 3.55% 24,566,073 $1,422 0.04%
98 Yemen $31,267,675,216 $31.27 billion -5.94% 27,834,819 $1,123 0.04%
99 Latvia $30,463,302,414 $30.46 billion 4.55% 1,951,097 $15,613 0.04%
100 Estonia $26,611,651,599 $26.61 billion 4.85% 1,319,390 $20,170 0.03%
101 Uganda $25,995,031,850 $26.00 billion 3.86% 41,166,588 $631 0.03%
102 Zambia $25,868,142,073 $25.87 billion 3.40% 16,853,599 $1,535 0.03%
103 Nepal $24,880,266,905 $24.88 billion 7.91% 27,632,681 $900 0.03%
104 El Salvador $24,805,439,600 $24.81 billion 2.32% 6,388,126 $3,883 0.03%
105 Iceland $24,488,467,010 $24.49 billion 3.64% 334,393 $73,233 0.03%
106 Honduras $22,978,532,897 $22.98 billion 4.79% 9,429,013 $2,437 0.03%
107 Cambodia $22,158,209,503 $22.16 billion 7.10% 16,009,409 $1,384 0.03%
108 Trinidad and Tobago $22,079,017,627 $22.08 billion -2.34% 1,384,059 $15,952 0.03%
109 Cyprus $22,054,225,828 $22.05 billion 4.23% 1,179,678 $18,695 0.03%
110 Zimbabwe $22,040,902,300 $22.04 billion 4.70% 14,236,595 $1,548 0.03%
111 Senegal $21,070,225,735 $21.07 billion 7.15% 15,419,355 $1,366 0.03%
112 Papua New Guinea $20,536,314,601 $20.54 billion 2.55% 8,438,036 $2,434 0.03%
113 Afghanistan $19,543,976,895 $19.54 billion 2.67% 36,296,113 $538 0.02%
114 Bosnia and Herzegovina $18,054,854,789 $18.05 billion 3.19% 3,351,525 $5,387 0.02%
115 Botswana $17,406,565,823 $17.41 billion 2.36% 2,205,080 $7,894 0.02%
116 Laos $16,853,087,485 $16.85 billion 6.89% 6,953,035 $2,424 0.02%
117 Mali $15,334,336,144 $15.33 billion 5.40% 18,512,430 $828 0.02%
118 Georgia $15,081,338,092 $15.08 billion 4.83% 4,008,716 $3,762 0.02%
119 Gabon $15,013,950,984 $15.01 billion 0.50% 2,064,823 $7,271 0.02%
120 Jamaica $14,781,107,822 $14.78 billion 0.98% 2,920,848 $5,061 0.02%
121 Palestine $14,498,100,000 $14.50 billion 3.14% 4,747,227 $3,054 0.02%
122 Nicaragua $13,814,261,536 $13.81 billion 4.86% 6,384,846 $2,164 0.02%
123 Mauritius $13,266,427,697 $13.27 billion 3.82% 1,264,499 $10,491 0.02%
124 Namibia $13,253,698,015 $13.25 billion -0.95% 2,402,633 $5,516 0.02%
125 Albania $13,038,538,300 $13.04 billion 3.84% 2,884,169 $4,521 0.02%
126 Mozambique $12,645,508,634 $12.65 billion 3.74% 28,649,018 $441 0.02%
127 Malta $12,518,134,319 $12.52 billion 6.42% 437,933 $28,585 0.02%
128 Burkina Faso $12,322,864,245 $12.32 billion 6.30% 19,193,234 $642 0.02%
129 Equatorial Guinea $12,293,579,173 $12.29 billion -4.92% 1,262,002 $9,741 0.02%
130 Bahamas $12,162,100,000 $12.16 billion 1.44% 381,755 $31,858 0.02%
131 Brunei $12,128,089,002 $12.13 billion 1.33% 424,473 $28,572 0.01%
132 Armenia $11,536,590,636 $11.54 billion 7.50% 2,944,791 $3,918 0.01%
133 Madagascar $11,499,803,807 $11.50 billion 4.17% 25,570,512 $450 0.01%
134 Mongolia $11,433,635,876 $11.43 billion 5.30% 3,113,786 $3,672 0.01%
135 North Macedonia $11,279,509,014 $11.28 billion 0.24% 2,081,996 $5,418 0.01%
136 Guinea $10,472,514,515 $10.47 billion 10.60% 12,067,519 $868 0.01%
137 Chad $9,871,247,732 $9.87 billion -2.95% 15,016,753 $657 0.01%
138 Benin $9,246,696,924 $9.25 billion 5.84% 11,175,198 $827 0.01%
139 Rwanda $9,135,454,442 $9.14 billion 6.06% 11,980,961 $762 0.01%
140 Congo $8,701,334,800 $8.70 billion -3.10% 5,110,695 $1,703 0.01%
141 Haiti $8,408,150,518 $8.41 billion 1.17% 10,982,366 $766 0.01%
142 Moldova $8,128,493,432 $8.13 billion 4.50% 4,059,684 $2,002 0.01%
143 Niger $8,119,710,126 $8.12 billion 4.89% 21,602,382 $376 0.01%
144 Kyrgyzstan $7,564,738,836 $7.56 billion 4.58% 6,189,733 $1,222 0.01%
145 Tajikistan $7,146,449,583 $7.15 billion 7.62% 8,880,268 $805 0.01%
146 Malawi $6,303,292,264 $6.30 billion 4.00% 17,670,196 $357 0.01%
147 Guam $5,859,000,000 $5.86 billion 0.19% 164,281 $35,665 0.01%
148 Fiji $5,061,202,767 $5.06 billion 3.80% 877,459 $5,768 0.01%
149 Mauritania $5,024,708,656 $5.02 billion 3.50% 4,282,570 $1,173 0.01%
150 Maldives $4,865,546,027 $4.87 billion 6.91% 496,402 $9,802 0.01%
151 Montenegro $4,844,592,067 $4.84 billion 4.70% 627,563 $7,720 0.01%
152 Togo $4,757,776,485 $4.76 billion 4.40% 7,698,474 $618 0.01%
153 Barbados $4,673,500,000 $4.67 billion 1.00% 286,232 $16,328 0.01%
154 Eswatini $4,433,664,364 $4.43 billion 1.87% 1,124,805 $3,942 0.01%
155 Sierra Leone $3,775,047,334 $3.78 billion 4.21% 7,488,423 $504 0.00%
156 Guyana $3,621,046,005 $3.62 billion 2.92% 775,222 $4,671 0.00%
157 Liberia $3,285,455,000 $3.29 billion 2.47% 4,702,226 $699 0.00%
158 Burundi $3,172,416,146 $3.17 billion 0.50% 10,827,019 $293 0.00%
159 Andorra $3,012,914,131 $3.01 billion 1.87% 77,001 $39,128 0.00%
160 Suriname $2,995,827,901 $3.00 billion 1.69% 570,496 $5,251 0.00%
161 Timor-Leste $2,954,621,000 $2.95 billion -8.00% 1,243,258 $2,377 0.00%
162 Aruba $2,700,558,659 $2.70 billion 1.33% 105,366 $25,630 0.00%
163 Lesotho $2,578,265,358 $2.58 billion -2.29% 2,091,534 $1,233 0.00%
164 Bhutan $2,528,007,911 $2.53 billion 4.63% 745,563 $3,391 0.00%
165 Central African Republic $1,949,411,659 $1.95 billion 4.30% 4,596,023 $424 0.00%
166 Belize $1,862,614,800 $1.86 billion 1.44% 375,769 $4,957 0.00%
167 Cape Verde $1,772,706,451 $1.77 billion 4.01% 537,498 $3,298 0.00%
168 Saint Lucia $1,737,504,296 $1.74 billion 3.82% 180,954 $9,602 0.00%
169 San Marino $1,632,860,041 $1.63 billion 1.50% 33,671 $48,495 0.00%
170 Northern Mariana Islands $1,593,000,000 $1.59 billion 25.14% 56,562 $28,164 0.00%
171 Antigua and Barbuda $1,510,084,751 $1.51 billion 3.03% 95,426 $15,825 0.00%
172 Seychelles $1,497,959,569 $1.50 billion 5.28% 96,418 $15,536 0.00%
173 Gambia $1,489,464,788 $1.49 billion 4.56% 2,213,889 $673 0.00%
174 Guinea-Bissau $1,346,841,897 $1.35 billion 5.92% 1,828,145 $737 0.00%
175 Solomon Islands $1,303,453,622 $1.30 billion 3.24% 636,039 $2,049 0.00%
176 Grenada $1,126,882,296 $1.13 billion 5.06% 110,874 $10,164 0.00%
177 Comoros $1,068,124,330 $1.07 billion 2.71% 813,892 $1,312 0.00%
178 Saint Kitts and Nevis $992,007,403 $992 million 1.17% 52,045 $19,061 0.00%
179 Vanuatu $862,879,789 $863 million 4.50% 285,510 $3,022 0.00%
180 Samoa $840,927,997 $841 million 2.70% 195,352 $4,305 0.00%
181 Saint Vincent and the Grenadines $785,222,509 $785 million 0.86% 109,827 $7,150 0.00%
182 American Samoa $634,000,000 $634 million -5.38% 55,620 $11,399 0.00%
183 Dominica $496,727,000 $497 million -9.53% 71,458 $6,951 0.00%
184 Tonga $427,659,795 $428 million 2.70% 101,998 $4,193 0.00%
185 São Tomé and Príncipe $392,570,293 $393 million 3.87% 207,089 $1,896 0.00%
186 Micronesia $336,427,500 $336 million 3.20% 532,899 $631 0.00%
187 Palau $289,823,500 $290 million -3.57% 17,808 $16,275 0.00%
188 Marshall Islands $204,173,430 $204 million 3.60% 58,058 $3,517 0.00%
189 Kiribati $185,572,502 $186 million 0.33% 114,158 $1,626 0.00%
190 Tuvalu $39,731,317 $40 million 3.24% 11,370 $3,494 0.00%'''
coords = '''Abkhazia,Sukhumi,43.001525,41.023415
Afghanistan,Kabul,34.575503,69.240073
Aland Islands,Mariehamn,60.1,19.933333
Albania,Tirana,41.327546,19.818698
Algeria,Algiers,36.752887,3.042048
American Samoa,Pago Pago,-14.275632,-170.702036
Andorra,Andorra la Vella,42.506317,1.521835
Angola,Luanda,-8.839988,13.289437
Anguilla,The Valley,18.214813,-63.057441
Antarctica,South Pole,-90,0
Antigua and Barbuda,Saint John's,17.12741,-61.846772
Argentina,Buenos Aires,-34.603684,-58.381559
Armenia,Yerevan,40.179186,44.499103
Aruba,Oranjestad,12.509204,-70.008631
Australia,Canberra,-35.282,149.128684
Austria,Vienna,48.208174,16.373819
Azerbaijan,Baku,40.409262,49.867092
Bahamas,Nassau,25.047984,-77.355413
Bahrain,Manama,26.228516,50.58605
Bangladesh,Dhaka,23.810332,90.412518
Barbados,Bridgetown,13.113222,-59.598809
Belarus,Minsk,53.90454,27.561524
Belgium,Brussels,50.85034,4.35171
Belize,Belmopan,17.251011,-88.75902
Benin,Porto-Novo,6.496857,2.628852
Bermuda,Hamilton,32.294816,-64.781375
Bhutan,Thimphu,27.472792,89.639286
Bolivia,La Paz,-16.489689,-68.119294
Bosnia and Herzegovina,Sarajevo,43.856259,18.413076
Botswana,Gaborone,-24.628208,25.923147
Bouvet Island,Bouvet Island,-54.43,3.38
Brazil,Brasília,-15.794229,-47.882166
British Indian Ocean Territory,Camp Justice,21.3419,55.4778
British Virgin Islands,Road Town,18.428612,-64.618466
Brunei,Bandar Seri Begawan,4.903052,114.939821
Bulgaria,Sofia,42.697708,23.321868
Burkina Faso,Ouagadougou,12.371428,-1.51966
Burundi,Bujumbura,-3.361378,29.359878
Cambodia,Phnom Penh,11.544873,104.892167
Cameroon,Yaoundé,3.848033,11.502075
Canada,Ottawa,45.42153,-75.697193
Cape Verde,Praia,14.93305,-23.513327
Cayman Islands,George Town,19.286932,-81.367439
Central African Republic,Bangui,4.394674,18.55819
Chad,N'Djamena,12.134846,15.055742
Chile,Santiago,-33.44889,-70.669265
China,Beijing,39.904211,116.407395
Christmas Island,Flying Fish Cove,-10.420686,105.679379
Cocos (Keeling) Islands,West Island,-12.188834,96.829316
Colombia,Bogotá,4.710989,-74.072092
Comoros,Moroni,-11.717216,43.247315
DR Congo,Kinshasa,-4.441931,15.266293
Congo,Brazzaville,-4.26336,15.242885
Cook Islands,Avarua,-21.212901,-159.782306
Costa Rica,San José,9.928069,-84.090725
Côte d'Ivoire,Yamoussoukro,6.827623,-5.289343
Croatia,Zagreb ,45.815011,15.981919
Cuba,Havana,23.05407,-82.345189
Curaçao,Willemstad,12.122422,-68.882423
Cyprus,Nicosia,35.185566,33.382276
Czech Republic,Prague,50.075538,14.4378
Denmark,Copenhagen,55.676097,12.568337
Djibouti,Djibouti,11.572077,43.145647
Dominica,Roseau,15.309168,-61.379355
Dominican Republic,Santo Domingo,18.486058,-69.931212
Ecuador,Quito,-0.180653,-78.467838
Egypt,Cairo,30.04442,31.235712
El Salvador,San Salvador,13.69294,-89.218191
Equatorial Guinea,Malabo,3.750412,8.737104
Eritrea,Asmara,15.322877,38.925052
Estonia,Tallinn,59.436961,24.753575
Ethiopia,Addis Ababa,8.980603,38.757761
Falkland Islands (Islas Malvinas),Stanley,-51.697713,-57.851663
Faroe Islands,Tórshavn,62.007864,-6.790982
Fiji,Suva,-18.124809,178.450079
Finland,Helsinki,60.173324,24.941025
France,Paris,48.856614,2.352222
French Guiana,Cayenne,4.92242,-52.313453
French Polynesia,Papeete,-17.551625,-149.558476
French Southern Territories,Saint-Pierre ,-21.3419,55.4778
Gabon,Libreville,0.416198,9.467268
Gambia,Banjul,13.454876,-16.579032
Georgia,Tbilisi,41.715138,44.827096
Germany,Berlin,52.520007,13.404954
Ghana,Accra,5.603717,-0.186964
Gibraltar,Gibraltar,36.140773,-5.353599
Greece,Athens,37.983917,23.72936
Greenland,Nuuk,64.18141,-51.694138
Grenada,Saint George's,12.056098,-61.7488
Guadeloupe,Basse-Terre,16.014453,-61.706411
Guam,Hagåtña,13.470891,144.751278
Guatemala,Guatemala City,14.634915,-90.506882
Guernsey,Saint Peter Port,49.455443,-2.536871
Guinea,Conakry,9.641185,-13.578401
Guinea-Bissau,Bissau,11.881655,-15.617794
Guyana,Georgetown,6.801279,-58.155125
Haiti,Port-au-Prince,18.594395,-72.307433
Honduras,Tegucigalpa,14.072275,-87.192136
Hong Kong,Hong Kong,22.396428,114.109497
Hungary,Budapest,47.497912,19.040235
Iceland,Reykjavík,64.126521,-21.817439
India,New Delhi,28.613939,77.209021
Indonesia,Jakarta,-6.208763,106.845599
Iran,Tehran,35.689198,51.388974
Iraq,Baghdad,33.312806,44.361488
Ireland,Dublin,53.349805,-6.26031
Isle of Man,Douglas,54.152337,-4.486123
Israel,Tel Aviv,32.0853,34.781768
Italy,Rome,41.902784,12.496366
Jamaica,Kingston,18.042327,-76.802893
Japan,Tokyo,35.709026,139.731992
Jersey,Saint Helier,49.186823,-2.106568
Jordan,Amman,31.956578,35.945695
Kazakhstan,Astana,51.160523,71.470356
Kenya,Nairobi,-1.292066,36.821946
Kiribati,Tarawa Atoll,1.451817,172.971662
Kosovo,Pristina,42.662914,21.165503
Kuwait,Kuwait City,29.375859,47.977405
Kyrgyzstan,Bishkek,42.874621,74.569762
Laos,Vientiane,17.975706,102.633104
Latvia,Riga,56.949649,24.105186
Lebanon,Beirut,33.888629,35.495479
Lesotho,Maseru,-29.363219,27.51436
Liberia,Monrovia,6.290743,-10.760524
Libya,Tripoli,32.887209,13.191338
Liechtenstein,Vaduz,47.14103,9.520928
Lithuania,Vilnius,54.687156,25.279651
Luxembourg,Luxembourg,49.611621,6.131935
Macau,Macau,22.166667,113.55
North Macedonia,Skopje,41.997346,21.427996
Madagascar,Antananarivo,-18.87919,47.507905
Malawi,Lilongwe,-13.962612,33.774119
Malaysia,Kuala Lumpur,3.139003,101.686855
Maldives,Malé,4.175496,73.509347
Mali,Bamako,12.639232,-8.002889
Malta,Valletta,35.898909,14.514553
Marshall Islands,Majuro,7.116421,171.185774
Martinique,Fort-de-France,14.616065,-61.05878
Mauritania,Nouakchott,18.07353,-15.958237
Mauritius,Port Louis,-20.166896,57.502332
Mayotte,Mamoudzou,-12.780949,45.227872
Mexico,Mexico City,19.432608,-99.133208
Micronesia,Palikir,6.914712,158.161027
Moldova,Chisinau,47.010453,28.86381
Monaco,Monaco,43.737411,7.420816
Mongolia,Ulaanbaatar,47.886399,106.905744
Montenegro,Podgorica,42.43042,19.259364
Montserrat,Plymouth,16.706523,-62.215738
Morocco,Rabat,33.97159,-6.849813
Mozambique,Maputo,-25.891968,32.605135
Myanmar,Naypyidaw,19.763306,96.07851
Nagorno-Karabakh Republic,Stepanakert,39.826385,46.763595
Namibia,Windhoek,-22.560881,17.065755
Nauru,Yaren,-0.546686,166.921091
Nepal,Kathmandu,27.717245,85.323961
Netherlands,Amsterdam,52.370216,4.895168
Netherlands Antilles,Willemstad ,12.1091242,-68.9316546
New Caledonia,Nouméa,-22.255823,166.450524
New Zealand,Wellington,-41.28646,174.776236
Nicaragua,Managua,12.114993,-86.236174
Niger,Niamey,13.511596,2.125385
Nigeria,Abuja,9.076479,7.398574
Niue,Alofi,-19.055371,-169.917871
Norfolk Island,Kingston,-29.056394,167.959588
North Korea,Pyongyang,39.039219,125.762524
Northern Cyprus,Nicosia,35.185566,33.382276
Northern Mariana Islands,Saipan,15.177801,145.750967
Norway,Oslo,59.913869,10.752245
Oman,Muscat,23.58589,58.405923
Pakistan,Islamabad,33.729388,73.093146
Palau,Ngerulmud,7.500384,134.624289
Palestine,Ramallah,31.9073509,35.5354719
Panama,Panama City,9.101179,-79.402864
Papua New Guinea,Port Moresby,-9.4438,147.180267
Paraguay,Asuncion,-25.26374,-57.575926
Peru,Lima,-12.046374,-77.042793
Philippines,Manila,14.599512,120.98422
Pitcairn Islands,Adamstown,-25.06629,-130.100464
Poland,Warsaw,52.229676,21.012229
Portugal,Lisbon,38.722252,-9.139337
Puerto Rico,San Juan,18.466334,-66.105722
Qatar,Doha,25.285447,51.53104
Réunion,Saint-Denis,-20.882057,55.450675
Romania,Bucharest,44.426767,26.102538
Russia,Moscow,55.755826,37.6173
Rwanda,Kigali,-1.957875,30.112735
Saint Pierre and Miquelon,Saint Pierre,46.775846,-56.180636
Saint Vincent and the Grenadines,Kingstown,13.160025,-61.224816
Samoa,Apia,-13.850696,-171.751355
San Marino,San Marino,43.935591,12.447281
São Tomé and Príncipe,São Tomé,0.330192,6.733343
Saudi Arabia,Riyadh,24.749403,46.902838
Senegal,Dakar,14.764504,-17.366029
Serbia,Belgrade,44.786568,20.448922
Seychelles,Victoria,-4.619143,55.451315
Sierra Leone,Freetown,8.465677,-13.231722
Singapore,Singapore,1.280095,103.850949
Slovakia,Bratislava,48.145892,17.107137
Slovenia,Ljubljana,46.056947,14.505751
Solomon Islands,Honiara,-9.445638,159.9729
Somalia,Mogadishu,2.046934,45.318162
South Africa,Pretoria,-25.747868,28.229271
South Georgia and the South Sandwich Islands,King Edward Point,-54.28325,-36.493735
South Korea,Seoul,37.566535,126.977969
South Ossetia,Tskhinvali,42.22146,43.964405
South Sudan,Juba,4.859363,31.57125
Spain,Madrid,40.416775,-3.70379
Sri Lanka,Sri Jayawardenepura Kotte,6.89407,79.902478
Saint Barthélemy,Gustavia,17.896435,-62.852201
Saint Kitts and Nevis,Basseterre,17.302606,-62.717692
Saint Lucia,Castries,14.010109,-60.987469
Saint Martin,Marigot,18.067519,-63.082466
Sudan,Khartoum,15.500654,32.559899
Suriname,Paramaribo,5.852036,-55.203828
Svalbard and Jan Mayen,Longyearbyen ,78.062,22.055
Eswatini,Mbabane,-26.305448,31.136672
Sweden,Stockholm,59.329323,18.068581
Switzerland,Bern,46.947974,7.447447
Syria,Damascus,33.513807,36.276528
Taiwan,Taipei,25.032969,121.565418
Tajikistan,Dushanbe,38.559772,68.787038
Tanzania,Dodoma,-6.162959,35.751607
Thailand,Bangkok,13.756331,100.501765
Timor-Leste,Dili,-8.556856,125.560314
Togo,Lomé,6.172497,1.231362
Tokelau,Nukunonu,-9.2005,-171.848
Tonga,Nukuʻalofa,-21.139342,-175.204947
Transnistria,Tiraspol,46.848185,29.596805
Trinidad and Tobago,Port of Spain,10.654901,-61.501926
Tristan da Cunha,Edinburgh of the Seven Seas,-37.068042,-12.311315
Tunisia,Tunis,36.806495,10.181532
Turkey,Ankara,39.933364,32.859742
Turkmenistan,Ashgabat,37.960077,58.326063
Turks and Caicos Islands,Cockburn Town,21.467458,-71.13891
Tuvalu,Funafuti,-8.520066,179.198128
U.S. Virgin Islands,Charlotte Amalie,18.3419,-64.930701
Uganda,Kampala,0.347596,32.58252
Ukraine,Kiev,50.4501,30.5234
United Arab Emirates,Abu Dhabi,24.299174,54.697277
United Kingdom,London,51.507351,-0.127758
United States,Washington,38.907192,-77.036871
Uruguay,Montevideo,-34.901113,-56.164531
Uzbekistan,Tashkent,41.299496,69.240073
Vanuatu,Port Vila,-17.733251,168.327325
Vatican City,Vatican City,41.902179,12.453601
Venezuela,Caracas,10.480594,-66.903606
Vietnam,Hanoi,21.027764,105.83416
Wallis and Futuna,Mata-Utu,-13.282509,-176.176447
Western Sahara,El Aaiún,27.125287,-13.1625
Yemen,Sana'a,15.369445,44.191007
Zambia,Lusaka,-15.387526,28.322817
Zimbabwe,Harare,-17.825166,31.03351'''
create_file()
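# Usage sketch (hedged; paths assume the layout implied by the generated header
# comment, i.e. this script lives in game-statics/utils/):
#
#   cd game-statics/utils
#   python countryRON.py
#
# ../assets/Countries.ron should then contain the "automatically generated" header,
# an opening '[', and one (name:..., gdp:..., population:..., lat:..., long:...)
# entry per country listed above.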
0.14%\n62\t Ecuador\t $104,295,862,000\t $104 billion\t 2.37%\t 16,785,361\t $6,214\t 0.13%\n63\t Cuba\t $96,851,000,000\t $96.85 billion\t 1.78%\t 11,339,254\t $8,541\t 0.12%\n64\t Slovakia\t $95,617,670,260\t $95.62 billion\t 3.40%\t 5,447,900\t $17,551\t 0.12%\n65\t Sri Lanka\t $87,357,205,923\t $87.36 billion\t 3.31%\t 21,128,032\t $4,135\t 0.11%\n66\t Ethiopia\t $80,561,496,134\t $80.56 billion\t 10.25%\t 106,399,924\t $757\t 0.10%\n67\t Kenya\t $79,263,075,749\t $79.26 billion\t 4.87%\t 50,221,142\t $1,578\t 0.10%\n68\t Dominican Republic\t $75,931,656,815\t $75.93 billion\t 4.55%\t 10,513,104\t $7,223\t 0.09%\n69\t Guatemala\t $75,620,095,538\t $75.62 billion\t 2.76%\t 16,914,970\t $4,471\t 0.09%\n70\t Oman\t $70,783,875,163\t $70.78 billion\t -0.27%\t 4,665,928\t $15,170\t 0.09%\n71\t Myanmar\t $67,068,745,521\t $67.07 billion\t 6.76%\t 53,382,523\t $1,256\t 0.08%\n72\t Luxembourg\t $62,316,359,824\t $62.32 billion\t 2.30%\t 591,910\t $105,280\t 0.08%\n73\t Panama\t $62,283,756,584\t $62.28 billion\t 5.32%\t 4,106,769\t $15,166\t 0.08%\n74\t Ghana\t $58,996,776,238\t $59.00 billion\t 8.14%\t 29,121,465\t $2,026\t 0.07%\n75\t Bulgaria\t $58,220,973,783\t $58.22 billion\t 3.81%\t 7,102,444\t $8,197\t 0.07%\n76\t Costa Rica\t $57,285,984,448\t $57.29 billion\t 3.28%\t 4,949,954\t $11,573\t 0.07%\n77\t Uruguay\t $56,156,972,158\t $56.16 billion\t 2.66%\t 3,436,641\t $16,341\t 0.07%\n78\t Croatia\t $55,213,087,271\t $55.21 billion\t 2.92%\t 4,182,857\t $13,200\t 0.07%\n79\t Belarus\t $54,456,465,473\t $54.46 billion\t 2.42%\t 9,450,231\t $5,762\t 0.07%\n80\t Lebanon\t $53,576,985,687\t $53.58 billion\t 1.53%\t 6,819,373\t $7,857\t 0.07%\n81\t Tanzania\t $53,320,625,959\t $53.32 billion\t 7.10%\t 54,660,339\t $975\t 0.07%\n82\t Macau\t $50,361,201,096\t $50.36 billion\t 9.10%\t 622,585\t $80,890\t 0.06%\n83\t Uzbekistan\t $49,677,172,714\t $49.68 billion\t 5.30%\t 31,959,785\t $1,554\t 0.06%\n84\t Slovenia\t $48,769,655,479\t $48.77 billion\t 5.00%\t 2,076,394\t $23,488\t 0.06%\n85\t Lithuania\t $47,544,459,559\t $47.54 billion\t 3.83%\t 2,845,414\t $16,709\t 0.06%\n86\t Serbia\t $41,431,648,801\t $41.43 billion\t 1.87%\t 8,829,628\t $4,692\t 0.05%\n87\t Azerbaijan\t $40,747,792,238\t $40.75 billion\t 0.10%\t 9,845,320\t $4,139\t 0.05%\n88\t Jordan\t $40,068,308,451\t $40.07 billion\t 1.97%\t 9,785,843\t $4,095\t 0.05%\n89\t Tunisia\t $39,952,095,561\t $39.95 billion\t 1.96%\t 11,433,443\t $3,494\t 0.05%\n90\t Paraguay\t $39,667,400,816\t $39.67 billion\t 5.21%\t 6,867,061\t $5,776\t 0.05%\n91\t Libya\t $38,107,728,083\t $38.11 billion\t 26.68%\t 6,580,724\t $5,791\t 0.05%\n92\t Turkmenistan\t $37,926,285,714\t $37.93 billion\t 6.50%\t 5,757,667\t $6,587\t 0.05%\n93\t DR Congo\t $37,642,482,562\t $37.64 billion\t 3.70%\t 81,398,764\t $462\t 0.05%\n94\t Bolivia\t $37,508,642,113\t $37.51 billion\t 4.20%\t 11,192,855\t $3,351\t 0.05%\n95\t Côte d'Ivoire\t $37,353,276,059\t $37.35 billion\t 7.70%\t 24,437,470\t $1,529\t 0.05%\n96\t Bahrain\t $35,432,686,170\t $35.43 billion\t 3.88%\t 1,494,076\t $23,715\t 0.04%\n97\t Cameroon\t $34,922,782,311\t $34.92 billion\t 3.55%\t 24,566,073\t $1,422\t 0.04%\n98\t Yemen\t $31,267,675,216\t $31.27 billion\t -5.94%\t 27,834,819\t $1,123\t 0.04%\n99\t Latvia\t $30,463,302,414\t $30.46 billion\t 4.55%\t 1,951,097\t $15,613\t 0.04%\n100\t Estonia\t $26,611,651,599\t $26.61 billion\t 4.85%\t 1,319,390\t $20,170\t 0.03%\n101\t Uganda\t $25,995,031,850\t $26.00 billion\t 3.86%\t 41,166,588\t $631\t 0.03%\n102\t Zambia\t $25,868,142,073\t $25.87 billion\t 
3.40%\t 16,853,599\t $1,535\t 0.03%\n103\t Nepal\t $24,880,266,905\t $24.88 billion\t 7.91%\t 27,632,681\t $900\t 0.03%\n104\t El Salvador\t $24,805,439,600\t $24.81 billion\t 2.32%\t 6,388,126\t $3,883\t 0.03%\n105\t Iceland\t $24,488,467,010\t $24.49 billion\t 3.64%\t 334,393\t $73,233\t 0.03%\n106\t Honduras\t $22,978,532,897\t $22.98 billion\t 4.79%\t 9,429,013\t $2,437\t 0.03%\n107\t Cambodia\t $22,158,209,503\t $22.16 billion\t 7.10%\t 16,009,409\t $1,384\t 0.03%\n108\t Trinidad and Tobago\t $22,079,017,627\t $22.08 billion\t -2.34%\t 1,384,059\t $15,952\t 0.03%\n109\t Cyprus\t $22,054,225,828\t $22.05 billion\t 4.23%\t 1,179,678\t $18,695\t 0.03%\n110\t Zimbabwe\t $22,040,902,300\t $22.04 billion\t 4.70%\t 14,236,595\t $1,548\t 0.03%\n111\t Senegal\t $21,070,225,735\t $21.07 billion\t 7.15%\t 15,419,355\t $1,366\t 0.03%\n112\t Papua New Guinea\t $20,536,314,601\t $20.54 billion\t 2.55%\t 8,438,036\t $2,434\t 0.03%\n113\t Afghanistan\t $19,543,976,895\t $19.54 billion\t 2.67%\t 36,296,113\t $538\t 0.02%\n114\t Bosnia and Herzegovina\t $18,054,854,789\t $18.05 billion\t 3.19%\t 3,351,525\t $5,387\t 0.02%\n115\t Botswana\t $17,406,565,823\t $17.41 billion\t 2.36%\t 2,205,080\t $7,894\t 0.02%\n116\t Laos\t $16,853,087,485\t $16.85 billion\t 6.89%\t 6,953,035\t $2,424\t 0.02%\n117\t Mali\t $15,334,336,144\t $15.33 billion\t 5.40%\t 18,512,430\t $828\t 0.02%\n118\t Georgia\t $15,081,338,092\t $15.08 billion\t 4.83%\t 4,008,716\t $3,762\t 0.02%\n119\t Gabon\t $15,013,950,984\t $15.01 billion\t 0.50%\t 2,064,823\t $7,271\t 0.02%\n120\t Jamaica\t $14,781,107,822\t $14.78 billion\t 0.98%\t 2,920,848\t $5,061\t 0.02%\n121\t Palestine\t $14,498,100,000\t $14.50 billion\t 3.14%\t 4,747,227\t $3,054\t 0.02%\n122\t Nicaragua\t $13,814,261,536\t $13.81 billion\t 4.86%\t 6,384,846\t $2,164\t 0.02%\n123\t Mauritius\t $13,266,427,697\t $13.27 billion\t 3.82%\t 1,264,499\t $10,491\t 0.02%\n124\t Namibia\t $13,253,698,015\t $13.25 billion\t -0.95%\t 2,402,633\t $5,516\t 0.02%\n125\t Albania\t $13,038,538,300\t $13.04 billion\t 3.84%\t 2,884,169\t $4,521\t 0.02%\n126\t Mozambique\t $12,645,508,634\t $12.65 billion\t 3.74%\t 28,649,018\t $441\t 0.02%\n127\t Malta\t $12,518,134,319\t $12.52 billion\t 6.42%\t 437,933\t $28,585\t 0.02%\n128\t Burkina Faso\t $12,322,864,245\t $12.32 billion\t 6.30%\t 19,193,234\t $642\t 0.02%\n129\t Equatorial Guinea\t $12,293,579,173\t $12.29 billion\t -4.92%\t 1,262,002\t $9,741\t 0.02%\n130\t Bahamas\t $12,162,100,000\t $12.16 billion\t 1.44%\t 381,755\t $31,858\t 0.02%\n131\t Brunei\t $12,128,089,002\t $12.13 billion\t 1.33%\t 424,473\t $28,572\t 0.01%\n132\t Armenia\t $11,536,590,636\t $11.54 billion\t 7.50%\t 2,944,791\t $3,918\t 0.01%\n133\t Madagascar\t $11,499,803,807\t $11.50 billion\t 4.17%\t 25,570,512\t $450\t 0.01%\n134\t Mongolia\t $11,433,635,876\t $11.43 billion\t 5.30%\t 3,113,786\t $3,672\t 0.01%\n135\t North Macedonia\t $11,279,509,014\t $11.28 billion\t 0.24%\t 2,081,996\t $5,418\t 0.01%\n136\t Guinea\t $10,472,514,515\t $10.47 billion\t 10.60%\t 12,067,519\t $868\t 0.01%\n137\t Chad\t $9,871,247,732\t $9.87 billion\t -2.95%\t 15,016,753\t $657\t 0.01%\n138\t Benin\t $9,246,696,924\t $9.25 billion\t 5.84%\t 11,175,198\t $827\t 0.01%\n139\t Rwanda\t $9,135,454,442\t $9.14 billion\t 6.06%\t 11,980,961\t $762\t 0.01%\n140\t Congo\t $8,701,334,800\t $8.70 billion\t -3.10%\t 5,110,695\t $1,703\t 0.01%\n141\t Haiti\t $8,408,150,518\t $8.41 billion\t 1.17%\t 10,982,366\t $766\t 0.01%\n142\t Moldova\t $8,128,493,432\t $8.13 billion\t 4.50%\t 4,059,684\t $2,002\t 
0.01%\n143\t Niger\t $8,119,710,126\t $8.12 billion\t 4.89%\t 21,602,382\t $376\t 0.01%\n144\t Kyrgyzstan\t $7,564,738,836\t $7.56 billion\t 4.58%\t 6,189,733\t $1,222\t 0.01%\n145\t Tajikistan\t $7,146,449,583\t $7.15 billion\t 7.62%\t 8,880,268\t $805\t 0.01%\n146\t Malawi\t $6,303,292,264\t $6.30 billion\t 4.00%\t 17,670,196\t $357\t 0.01%\n147\t Guam\t $5,859,000,000\t $5.86 billion\t 0.19%\t 164,281\t $35,665\t 0.01%\n148\t Fiji\t $5,061,202,767\t $5.06 billion\t 3.80%\t 877,459\t $5,768\t 0.01%\n149\t Mauritania\t $5,024,708,656\t $5.02 billion\t 3.50%\t 4,282,570\t $1,173\t 0.01%\n150\t Maldives\t $4,865,546,027\t $4.87 billion\t 6.91%\t 496,402\t $9,802\t 0.01%\n151\t Montenegro\t $4,844,592,067\t $4.84 billion\t 4.70%\t 627,563\t $7,720\t 0.01%\n152\t Togo\t $4,757,776,485\t $4.76 billion\t 4.40%\t 7,698,474\t $618\t 0.01%\n153\t Barbados\t $4,673,500,000\t $4.67 billion\t 1.00%\t 286,232\t $16,328\t 0.01%\n154\t Eswatini\t $4,433,664,364\t $4.43 billion\t 1.87%\t 1,124,805\t $3,942\t 0.01%\n155\t Sierra Leone\t $3,775,047,334\t $3.78 billion\t 4.21%\t 7,488,423\t $504\t 0.00%\n156\t Guyana\t $3,621,046,005\t $3.62 billion\t 2.92%\t 775,222\t $4,671\t 0.00%\n157\t Liberia\t $3,285,455,000\t $3.29 billion\t 2.47%\t 4,702,226\t $699\t 0.00%\n158\t Burundi\t $3,172,416,146\t $3.17 billion\t 0.50%\t 10,827,019\t $293\t 0.00%\n159\t Andorra\t $3,012,914,131\t $3.01 billion\t 1.87%\t 77,001\t $39,128\t 0.00%\n160\t Suriname\t $2,995,827,901\t $3.00 billion\t 1.69%\t 570,496\t $5,251\t 0.00%\n161\t Timor-Leste\t $2,954,621,000\t $2.95 billion\t -8.00%\t 1,243,258\t $2,377\t 0.00%\n162\t Aruba\t $2,700,558,659\t $2.70 billion\t 1.33%\t 105,366\t $25,630\t 0.00%\n163\t Lesotho\t $2,578,265,358\t $2.58 billion\t -2.29%\t 2,091,534\t $1,233\t 0.00%\n164\t Bhutan\t $2,528,007,911\t $2.53 billion\t 4.63%\t 745,563\t $3,391\t 0.00%\n165\t Central African Republic\t $1,949,411,659\t $1.95 billion\t 4.30%\t 4,596,023\t $424\t 0.00%\n166\t Belize\t $1,862,614,800\t $1.86 billion\t 1.44%\t 375,769\t $4,957\t 0.00%\n167\t Cape Verde\t $1,772,706,451\t $1.77 billion\t 4.01%\t 537,498\t $3,298\t 0.00%\n168\t Saint Lucia\t $1,737,504,296\t $1.74 billion\t 3.82%\t 180,954\t $9,602\t 0.00%\n169\t San Marino\t $1,632,860,041\t $1.63 billion\t 1.50%\t 33,671\t $48,495\t 0.00%\n170\t Northern Mariana Islands\t $1,593,000,000\t $1.59 billion\t 25.14%\t 56,562\t $28,164\t 0.00%\n171\t Antigua and Barbuda\t $1,510,084,751\t $1.51 billion\t 3.03%\t 95,426\t $15,825\t 0.00%\n172\t Seychelles\t $1,497,959,569\t $1.50 billion\t 5.28%\t 96,418\t $15,536\t 0.00%\n173\t Gambia\t $1,489,464,788\t $1.49 billion\t 4.56%\t 2,213,889\t $673\t 0.00%\n174\t Guinea-Bissau\t $1,346,841,897\t $1.35 billion\t 5.92%\t 1,828,145\t $737\t 0.00%\n175\t Solomon Islands\t $1,303,453,622\t $1.30 billion\t 3.24%\t 636,039\t $2,049\t 0.00%\n176\t Grenada\t $1,126,882,296\t $1.13 billion\t 5.06%\t 110,874\t $10,164\t 0.00%\n177\t Comoros\t $1,068,124,330\t $1.07 billion\t 2.71%\t 813,892\t $1,312\t 0.00%\n178\t Saint Kitts and Nevis\t $992,007,403\t $992 million\t 1.17%\t 52,045\t $19,061\t 0.00%\n179\t Vanuatu\t $862,879,789\t $863 million\t 4.50%\t 285,510\t $3,022\t 0.00%\n180\t Samoa\t $840,927,997\t $841 million\t 2.70%\t 195,352\t $4,305\t 0.00%\n181\t Saint Vincent and the Grenadines\t $785,222,509\t $785 million\t 0.86%\t 109,827\t $7,150\t 0.00%\n182\t American Samoa\t $634,000,000\t $634 million\t -5.38%\t 55,620\t $11,399\t 0.00%\n183\t Dominica\t $496,727,000\t $497 million\t -9.53%\t 71,458\t $6,951\t 0.00%\n184\t Tonga\t 
$427,659,795\t $428 million\t 2.70%\t 101,998\t $4,193\t 0.00%\n185\t São Tomé and Príncipe\t $392,570,293\t $393 million\t 3.87%\t 207,089\t $1,896\t 0.00%\n186\t Micronesia\t $336,427,500\t $336 million\t 3.20%\t 532,899\t $631\t 0.00%\n187\t Palau\t $289,823,500\t $290 million\t -3.57%\t 17,808\t $16,275\t 0.00%\n188\t Marshall Islands\t $204,173,430\t $204 million\t 3.60%\t 58,058\t $3,517\t 0.00%\n189\t Kiribati\t $185,572,502\t $186 million\t 0.33%\t 114,158\t $1,626\t 0.00%\n190\t Tuvalu\t $39,731,317\t $40 million\t 3.24%\t 11,370\t $3,494\t 0.00%'''\n\ncoords = '''Abkhazia,Sukhumi,43.001525,41.023415\nAfghanistan,Kabul,34.575503,69.240073\nAland Islands,Mariehamn,60.1,19.933333\nAlbania,Tirana,41.327546,19.818698\nAlgeria,Algiers,36.752887,3.042048\nAmerican Samoa,Pago Pago,-14.275632,-170.702036\nAndorra,Andorra la Vella,42.506317,1.521835\nAngola,Luanda,-8.839988,13.289437\nAnguilla,The Valley,18.214813,-63.057441\nAntarctica,South Pole,-90,0\nAntigua and Barbuda,Saint John's,17.12741,-61.846772\nArgentina,Buenos Aires,-34.603684,-58.381559\nArmenia,Yerevan,40.179186,44.499103\nAruba,Oranjestad,12.509204,-70.008631\nAustralia,Canberra,-35.282,149.128684\nAustria,Vienna,48.208174,16.373819\nAzerbaijan,Baku,40.409262,49.867092\nBahamas,Nassau,25.047984,-77.355413\nBahrain,Manama,26.228516,50.58605\nBangladesh,Dhaka,23.810332,90.412518\nBarbados,Bridgetown,13.113222,-59.598809\nBelarus,Minsk,53.90454,27.561524\nBelgium,Brussels,50.85034,4.35171\nBelize,Belmopan,17.251011,-88.75902\nBenin,Porto-Novo,6.496857,2.628852\nBermuda,Hamilton,32.294816,-64.781375\nBhutan,Thimphu,27.472792,89.639286\nBolivia,La Paz,-16.489689,-68.119294\nBosnia and Herzegovina,Sarajevo,43.856259,18.413076\nBotswana,Gaborone,-24.628208,25.923147\nBouvet Island,Bouvet Island,-54.43,3.38\nBrazil,Brasília,-15.794229,-47.882166\nBritish Indian Ocean Territory,Camp Justice,21.3419,55.4778\nBritish Virgin Islands,Road Town,18.428612,-64.618466\nBrunei,Bandar Seri Begawan,4.903052,114.939821\nBulgaria,Sofia,42.697708,23.321868\nBurkina Faso,Ouagadougou,12.371428,-1.51966\nBurundi,Bujumbura,-3.361378,29.359878\nCambodia,Phnom Penh,11.544873,104.892167\nCameroon,Yaoundé,3.848033,11.502075\nCanada,Ottawa,45.42153,-75.697193\nCape Verde,Praia,14.93305,-23.513327\nCayman Islands,George Town,19.286932,-81.367439\nCentral African Republic,Bangui,4.394674,18.55819\nChad,N'Djamena,12.134846,15.055742\nChile,Santiago,-33.44889,-70.669265\nChina,Beijing,39.904211,116.407395\nChristmas Island,Flying Fish Cove,-10.420686,105.679379\nCocos (Keeling) Islands,West Island,-12.188834,96.829316\nColombia,Bogotá,4.710989,-74.072092\nComoros,Moroni,-11.717216,43.247315\nDR Congo,Kinshasa,-4.441931,15.266293\nCongo,Brazzaville,-4.26336,15.242885\nCook Islands,Avarua,-21.212901,-159.782306\nCosta Rica,San José,9.928069,-84.090725\nCôte d'Ivoire,Yamoussoukro,6.827623,-5.289343\nCroatia,Zagreb ,45.815011,15.981919\nCuba,Havana,23.05407,-82.345189\nCuraçao,Willemstad,12.122422,-68.882423\nCyprus,Nicosia,35.185566,33.382276\nCzech Republic,Prague,50.075538,14.4378\nDenmark,Copenhagen,55.676097,12.568337\nDjibouti,Djibouti,11.572077,43.145647\nDominica,Roseau,15.309168,-61.379355\nDominican Republic,Santo Domingo,18.486058,-69.931212\nEcuador,Quito,-0.180653,-78.467838\nEgypt,Cairo,30.04442,31.235712\nEl Salvador,San Salvador,13.69294,-89.218191\nEquatorial Guinea,Malabo,3.750412,8.737104\nEritrea,Asmara,15.322877,38.925052\nEstonia,Tallinn,59.436961,24.753575\nEthiopia,Addis Ababa,8.980603,38.757761\nFalkland Islands (Islas 
Malvinas),Stanley,-51.697713,-57.851663\nFaroe Islands,Tórshavn,62.007864,-6.790982\nFiji,Suva,-18.124809,178.450079\nFinland,Helsinki,60.173324,24.941025\nFrance,Paris,48.856614,2.352222\nFrench Guiana,Cayenne,4.92242,-52.313453\nFrench Polynesia,Papeete,-17.551625,-149.558476\nFrench Southern Territories,Saint-Pierre ,-21.3419,55.4778\nGabon,Libreville,0.416198,9.467268\nGambia,Banjul,13.454876,-16.579032\nGeorgia,Tbilisi,41.715138,44.827096\nGermany,Berlin,52.520007,13.404954\nGhana,Accra,5.603717,-0.186964\nGibraltar,Gibraltar,36.140773,-5.353599\nGreece,Athens,37.983917,23.72936\nGreenland,Nuuk,64.18141,-51.694138\nGrenada,Saint George's,12.056098,-61.7488\nGuadeloupe,Basse-Terre,16.014453,-61.706411\nGuam,Hagåtña,13.470891,144.751278\nGuatemala,Guatemala City,14.634915,-90.506882\nGuernsey,Saint Peter Port,49.455443,-2.536871\nGuinea,Conakry,9.641185,-13.578401\nGuinea-Bissau,Bissau,11.881655,-15.617794\nGuyana,Georgetown,6.801279,-58.155125\nHaiti,Port-au-Prince,18.594395,-72.307433\nHonduras,Tegucigalpa,14.072275,-87.192136\nHong Kong,Hong Kong,22.396428,114.109497\nHungary,Budapest,47.497912,19.040235\nIceland,Reykjavík,64.126521,-21.817439\nIndia,New Delhi,28.613939,77.209021\nIndonesia,Jakarta,-6.208763,106.845599\nIran,Tehran,35.689198,51.388974\nIraq,Baghdad,33.312806,44.361488\nIreland,Dublin,53.349805,-6.26031\nIsle of Man,Douglas,54.152337,-4.486123\nIsrael,Tel Aviv,32.0853,34.781768\nItaly,Rome,41.902784,12.496366\nJamaica,Kingston,18.042327,-76.802893\nJapan,Tokyo,35.709026,139.731992\nJersey,Saint Helier,49.186823,-2.106568\nJordan,Amman,31.956578,35.945695\nKazakhstan,Astana,51.160523,71.470356\nKenya,Nairobi,-1.292066,36.821946\nKiribati,Tarawa Atoll,1.451817,172.971662\nKosovo,Pristina,42.662914,21.165503\nKuwait,Kuwait City,29.375859,47.977405\nKyrgyzstan,Bishkek,42.874621,74.569762\nLaos,Vientiane,17.975706,102.633104\nLatvia,Riga,56.949649,24.105186\nLebanon,Beirut,33.888629,35.495479\nLesotho,Maseru,-29.363219,27.51436\nLiberia,Monrovia,6.290743,-10.760524\nLibya,Tripoli,32.887209,13.191338\nLiechtenstein,Vaduz,47.14103,9.520928\nLithuania,Vilnius,54.687156,25.279651\nLuxembourg,Luxembourg,49.611621,6.131935\nMacau,Macau,22.166667,113.55\nNorth Macedonia,Skopje,41.997346,21.427996\nMadagascar,Antananarivo,-18.87919,47.507905\nMalawi,Lilongwe,-13.962612,33.774119\nMalaysia,Kuala Lumpur,3.139003,101.686855\nMaldives,Malé,4.175496,73.509347\nMali,Bamako,12.639232,-8.002889\nMalta,Valletta,35.898909,14.514553\nMarshall Islands,Majuro,7.116421,171.185774\nMartinique,Fort-de-France,14.616065,-61.05878\nMauritania,Nouakchott,18.07353,-15.958237\nMauritius,Port Louis,-20.166896,57.502332\nMayotte,Mamoudzou,-12.780949,45.227872\nMexico,Mexico City,19.432608,-99.133208\nMicronesia,Palikir,6.914712,158.161027\nMoldova,Chisinau,47.010453,28.86381\nMonaco,Monaco,43.737411,7.420816\nMongolia,Ulaanbaatar,47.886399,106.905744\nMontenegro,Podgorica,42.43042,19.259364\nMontserrat,Plymouth,16.706523,-62.215738\nMorocco,Rabat,33.97159,-6.849813\nMozambique,Maputo,-25.891968,32.605135\nMyanmar,Naypyidaw,19.763306,96.07851\nNagorno-Karabakh Republic,Stepanakert,39.826385,46.763595\nNamibia,Windhoek,-22.560881,17.065755\nNauru,Yaren,-0.546686,166.921091\nNepal,Kathmandu,27.717245,85.323961\nNetherlands,Amsterdam,52.370216,4.895168\nNetherlands Antilles,Willemstad ,12.1091242,-68.9316546\nNew Caledonia,Nouméa,-22.255823,166.450524\nNew 
Zealand,Wellington,-41.28646,174.776236\nNicaragua,Managua,12.114993,-86.236174\nNiger,Niamey,13.511596,2.125385\nNigeria,Abuja,9.076479,7.398574\nNiue,Alofi,-19.055371,-169.917871\nNorfolk Island,Kingston,-29.056394,167.959588\nNorth Korea,Pyongyang,39.039219,125.762524\nNorthern Cyprus,Nicosia,35.185566,33.382276\nNorthern Mariana Islands,Saipan,15.177801,145.750967\nNorway,Oslo,59.913869,10.752245\nOman,Muscat,23.58589,58.405923\nPakistan,Islamabad,33.729388,73.093146\nPalau,Ngerulmud,7.500384,134.624289\nPalestine,Ramallah,31.9073509,35.5354719\nPanama,Panama City,9.101179,-79.402864\nPapua New Guinea,Port Moresby,-9.4438,147.180267\nParaguay,Asuncion,-25.26374,-57.575926\nPeru,Lima,-12.046374,-77.042793\nPhilippines,Manila,14.599512,120.98422\nPitcairn Islands,Adamstown,-25.06629,-130.100464\nPoland,Warsaw,52.229676,21.012229\nPortugal,Lisbon,38.722252,-9.139337\nPuerto Rico,San Juan,18.466334,-66.105722\nQatar,Doha,25.285447,51.53104\nRéunion,Saint-Denis,-20.882057,55.450675\nRomania,Bucharest,44.426767,26.102538\nRussia,Moscow,55.755826,37.6173\nRwanda,Kigali,-1.957875,30.112735\nSaint Pierre and Miquelon,Saint Pierre,46.775846,-56.180636\nSaint Vincent and the Grenadines,Kingstown,13.160025,-61.224816\nSamoa,Apia,-13.850696,-171.751355\nSan Marino,San Marino,43.935591,12.447281\nSão Tomé and Príncipe,São Tomé,0.330192,6.733343\nSaudi Arabia,Riyadh,24.749403,46.902838\nSenegal,Dakar,14.764504,-17.366029\nSerbia,Belgrade,44.786568,20.448922\nSeychelles,Victoria,-4.619143,55.451315\nSierra Leone,Freetown,8.465677,-13.231722\nSingapore,Singapore,1.280095,103.850949\nSlovakia,Bratislava,48.145892,17.107137\nSlovenia,Ljubljana,46.056947,14.505751\nSolomon Islands,Honiara,-9.445638,159.9729\nSomalia,Mogadishu,2.046934,45.318162\nSouth Africa,Pretoria,-25.747868,28.229271\nSouth Georgia and the South Sandwich Islands,King Edward Point,-54.28325,-36.493735\nSouth Korea,Seoul,37.566535,126.977969\nSouth Ossetia,Tskhinvali,42.22146,43.964405\nSouth Sudan,Juba,4.859363,31.57125\nSpain,Madrid,40.416775,-3.70379\nSri Lanka,Sri Jayawardenepura Kotte,6.89407,79.902478\nSaint Barthélemy,Gustavia,17.896435,-62.852201\nSaint Kitts and Nevis,Basseterre,17.302606,-62.717692\nSaint Lucia,Castries,14.010109,-60.987469\nSaint Martin,Marigot,18.067519,-63.082466\nSudan,Khartoum,15.500654,32.559899\nSuriname,Paramaribo,5.852036,-55.203828\nSvalbard and Jan Mayen,Longyearbyen ,78.062,22.055\nEswatini,Mbabane,-26.305448,31.136672\nSweden,Stockholm,59.329323,18.068581\nSwitzerland,Bern,46.947974,7.447447\nSyria,Damascus,33.513807,36.276528\nTaiwan,Taipei,25.032969,121.565418\nTajikistan,Dushanbe,38.559772,68.787038\nTanzania,Dodoma,-6.162959,35.751607\nThailand,Bangkok,13.756331,100.501765\nTimor-Leste,Dili,-8.556856,125.560314\nTogo,Lomé,6.172497,1.231362\nTokelau,Nukunonu,-9.2005,-171.848\nTonga,Nukuʻalofa,-21.139342,-175.204947\nTransnistria,Tiraspol,46.848185,29.596805\nTrinidad and Tobago,Port of Spain,10.654901,-61.501926\nTristan da Cunha,Edinburgh of the Seven Seas,-37.068042,-12.311315\nTunisia,Tunis,36.806495,10.181532\nTurkey,Ankara,39.933364,32.859742\nTurkmenistan,Ashgabat,37.960077,58.326063\nTurks and Caicos Islands,Cockburn Town,21.467458,-71.13891\nTuvalu,Funafuti,-8.520066,179.198128\nU.S. 
Virgin Islands,Charlotte Amalie,18.3419,-64.930701\nUganda,Kampala,0.347596,32.58252\nUkraine,Kiev,50.4501,30.5234\nUnited Arab Emirates,Abu Dhabi,24.299174,54.697277\nUnited Kingdom,London,51.507351,-0.127758\nUnited States,Washington,38.907192,-77.036871\nUruguay,Montevideo,-34.901113,-56.164531\nUzbekistan,Tashkent,41.299496,69.240073\nVanuatu,Port Vila,-17.733251,168.327325\nVatican City,Vatican City,41.902179,12.453601\nVenezuela,Caracas,10.480594,-66.903606\nVietnam,Hanoi,21.027764,105.83416\nWallis and Futuna,Mata-Utu,-13.282509,-176.176447\nWestern Sahara,El Aaiún,27.125287,-13.1625\nYemen,Sana'a,15.369445,44.191007\nZambia,Lusaka,-15.387526,28.322817\nZimbabwe,Harare,-17.825166,31.03351'''\n\ncreate_file()\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('Site', '0004_arquivopdf')]
operations = [migrations.CreateModel(name='historico', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('criados', models.DateField(
auto_now_add=True, verbose_name='Criação')), ('modificado', models.
DateField(auto_now=True, verbose_name='Atualização')), ('ativo',
models.BooleanField(default=True, verbose_name='Ativo?')), (
'titulo', models.CharField(max_length=100, verbose_name='Título')),
('imagem', stdimage.models.StdImageField(upload_to='img_historico',
verbose_name='Imagem')), ('subtitulo01', models.CharField(
max_length=100, verbose_name='Subtítulo01')), ('descricao01',
models.TextField(max_length=200, verbose_name=
'Subtítulo01 Descrição')), ('subtitulo02', models.CharField(
max_length=100, verbose_name='Subtítulo02')), ('descricao02',
models.TextField(max_length=200, verbose_name=
'Subtítulo02 Descrição')), ('contador01', models.CharField(
max_length=50, verbose_name='contador01')), ('valor01', models.
TextField(max_length=6, verbose_name='valor contador01')), (
'contador02', models.CharField(max_length=50, verbose_name=
'contador02')), ('valor02', models.TextField(max_length=6,
verbose_name='valor contador02')), ('contador03', models.CharField(
max_length=50, verbose_name='contador03')), ('valor03', models.
TextField(max_length=6, verbose_name='valor contador03')), (
'subtitulo03', models.CharField(max_length=100, verbose_name=
'Subtítulo03')), ('descricao03', models.TextField(max_length=200,
verbose_name='Subtítulo03 Descrição'))], options={'verbose_name':
'Notícia', 'verbose_name_plural': 'Noticias'}), migrations.AddField
(model_name='arquivopdf', name='descricao', field=models.TextField(
default=1, max_length=200, verbose_name='Descrição'),
preserve_default=False), migrations.AddField(model_name=
'arquivopdf', name='titulo', field=models.CharField(default=1,
max_length=100, verbose_name='Título'), preserve_default=False)]
<|reserved_special_token_1|>
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
dependencies = [('Site', '0004_arquivopdf')]
operations = [migrations.CreateModel(name='historico', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('criados', models.DateField(
auto_now_add=True, verbose_name='Criação')), ('modificado', models.
DateField(auto_now=True, verbose_name='Atualização')), ('ativo',
models.BooleanField(default=True, verbose_name='Ativo?')), (
'titulo', models.CharField(max_length=100, verbose_name='Título')),
('imagem', stdimage.models.StdImageField(upload_to='img_historico',
verbose_name='Imagem')), ('subtitulo01', models.CharField(
max_length=100, verbose_name='Subtítulo01')), ('descricao01',
models.TextField(max_length=200, verbose_name=
'Subtítulo01 Descrição')), ('subtitulo02', models.CharField(
max_length=100, verbose_name='Subtítulo02')), ('descricao02',
models.TextField(max_length=200, verbose_name=
'Subtítulo02 Descrição')), ('contador01', models.CharField(
max_length=50, verbose_name='contador01')), ('valor01', models.
TextField(max_length=6, verbose_name='valor contador01')), (
'contador02', models.CharField(max_length=50, verbose_name=
'contador02')), ('valor02', models.TextField(max_length=6,
verbose_name='valor contador02')), ('contador03', models.CharField(
max_length=50, verbose_name='contador03')), ('valor03', models.
TextField(max_length=6, verbose_name='valor contador03')), (
'subtitulo03', models.CharField(max_length=100, verbose_name=
'Subtítulo03')), ('descricao03', models.TextField(max_length=200,
verbose_name='Subtítulo03 Descrição'))], options={'verbose_name':
'Notícia', 'verbose_name_plural': 'Noticias'}), migrations.AddField
(model_name='arquivopdf', name='descricao', field=models.TextField(
default=1, max_length=200, verbose_name='Descrição'),
preserve_default=False), migrations.AddField(model_name=
'arquivopdf', name='titulo', field=models.CharField(default=1,
max_length=100, verbose_name='Título'), preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 3.1.5 on 2021-02-24 18:34
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
dependencies = [
('Site', '0004_arquivopdf'),
]
operations = [
migrations.CreateModel(
name='historico',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criados', models.DateField(auto_now_add=True, verbose_name='Criação')),
('modificado', models.DateField(auto_now=True, verbose_name='Atualização')),
('ativo', models.BooleanField(default=True, verbose_name='Ativo?')),
('titulo', models.CharField(max_length=100, verbose_name='Título')),
('imagem', stdimage.models.StdImageField(upload_to='img_historico', verbose_name='Imagem')),
('subtitulo01', models.CharField(max_length=100, verbose_name='Subtítulo01')),
('descricao01', models.TextField(max_length=200, verbose_name='Subtítulo01 Descrição')),
('subtitulo02', models.CharField(max_length=100, verbose_name='Subtítulo02')),
('descricao02', models.TextField(max_length=200, verbose_name='Subtítulo02 Descrição')),
('contador01', models.CharField(max_length=50, verbose_name='contador01')),
('valor01', models.TextField(max_length=6, verbose_name='valor contador01')),
('contador02', models.CharField(max_length=50, verbose_name='contador02')),
('valor02', models.TextField(max_length=6, verbose_name='valor contador02')),
('contador03', models.CharField(max_length=50, verbose_name='contador03')),
('valor03', models.TextField(max_length=6, verbose_name='valor contador03')),
('subtitulo03', models.CharField(max_length=100, verbose_name='Subtítulo03')),
('descricao03', models.TextField(max_length=200, verbose_name='Subtítulo03 Descrição')),
],
options={
'verbose_name': 'Notícia',
'verbose_name_plural': 'Noticias',
},
),
migrations.AddField(
model_name='arquivopdf',
name='descricao',
field=models.TextField(default=1, max_length=200, verbose_name='Descrição'),
preserve_default=False,
),
migrations.AddField(
model_name='arquivopdf',
name='titulo',
field=models.CharField(default=1, max_length=100, verbose_name='Título'),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "321147f2e2d8caf6d9224e2a8969f51ded48baf7",
"index": 8130,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Site', '0004_arquivopdf')]\n operations = [migrations.CreateModel(name='historico', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('criados', models.DateField(\n auto_now_add=True, verbose_name='Criação')), ('modificado', models.\n DateField(auto_now=True, verbose_name='Atualização')), ('ativo',\n models.BooleanField(default=True, verbose_name='Ativo?')), (\n 'titulo', models.CharField(max_length=100, verbose_name='Título')),\n ('imagem', stdimage.models.StdImageField(upload_to='img_historico',\n verbose_name='Imagem')), ('subtitulo01', models.CharField(\n max_length=100, verbose_name='Subtítulo01')), ('descricao01',\n models.TextField(max_length=200, verbose_name=\n 'Subtítulo01 Descrição')), ('subtitulo02', models.CharField(\n max_length=100, verbose_name='Subtítulo02')), ('descricao02',\n models.TextField(max_length=200, verbose_name=\n 'Subtítulo02 Descrição')), ('contador01', models.CharField(\n max_length=50, verbose_name='contador01')), ('valor01', models.\n TextField(max_length=6, verbose_name='valor contador01')), (\n 'contador02', models.CharField(max_length=50, verbose_name=\n 'contador02')), ('valor02', models.TextField(max_length=6,\n verbose_name='valor contador02')), ('contador03', models.CharField(\n max_length=50, verbose_name='contador03')), ('valor03', models.\n TextField(max_length=6, verbose_name='valor contador03')), (\n 'subtitulo03', models.CharField(max_length=100, verbose_name=\n 'Subtítulo03')), ('descricao03', models.TextField(max_length=200,\n verbose_name='Subtítulo03 Descrição'))], options={'verbose_name':\n 'Notícia', 'verbose_name_plural': 'Noticias'}), migrations.AddField\n (model_name='arquivopdf', name='descricao', field=models.TextField(\n default=1, max_length=200, verbose_name='Descrição'),\n preserve_default=False), migrations.AddField(model_name=\n 'arquivopdf', name='titulo', field=models.CharField(default=1,\n max_length=100, verbose_name='Título'), preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport stdimage.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Site', '0004_arquivopdf')]\n operations = [migrations.CreateModel(name='historico', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('criados', models.DateField(\n auto_now_add=True, verbose_name='Criação')), ('modificado', models.\n DateField(auto_now=True, verbose_name='Atualização')), ('ativo',\n models.BooleanField(default=True, verbose_name='Ativo?')), (\n 'titulo', models.CharField(max_length=100, verbose_name='Título')),\n ('imagem', stdimage.models.StdImageField(upload_to='img_historico',\n verbose_name='Imagem')), ('subtitulo01', models.CharField(\n max_length=100, verbose_name='Subtítulo01')), ('descricao01',\n models.TextField(max_length=200, verbose_name=\n 'Subtítulo01 Descrição')), ('subtitulo02', models.CharField(\n max_length=100, verbose_name='Subtítulo02')), ('descricao02',\n models.TextField(max_length=200, verbose_name=\n 'Subtítulo02 Descrição')), ('contador01', models.CharField(\n max_length=50, verbose_name='contador01')), ('valor01', models.\n TextField(max_length=6, verbose_name='valor contador01')), (\n 'contador02', models.CharField(max_length=50, verbose_name=\n 'contador02')), ('valor02', models.TextField(max_length=6,\n verbose_name='valor contador02')), ('contador03', models.CharField(\n max_length=50, verbose_name='contador03')), ('valor03', models.\n TextField(max_length=6, verbose_name='valor contador03')), (\n 'subtitulo03', models.CharField(max_length=100, verbose_name=\n 'Subtítulo03')), ('descricao03', models.TextField(max_length=200,\n verbose_name='Subtítulo03 Descrição'))], options={'verbose_name':\n 'Notícia', 'verbose_name_plural': 'Noticias'}), migrations.AddField\n (model_name='arquivopdf', name='descricao', field=models.TextField(\n default=1, max_length=200, verbose_name='Descrição'),\n preserve_default=False), migrations.AddField(model_name=\n 'arquivopdf', name='titulo', field=models.CharField(default=1,\n max_length=100, verbose_name='Título'), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.1.5 on 2021-02-24 18:34\n\nfrom django.db import migrations, models\nimport stdimage.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Site', '0004_arquivopdf'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='historico',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('criados', models.DateField(auto_now_add=True, verbose_name='Criação')),\n ('modificado', models.DateField(auto_now=True, verbose_name='Atualização')),\n ('ativo', models.BooleanField(default=True, verbose_name='Ativo?')),\n ('titulo', models.CharField(max_length=100, verbose_name='Título')),\n ('imagem', stdimage.models.StdImageField(upload_to='img_historico', verbose_name='Imagem')),\n ('subtitulo01', models.CharField(max_length=100, verbose_name='Subtítulo01')),\n ('descricao01', models.TextField(max_length=200, verbose_name='Subtítulo01 Descrição')),\n ('subtitulo02', models.CharField(max_length=100, verbose_name='Subtítulo02')),\n ('descricao02', models.TextField(max_length=200, verbose_name='Subtítulo02 Descrição')),\n ('contador01', models.CharField(max_length=50, verbose_name='contador01')),\n ('valor01', models.TextField(max_length=6, verbose_name='valor contador01')),\n ('contador02', models.CharField(max_length=50, verbose_name='contador02')),\n ('valor02', models.TextField(max_length=6, verbose_name='valor contador02')),\n ('contador03', models.CharField(max_length=50, verbose_name='contador03')),\n ('valor03', models.TextField(max_length=6, verbose_name='valor contador03')),\n ('subtitulo03', models.CharField(max_length=100, verbose_name='Subtítulo03')),\n ('descricao03', models.TextField(max_length=200, verbose_name='Subtítulo03 Descrição')),\n ],\n options={\n 'verbose_name': 'Notícia',\n 'verbose_name_plural': 'Noticias',\n },\n ),\n migrations.AddField(\n model_name='arquivopdf',\n name='descricao',\n field=models.TextField(default=1, max_length=200, verbose_name='Descrição'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='arquivopdf',\n name='titulo',\n field=models.CharField(default=1, max_length=100, verbose_name='Título'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
from helpers.models import BaseAbstractModel
from Auth.models import Profile
# from Jobs.models import UserJob
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Notification(BaseAbstractModel):
title = models.CharField(max_length=200)
body = models.TextField()
recipients = models.ManyToManyField(to=Profile,
related_name='notifications',
related_query_name='notification')
time_stamp = models.DateTimeField(auto_now_add=True)
read = models.BooleanField(default=False)
# @receiver(post_save, sender=UserJob)
# def job_handler(sender, instance, **kwargs):
# if instance.is_active:
# profile_list = instance.author.profile.all()
# subscribed_users = profile_list.filter(
# Q(user__notification_subscription__in_app_notifications=True) | Q(
# user__notification_subscription__email_notifications=True))
# email_subscribed_users = profile_list.filter(
# user__notification_subscription__email_notifications=True)
# if(subscribed_users.count() >= 1):
# notification = Notification.objects.create(
# title="New Job on Twous",
# body=re.sub(' +', ' ', "{} has published another job \
# titled {}".format(
# instance.author.first_name.capitalize(),
# instance.title)))
# notification.recipients.add(*subscribed_users)
# if(email_subscribed_users.count() >= 1):
# send_emails_to_recipients(notification, email_subscribed_users)
# notification.save()
|
normal
|
{
"blob_id": "1066f86d3a35e892ca2a7054dfc89fe79f1d32c8",
"index": 7496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-4": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile, related_name=\n 'notifications', related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n",
"step-5": "from django.db import models\nfrom helpers.models import BaseAbstractModel\nfrom Auth.models import Profile\n# from Jobs.models import UserJob\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n# Create your models here.\nclass Notification(BaseAbstractModel):\n title = models.CharField(max_length=200)\n body = models.TextField()\n recipients = models.ManyToManyField(to=Profile,\n related_name='notifications',\n related_query_name='notification')\n time_stamp = models.DateTimeField(auto_now_add=True)\n read = models.BooleanField(default=False)\n\n# @receiver(post_save, sender=UserJob)\n# def job_handler(sender, instance, **kwargs):\n# if instance.is_active:\n# profile_list = instance.author.profile.all()\n# subscribed_users = profile_list.filter(\n# Q(user__notification_subscription__in_app_notifications=True) | Q(\n# user__notification_subscription__email_notifications=True))\n\n# email_subscribed_users = profile_list.filter(\n# user__notification_subscription__email_notifications=True)\n# if(subscribed_users.count() >= 1):\n\n# notification = Notification.objects.create(\n# title=\"New Job on Twous\",\n# body=re.sub(' +', ' ', \"{} has published another job \\\n# titled {}\".format(\n# instance.author.first_name.capitalize(),\n# instance.title)))\n# notification.recipients.add(*subscribed_users)\n\n# if(email_subscribed_users.count() >= 1):\n# send_emails_to_recipients(notification, email_subscribed_users)\n\n# notification.save()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(TutorialsReview)
admin.site.register(TutorialsReviewComment)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import TutorialsReview, TutorialsReviewComment
admin.site.register(TutorialsReview)
admin.site.register(TutorialsReviewComment)
|
flexible
|
{
"blob_id": "fea0619263b081f60ed0a4e178ef777a8d5dc988",
"index": 6500,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(TutorialsReview)\nadmin.site.register(TutorialsReviewComment)\n",
"step-3": "from django.contrib import admin\nfrom .models import TutorialsReview, TutorialsReviewComment\nadmin.site.register(TutorialsReview)\nadmin.site.register(TutorialsReviewComment)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观',
'红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44',
'落', '')]
mysql_data = []
import numpy as np
for l in list1:
array = np.array(l)
tolist = array.tolist()
tolist.insert(0, 'ppp')
tolist.append('lll')
mysql_data.append(tolist)
print(mysql_data)
import requests
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
}
get = requests.get('http://www.baidu.com', headers=headers)
print(get.text)
|
normal
|
{
"blob_id": "896d836ede533bad24f4077e5ba964105d96bf7a",
"index": 9485,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\n<mask token>\nprint(get.text)\n",
"step-3": "list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观',\n '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44',\n '落', '')]\nmysql_data = []\n<mask token>\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\n<mask token>\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n }\nget = requests.get('http://www.baidu.com', headers=headers)\nprint(get.text)\n",
"step-4": "list1 = [('北京大洋路', '红蛋', '散框批发', '120-125', '44', '落', '8车'), ('北京回龙观',\n '红蛋', '散框批发', '124', '44', '落', ''), ('北京石门', '红蛋', '散框批发', '124', '44',\n '落', '')]\nmysql_data = []\nimport numpy as np\nfor l in list1:\n array = np.array(l)\n tolist = array.tolist()\n tolist.insert(0, 'ppp')\n tolist.append('lll')\n mysql_data.append(tolist)\nprint(mysql_data)\nimport requests\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n }\nget = requests.get('http://www.baidu.com', headers=headers)\nprint(get.text)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
<|reserved_special_token_0|>
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
        help='SGD momentum (default: 0.5; unused here, since Adam is the optimizer)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
    parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
        'N', help=
        'input the number of examples whose gradients are accumulated before back-propagation (default: 1)'
        )
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
        help='SGD momentum (default: 0.5; unused here, since Adam is the optimizer)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
    parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
        'N', help=
        'input the number of examples whose gradients are accumulated before back-propagation (default: 1)'
        )
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
        help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
        'input the number of examples whose gradients are accumulated before back-propagation (default: 1)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dprogram = """
img(i1). img(i2).
addition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.
nn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).
"""
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),
nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU
(True))
self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU
(), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP('programs/mnist.txt')
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
output = model(data)
test.parameters = output.tolist()
test.normalize_probs()
value = sum(target.tolist())
observation = ':- not addition(i1,i2,' + str(value) + ').'
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
format(epoch, batch_idx * len(data), len(train_loader.
dataset), 100.0 * batch_idx / len(train_loader), loss.
item()))
print(observation)
print('Output: {}'.format(output.data.tolist()))
print('Gradient: {}'.format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
format(test_loss, correct, len(test_loader.dataset), 100.0 *
correct / len(test_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N', help=
'number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
        help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S', help=
'random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar=
'N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar=
'N', help=
        'input the number of examples whose gradients are accumulated before back-propagation (default: 1)'
)
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=True, download=True, transform=transforms.Compose([transforms
.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',
train=False, transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.
test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import sys
import json
import math
from klpmln import MVPP
dprogram = '''
img(i1). img(i2).
addition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.
nn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).
'''
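# The ASP program above declares two image constants i1 and i2, exposes a neural
# predicate digit over the classes 0-9 for each image, and defines addition as the
# sum of the two recognized digits; presumably the same rules are stored in
# programs/mnist.txt, which is what MVPP actually loads in train() below.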
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.encoder = nn.Sequential(
            nn.Conv2d(1, 6, 5), # 6 is the output channel size; 5 is the kernel size; 1 (channel) 28 28 -> 6 24 24
            nn.MaxPool2d(2, 2), # kernel size 2; stride size 2; 6 24 24 -> 6 12 12
nn.ReLU(True), # inplace=True means that it will modify the input directly thus save memory
nn.Conv2d(6, 16, 5), # 6 12 12 -> 16 8 8
nn.MaxPool2d(2, 2), # 16 8 8 -> 16 4 4
nn.ReLU(True)
)
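        # fully connected head: flattened 16*4*4 feature maps -> 120 -> 84 -> 10 class probabilities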
self.classifier = nn.Sequential(
nn.Linear(16 * 4 * 4, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 10),
nn.Softmax(1)
)
def forward(self, x):
x = self.encoder(x)
x = x.view(-1, 16 * 4 * 4)
x = self.classifier(x)
# return F.log_softmax(x, dim=1)
return x
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
test = MVPP("programs/mnist.txt")
for batch_idx, (data, target) in enumerate(train_loader):
for inner_iter in range(1):
data, target = data.to(device), target.to(device)
# optimizer.zero_grad()
output = model(data)
# test = MVPP("programs/mnist.txt")
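            # feed the CNN's softmax outputs in as the probabilities of the neural atoms of the MVPP program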
test.parameters = output.tolist()
test.normalize_probs()
# construct observation addition(i1, i2, sum)
value = sum(target.tolist())
observation = ":- not addition(i1,i2,"+ str(value) + ")."
# we calculate gradients with exact computation
gradients = test.gradients_one_obs(observation)
if device.type == 'cuda':
grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)
else:
grad_by_prob = -1 * torch.FloatTensor(gradients)
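            # `loss` is only printed for monitoring (note that `output` holds softmax probabilities,
            # not log-probabilities); the parameter update comes from backpropagating grad_by_prob below.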
loss = F.nll_loss(output, target)
output.backward(grad_by_prob, retain_graph=True)
if (batch_idx+1) % args.multiExampleNum == 0 and inner_iter == 0:
optimizer.step()
optimizer.zero_grad()
# optimizer.step()
if batch_idx % args.log_interval == 0 and inner_iter == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
print(observation)
print("Output: {}".format(output.data.tolist()))
print("Gradient: {}".format(grad_by_prob))
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
parser.add_argument('--multiExampleNum', type=int, default=1, metavar='N',
                        help='input the number of examples whose gradients are accumulated before back-propagation (default: 1)')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if (args.save_model):
torch.save(model.state_dict(),"mnist_cnn.pt")
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "70b08b9e8c1510a9be48a4bc1de39c6c85b36eed",
"index": 2426,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),\n nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU\n (True))\n self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU\n (), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = x.view(-1, 16 * 4 * 4)\n x = self.classifier(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n test = MVPP('programs/mnist.txt')\n for batch_idx, (data, target) in enumerate(train_loader):\n for inner_iter in range(1):\n data, target = data.to(device), target.to(device)\n output = model(data)\n test.parameters = output.tolist()\n test.normalize_probs()\n value = sum(target.tolist())\n observation = ':- not addition(i1,i2,' + str(value) + ').'\n gradients = test.gradients_one_obs(observation)\n if device.type == 'cuda':\n grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)\n else:\n grad_by_prob = -1 * torch.FloatTensor(gradients)\n loss = F.nll_loss(output, target)\n output.backward(grad_by_prob, retain_graph=True)\n if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:\n optimizer.step()\n optimizer.zero_grad()\n if batch_idx % args.log_interval == 0 and inner_iter == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.\n format(epoch, batch_idx * len(data), len(train_loader.\n dataset), 100.0 * batch_idx / len(train_loader), loss.\n item()))\n print(observation)\n print('Output: {}'.format(output.data.tolist()))\n print('Gradient: {}'.format(grad_by_prob))\n\n\n<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=2, metavar='N',\n help='input batch size for training (default: 2)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N', help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N', help=\n 'number of epochs to train (default: 1)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=1000, metavar=\n 'N', help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--multiExampleNum', type=int, default=1, metavar=\n 'N', help=\n 'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'\n )\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n torch.manual_seed(args.seed)\n device = torch.device('cuda' if use_cuda else 'cpu')\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=True, download=True, transform=transforms.Compose([transforms\n .ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.batch_size, 
shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=False, transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.\n test_batch_size, shuffle=True, **kwargs)\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n if args.save_model:\n torch.save(model.state_dict(), 'mnist_cnn.pt')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),\n nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU\n (True))\n self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU\n (), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = x.view(-1, 16 * 4 * 4)\n x = self.classifier(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n test = MVPP('programs/mnist.txt')\n for batch_idx, (data, target) in enumerate(train_loader):\n for inner_iter in range(1):\n data, target = data.to(device), target.to(device)\n output = model(data)\n test.parameters = output.tolist()\n test.normalize_probs()\n value = sum(target.tolist())\n observation = ':- not addition(i1,i2,' + str(value) + ').'\n gradients = test.gradients_one_obs(observation)\n if device.type == 'cuda':\n grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)\n else:\n grad_by_prob = -1 * torch.FloatTensor(gradients)\n loss = F.nll_loss(output, target)\n output.backward(grad_by_prob, retain_graph=True)\n if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:\n optimizer.step()\n optimizer.zero_grad()\n if batch_idx % args.log_interval == 0 and inner_iter == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.\n format(epoch, batch_idx * len(data), len(train_loader.\n dataset), 100.0 * batch_idx / len(train_loader), loss.\n item()))\n print(observation)\n print('Output: {}'.format(output.data.tolist()))\n print('Gradient: {}'.format(grad_by_prob))\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.\n format(test_loss, correct, len(test_loader.dataset), 100.0 *\n correct / len(test_loader.dataset)))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=2, metavar='N',\n help='input batch size for training (default: 2)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N', help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N', help=\n 'number of epochs to train (default: 1)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=1000, metavar=\n 'N', help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--multiExampleNum', type=int, default=1, metavar=\n 'N', 
help=\n 'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'\n )\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n torch.manual_seed(args.seed)\n device = torch.device('cuda' if use_cuda else 'cpu')\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=True, download=True, transform=transforms.Compose([transforms\n .ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=False, transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.\n test_batch_size, shuffle=True, **kwargs)\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n if args.save_model:\n torch.save(model.state_dict(), 'mnist_cnn.pt')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),\n nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU\n (True))\n self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU\n (), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = x.view(-1, 16 * 4 * 4)\n x = self.classifier(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n test = MVPP('programs/mnist.txt')\n for batch_idx, (data, target) in enumerate(train_loader):\n for inner_iter in range(1):\n data, target = data.to(device), target.to(device)\n output = model(data)\n test.parameters = output.tolist()\n test.normalize_probs()\n value = sum(target.tolist())\n observation = ':- not addition(i1,i2,' + str(value) + ').'\n gradients = test.gradients_one_obs(observation)\n if device.type == 'cuda':\n grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)\n else:\n grad_by_prob = -1 * torch.FloatTensor(gradients)\n loss = F.nll_loss(output, target)\n output.backward(grad_by_prob, retain_graph=True)\n if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:\n optimizer.step()\n optimizer.zero_grad()\n if batch_idx % args.log_interval == 0 and inner_iter == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.\n format(epoch, batch_idx * len(data), len(train_loader.\n dataset), 100.0 * batch_idx / len(train_loader), loss.\n item()))\n print(observation)\n print('Output: {}'.format(output.data.tolist()))\n print('Gradient: {}'.format(grad_by_prob))\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.\n format(test_loss, correct, len(test_loader.dataset), 100.0 *\n correct / len(test_loader.dataset)))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=2, metavar='N',\n help='input batch size for training (default: 2)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N', help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N', help=\n 'number of epochs to train (default: 1)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=1000, metavar=\n 'N', help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--multiExampleNum', type=int, default=1, metavar=\n 'N', 
help=\n 'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'\n )\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n torch.manual_seed(args.seed)\n device = torch.device('cuda' if use_cuda else 'cpu')\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=True, download=True, transform=transforms.Compose([transforms\n .ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=False, transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.\n test_batch_size, shuffle=True, **kwargs)\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n if args.save_model:\n torch.save(model.state_dict(), 'mnist_cnn.pt')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\ndprogram = \"\"\"\nimg(i1). img(i2).\n\naddition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.\n\nnn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).\n\"\"\"\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n self.encoder = nn.Sequential(nn.Conv2d(1, 6, 5), nn.MaxPool2d(2, 2),\n nn.ReLU(True), nn.Conv2d(6, 16, 5), nn.MaxPool2d(2, 2), nn.ReLU\n (True))\n self.classifier = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.ReLU\n (), nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10), nn.Softmax(1)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = x.view(-1, 16 * 4 * 4)\n x = self.classifier(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n test = MVPP('programs/mnist.txt')\n for batch_idx, (data, target) in enumerate(train_loader):\n for inner_iter in range(1):\n data, target = data.to(device), target.to(device)\n output = model(data)\n test.parameters = output.tolist()\n test.normalize_probs()\n value = sum(target.tolist())\n observation = ':- not addition(i1,i2,' + str(value) + ').'\n gradients = test.gradients_one_obs(observation)\n if device.type == 'cuda':\n grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)\n else:\n grad_by_prob = -1 * torch.FloatTensor(gradients)\n loss = F.nll_loss(output, target)\n output.backward(grad_by_prob, retain_graph=True)\n if (batch_idx + 1) % args.multiExampleNum == 0 and inner_iter == 0:\n optimizer.step()\n optimizer.zero_grad()\n if batch_idx % args.log_interval == 0 and inner_iter == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.\n format(epoch, batch_idx * len(data), len(train_loader.\n dataset), 100.0 * batch_idx / len(train_loader), loss.\n item()))\n print(observation)\n print('Output: {}'.format(output.data.tolist()))\n print('Gradient: {}'.format(grad_by_prob))\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.\n format(test_loss, correct, len(test_loader.dataset), 100.0 *\n correct / len(test_loader.dataset)))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=2, metavar='N',\n help='input batch size for training (default: 2)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N', help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N', help=\n 'number of epochs to train (default: 1)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=1000, metavar=\n 'N', help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', 
action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--multiExampleNum', type=int, default=1, metavar=\n 'N', help=\n 'input the number of examples whose gradients are accumulated before back-propogation (default: 10)'\n )\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n torch.manual_seed(args.seed)\n device = torch.device('cuda' if use_cuda else 'cpu')\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=True, download=True, transform=transforms.Compose([transforms\n .ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(datasets.MNIST('../data',\n train=False, transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.\n test_batch_size, shuffle=True, **kwargs)\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n if args.save_model:\n torch.save(model.state_dict(), 'mnist_cnn.pt')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nimport sys\nimport json\nimport math\n\nfrom klpmln import MVPP\n\ndprogram = '''\nimg(i1). img(i2).\n\naddition(A,B,N) :- digit(A,1,N1), digit(B,1,N2), N=N1+N2.\n\nnn(m(X,1), digit, [0,1,2,3,4,5,6,7,8,9]) :- img(X).\n'''\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.encoder = nn.Sequential(\n nn.Conv2d(1, 6, 5), # 6 is the output chanel size; 5 is the kernal size; 1 (chanel) 28 28 -> 6 24 24\n nn.MaxPool2d(2, 2), # kernal size 2; stride size 2; 6 24 24 -> 6 12 12\n nn.ReLU(True), # inplace=True means that it will modify the input directly thus save memory\n nn.Conv2d(6, 16, 5), # 6 12 12 -> 16 8 8\n nn.MaxPool2d(2, 2), # 16 8 8 -> 16 4 4\n nn.ReLU(True) \n )\n self.classifier = nn.Sequential(\n nn.Linear(16 * 4 * 4, 120),\n nn.ReLU(),\n nn.Linear(120, 84),\n nn.ReLU(),\n nn.Linear(84, 10),\n nn.Softmax(1)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = x.view(-1, 16 * 4 * 4)\n x = self.classifier(x)\n # return F.log_softmax(x, dim=1)\n return x\n\n\n \ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n test = MVPP(\"programs/mnist.txt\")\n for batch_idx, (data, target) in enumerate(train_loader):\n for inner_iter in range(1):\n data, target = data.to(device), target.to(device)\n # optimizer.zero_grad()\n output = model(data)\n\n # test = MVPP(\"programs/mnist.txt\")\n test.parameters = output.tolist()\n test.normalize_probs()\n\n # construct observation addition(i1, i2, sum)\n value = sum(target.tolist())\n observation = \":- not addition(i1,i2,\"+ str(value) + \").\"\n\n # we calculate gradients with exact computation\n gradients = test.gradients_one_obs(observation)\n\n if device.type == 'cuda':\n grad_by_prob = -1 * torch.cuda.FloatTensor(gradients)\n else:\n grad_by_prob = -1 * torch.FloatTensor(gradients)\n\n loss = F.nll_loss(output, target)\n\n output.backward(grad_by_prob, retain_graph=True)\n if (batch_idx+1) % args.multiExampleNum == 0 and inner_iter == 0:\n optimizer.step()\n optimizer.zero_grad()\n # optimizer.step()\n if batch_idx % args.log_interval == 0 and inner_iter == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n print(observation)\n print(\"Output: {}\".format(output.data.tolist()))\n print(\"Gradient: {}\".format(grad_by_prob))\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=2, metavar='N',\n help='input batch size for training (default: 2)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N',\n help='number of epochs to train (default: 1)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=1000, metavar='N',\n help='how many batches to wait before logging training status')\n \n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n\n parser.add_argument('--multiExampleNum', type=int, default=1, metavar='N',\n help='input the number of examples whose gradients are accumulated before back-propogation (default: 10)')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n\n model = Net().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n \n\n\n\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n test(args, model, device, test_loader)\n\n if (args.save_model):\n torch.save(model.state_dict(),\"mnist_cnn.pt\")\n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def translate(src, tgt, text):
mname = f'stas/wmt19-{src}-{tgt}'
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
decoded = tokenizer.decode(output, skip_special_tokens=True)
return decoded
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')
<|reserved_special_token_0|>
logging.disable(logging.INFO)
<|reserved_special_token_0|>
def translate(src, tgt, text):
mname = f'stas/wmt19-{src}-{tgt}'
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
decoded = tokenizer.decode(output, skip_special_tokens=True)
return decoded
def paraphrase(src, tgt, text):
return translate(tgt, src, translate(src, tgt, text))
<|reserved_special_token_0|>
print('Paraphrasing:')
print(f'en : {text}')
print(f'en-ru-en: {en_ru}')
print(f'en-de-en: {en_de}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')
<|reserved_special_token_0|>
logging.disable(logging.INFO)
<|reserved_special_token_0|>
def translate(src, tgt, text):
mname = f'stas/wmt19-{src}-{tgt}'
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
decoded = tokenizer.decode(output, skip_special_tokens=True)
return decoded
def paraphrase(src, tgt, text):
return translate(tgt, src, translate(src, tgt, text))
text = (
'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'
)
en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
print('Paraphrasing:')
print(f'en : {text}')
print(f'en-ru-en: {en_ru}')
print(f'en-de-en: {en_de}')
<|reserved_special_token_1|>
import sys
sys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')
import logging
logging.disable(logging.INFO)
from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration
def translate(src, tgt, text):
mname = f'stas/wmt19-{src}-{tgt}'
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
decoded = tokenizer.decode(output, skip_special_tokens=True)
return decoded
def paraphrase(src, tgt, text):
return translate(tgt, src, translate(src, tgt, text))
text = (
'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'
)
en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
print('Paraphrasing:')
print(f'en : {text}')
print(f'en-ru-en: {en_ru}')
print(f'en-de-en: {en_de}')
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.insert(0, "/code/huggingface/transformers-fair-wmt/src")
import logging
logging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere
from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration
def translate(src, tgt, text):
# to switch to local model
    #mname = f"/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}"
# s3 uploaded model
mname = f"stas/wmt19-{src}-{tgt}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
encoded = tokenizer.encode(text, return_tensors='pt')
# print(encoded)
output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
# print(output)
decoded = tokenizer.decode(output, skip_special_tokens=True)
#print(decoded)
return decoded
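# paraphrase by round-tripping the text through another language: src -> tgt -> src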
def paraphrase(src, tgt, text):
return translate(tgt, src, translate(src, tgt, text))
#text = """Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now."""
text = "Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?"
en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
# print together to avoid the logger noise :(
print("Paraphrasing:")
print(f"en : {text}")
print(f"en-ru-en: {en_ru}")
print(f"en-de-en: {en_de}")
# Paraphrasing:
# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?
# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?
# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?
|
flexible
|
{
"blob_id": "7864138459caf469a0148420718b2282598141de",
"index": 6674,
"step-1": "<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\n<mask token>\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-3": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-4": "import sys\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\nimport logging\nlogging.disable(logging.INFO)\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nsys.path.insert(0, \"/code/huggingface/transformers-fair-wmt/src\")\n\nimport logging\nlogging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere\n\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\ndef translate(src, tgt, text):\n # to switch to local model\n #mname = \"/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}\"\n # s3 uploaded model\n mname = f\"stas/wmt19-{src}-{tgt}\"\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n\n encoded = tokenizer.encode(text, return_tensors='pt')\n # print(encoded)\n\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n # print(output)\n\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n #print(decoded)\n return decoded\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n#text = \"\"\"Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now.\"\"\"\n\ntext = \"Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\"\n\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\n# print together to avoid the logger noise :(\nprint(\"Paraphrasing:\")\nprint(f\"en : {text}\")\nprint(f\"en-ru-en: {en_ru}\")\nprint(f\"en-de-en: {en_de}\")\n\n# Paraphrasing:\n# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\n# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?\n# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');
from tests.cells.NEURONCellTest import NEURONCellTest
from tests.cells.NeuroMLCellTest import NeuroMLCellTest
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = "../NEURON/granule.hoc"
self.label = "granule"
self.resultsFile = "results/cells/granule/NEURON.json"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Build the network with 1GC
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821] # The GC of the first MC
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = "../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml"
self.label = "granule"
self.resultsFile = "results/cells/granule/NeuroML.json"
self.id = "Granule_0_110821"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Load the cell hoc
h.load_file(self.id+".hoc")
cell = getattr(h,self.id)()
h.celsius = 24
return cell
|
normal
|
{
"blob_id": "6dbafbcf126c37edb2187eb28c01e2c1125c1c64",
"index": 7134,
"step-1": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n <mask token>\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-2": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\n<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-4": "import sys, os\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\nfrom tests.cells.NEURONCellTest import NEURONCellTest\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-5": "import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');\r\nfrom tests.cells.NEURONCellTest import NEURONCellTest\r\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\r\n\r\nclass NEURON(NEURONCellTest):\r\n\r\n def __init__(self):\r\n super(NEURON, self).__init__()\r\n\r\n self.path = \"../NEURON/granule.hoc\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NEURON.json\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n\r\n # Build the network with 1GC\r\n sys.path.append(os.getcwd())\r\n import customsim\r\n import modeldata\r\n customsim.setup(1, 1)\r\n model = modeldata.getmodel()\r\n cell = model.granules[110821] # The GC of the first MC\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\nclass NeuroML(NeuroMLCellTest):\r\n def __init__(self):\r\n super(NeuroML, self).__init__()\r\n\r\n self.path = \"../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NeuroML.json\"\r\n self.id = \"Granule_0_110821\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n # Load the cell hoc\r\n h.load_file(self.id+\".hoc\")\r\n\r\n cell = getattr(h,self.id)()\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('lectures', '0003_auto_20210805_1954')]
operations = [migrations.RenameField(model_name='lecture', old_name=
'is_requird', new_name='is_required')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('lectures', '0003_auto_20210805_1954')]
operations = [migrations.RenameField(model_name='lecture', old_name=
'is_requird', new_name='is_required')]
<|reserved_special_token_1|>
# Generated by Django 3.2.5 on 2021-08-05 23:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lectures', '0003_auto_20210805_1954'),
]
operations = [
migrations.RenameField(
model_name='lecture',
old_name='is_requird',
new_name='is_required',
),
]
|
flexible
|
{
"blob_id": "e5bf4518f3834c73c3743d4c711a8d1a4ce3b944",
"index": 6788,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lectures', '0003_auto_20210805_1954')]\n operations = [migrations.RenameField(model_name='lecture', old_name=\n 'is_requird', new_name='is_required')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lectures', '0003_auto_20210805_1954')]\n operations = [migrations.RenameField(model_name='lecture', old_name=\n 'is_requird', new_name='is_required')]\n",
"step-5": "# Generated by Django 3.2.5 on 2021-08-05 23:59\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lectures', '0003_auto_20210805_1954'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='lecture',\n old_name='is_requird',\n new_name='is_required',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
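Note on the migration record above: it renames the misspelled field `is_requird` to `is_required` on the `lecture` model in the `lectures` app. A minimal sketch of what the model could look like after the rename is given below; the field type and default are assumptions made for illustration, since the model itself is not part of the record.

from django.db import models


class Lecture(models.Model):
    # After the RenameField migration the column keeps its data; only the
    # attribute/column name changes. BooleanField and its default are
    # assumptions -- the real field type is not shown in the record.
    is_required = models.BooleanField(default=False)

    class Meta:
        app_label = 'lectures'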
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
import pickle as pk
X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
def train_model(model_name):
if model_name == "LinearRegression":
model = LinearRegression()
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "Lasso":
model = Lasso(alpha=1)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "Ridge":
model = Ridge(alpha=1)
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if model_name == "tree":
model = tree.DecisionTreeRegressor()
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print(score)
if __name__ == '__main__':
model_chosen = "Lasso"
train_model(model_chosen)
|
normal
|
{
"blob_id": "539726df0e631c7a8edabf50fd739ee0497e3e97",
"index": 5557,
"step-1": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-3": "<mask token>\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-4": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-5": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\n\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == \"LinearRegression\":\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Lasso\":\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Ridge\":\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"tree\":\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = \"Lasso\"\n train_model(model_chosen)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
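The training script above repeats the same fit/score block for each estimator. As a hedged sketch (not part of the record), the same selection can be table-driven, and the fitted model persisted with pickle in the same style the script already uses for its inputs; the output path is an assumption.

import pickle as pk
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree

# Same four estimators as in the record, keyed by the names it checks for.
MODELS = {
    'LinearRegression': LinearRegression,
    'Lasso': lambda: Lasso(alpha=1),
    'Ridge': lambda: Ridge(alpha=1),
    'tree': tree.DecisionTreeRegressor,
}


def train_and_save(model_name, X, y, out_path='model.pkl'):
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
    model = MODELS[model_name]()         # build the chosen estimator
    model.fit(X_train, y_train)
    print(model.score(X_test, y_test))   # R^2 on the held-out 30%
    with open(out_path, 'wb') as f:      # persist the fitted model (path is an assumption)
        pk.dump(model, f)
    return model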
from random import random
import numpy as np
class TemperatureSensor:
sensor_type = "temperature"
unit="celsius"
instance_id="283h62gsj"
#initialisation
def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature= max_temperature
self.value = 0.0 #initialise current temp value
#sensing
def sense(self):
#self.value = self.value + self.simple_random()
self.value = self.complex_random() + self.noise()
return self.value
#noise
def noise(self):
self.noise_value = np.random.normal(0,1)
return self.noise_value
#helper function for generating values with min temp as its base
def simple_random(self):
value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range
return value
def complex_random(self):
value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))
value = max(value,self.min_temperature)
value = min(value,self.max_temperature)
return value
#creating instance of sensor
ts = TemperatureSensor(25,10,16,35)
|
normal
|
{
"blob_id": "bc890f0f40a7e9c916628d491e473b5ecfa9bb9b",
"index": 740,
"step-1": "<mask token>\n\n\nclass TemperatureSensor:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n <mask token>\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-4": "from random import random\nimport numpy as np\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-5": "from random import random\r\n\r\nimport numpy as np\r\n\r\nclass TemperatureSensor:\r\n sensor_type = \"temperature\"\r\n unit=\"celsius\"\r\n instance_id=\"283h62gsj\"\r\n \r\n #initialisation\r\n \r\n def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):\r\n self.average_temperature = average_temperature\r\n self.temperature_variation = temperature_variation\r\n self.min_temperature = min_temperature \r\n self.max_temperature= max_temperature\r\n self.value = 0.0 #initialise current temp value\r\n \r\n #sensing \r\n def sense(self):\r\n #self.value = self.value + self.simple_random()\r\n self.value = self.complex_random() + self.noise()\r\n return self.value\r\n \r\n #noise\r\n def noise(self):\r\n self.noise_value = np.random.normal(0,1)\r\n return self.noise_value\r\n \r\n #helper function for generating values with min temp as its base\r\n def simple_random(self):\r\n value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range\r\n return value\r\n \r\n def complex_random(self):\r\n value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))\r\n value = max(value,self.min_temperature)\r\n value = min(value,self.max_temperature)\r\n return value\r\n \r\n#creating instance of sensor\r\nts = TemperatureSensor(25,10,16,35)\r\n\r\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
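A short usage sketch for the sensor record above: repeatedly calling sense() on the `ts` instance it creates. The module name and sample count are assumptions made for the example.

# Assuming the record above is saved as temperature_sensor.py:
from temperature_sensor import ts  # ts = TemperatureSensor(25, 10, 16, 35)

# Each reading is complex_random() clamped to [16, 35] plus N(0, 1) noise.
readings = [ts.sense() for _ in range(5)]
print(readings)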
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
paramiko.util.log_to_file('syslogin.log')
<|reserved_special_token_0|>
t.connect(username=jumpuser, password=jumppass)
<|reserved_special_token_0|>
sftp.put(localpath, remotepath)
sftp.close()
<|reserved_special_token_0|>
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
<|reserved_special_token_0|>
channel.settimeout(10)
<|reserved_special_token_0|>
channel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +
remotepath + '\n')
while not buff.endswith(passinfo):
try:
resp = channel.recv(9999)
except Exception as e:
print('Error info: ' + str(e))
channel.close()
ssh.close()
sys.exit()
buff += resp
if not buff.find('yes/no') == -1:
channel.send('yes\n')
buff = ''
channel.send(password + '\n')
<|reserved_special_token_0|>
while not buff.endswith('# '):
resp = channel.recv(9999)
if not resp.find(passinfo) == -1:
print('Error info: Auth failed.')
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
jumpip = '192.168.10.1'
jumpuser = 'jackie'
jumppass = '123456'
hostname = '192.168.10.2'
user = 'root'
password = '654321'
tmpdir = '/tmp'
remotedir = '/data'
localpath = '/home/nginx_access.tar.gz'
tmppath = tmpdir + '/nginx_access.tar.gz'
remotepath = remotedir + '/nginx_access_hd.tar.gz'
port = 22
passinfo = "'s password: "
paramiko.util.log_to_file('syslogin.log')
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, remotepath)
sftp.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
channel = ssh.invoke_shell()
channel.settimeout(10)
buff = ''
resp = ''
channel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +
remotepath + '\n')
while not buff.endswith(passinfo):
try:
resp = channel.recv(9999)
except Exception as e:
print('Error info: ' + str(e))
channel.close()
ssh.close()
sys.exit()
buff += resp
if not buff.find('yes/no') == -1:
channel.send('yes\n')
buff = ''
channel.send(password + '\n')
buff = ''
while not buff.endswith('# '):
resp = channel.recv(9999)
if not resp.find(passinfo) == -1:
print('Error info: Auth failed.')
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
<|reserved_special_token_1|>
import paramiko
import os, sys, time
jumpip = '192.168.10.1'
jumpuser = 'jackie'
jumppass = '123456'
hostname = '192.168.10.2'
user = 'root'
password = '654321'
tmpdir = '/tmp'
remotedir = '/data'
localpath = '/home/nginx_access.tar.gz'
tmppath = tmpdir + '/nginx_access.tar.gz'
remotepath = remotedir + '/nginx_access_hd.tar.gz'
port = 22
passinfo = "'s password: "
paramiko.util.log_to_file('syslogin.log')
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, remotepath)
sftp.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
channel = ssh.invoke_shell()
channel.settimeout(10)
buff = ''
resp = ''
channel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +
remotepath + '\n')
while not buff.endswith(passinfo):
try:
resp = channel.recv(9999)
except Exception as e:
print('Error info: ' + str(e))
channel.close()
ssh.close()
sys.exit()
buff += resp
if not buff.find('yes/no') == -1:
channel.send('yes\n')
buff = ''
channel.send(password + '\n')
buff = ''
while not buff.endswith('# '):
resp = channel.recv(9999)
if not resp.find(passinfo) == -1:
print('Error info: Auth failed.')
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# coding=utf-8
# title :paramiko_sftp.py
# description :
# author :JackieTsui
# organization :pytoday.org
# date :1/16/18 9:22 PM
# email :[email protected]
# notes :
# ==================================================
# Import the module needed to run the script
import paramiko
import os,sys,time
jumpip = "192.168.10.1"
jumpuser = "jackie"
jumppass = "123456"
hostname = "192.168.10.2"
user = "root"
password = "654321"
tmpdir = "/tmp"
remotedir = "/data"
localpath = "/home/nginx_access.tar.gz"
tmppath = tmpdir + "/nginx_access.tar.gz"
remotepath = remotedir + "/nginx_access_hd.tar.gz"
port = 22
passinfo = "'s password: "
paramiko.util.log_to_file('syslogin.log')
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, remotepath)
sftp.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
channel = ssh.invoke_shell()
channel.settimeout(10)
buff = ""
resp = ""
channel.send("scp " + tmppath + " " + user + "@" + hostname + ":" + remotepath + "\n")
while not buff.endswith(passinfo):
try:
resp = channel.recv(9999)
except Exception as e:
print("Error info: " + str(e))
channel.close()
ssh.close()
sys.exit()
buff += resp
if not buff.find("yes/no") == -1:
channel.send("yes\n")
buff = ""
channel.send(password + "\n")
buff = ""
while not buff.endswith("# "):
resp = channel.recv(9999)
if not resp.find(passinfo) == -1:
print("Error info: Auth failed.")
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
|
flexible
|
{
"blob_id": "64cf6b03fb68be8a23c6e87c8d68d0a42db0eb54",
"index": 6451,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparamiko.util.log_to_file('syslogin.log')\n<mask token>\nt.connect(username=jumpuser, password=jumppass)\n<mask token>\nsftp.put(localpath, remotepath)\nsftp.close()\n<mask token>\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n<mask token>\nchannel.settimeout(10)\n<mask token>\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\n<mask token>\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-3": "<mask token>\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-4": "import paramiko\nimport os, sys, time\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-5": "#!/usr/bin/env python3\n# coding=utf-8\n# title :paramiko_sftp.py\n# description :\n# author :JackieTsui\n# organization :pytoday.org\n# date :1/16/18 9:22 PM\n# email :[email protected]\n# notes :\n# ==================================================\n\n# Import the module needed to run the script\nimport paramiko\nimport os,sys,time\n\n\njumpip = \"192.168.10.1\"\njumpuser = \"jackie\"\njumppass = \"123456\"\nhostname = \"192.168.10.2\"\nuser = \"root\"\npassword = \"654321\"\n\ntmpdir = \"/tmp\"\nremotedir = \"/data\"\nlocalpath = \"/home/nginx_access.tar.gz\"\ntmppath = tmpdir + \"/nginx_access.tar.gz\"\nremotepath = remotedir + \"/nginx_access_hd.tar.gz\"\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\n\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\n\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\n\nbuff = \"\"\nresp = \"\"\nchannel.send(\"scp \" + tmppath + \" \" + user + \"@\" + hostname + \":\" + remotepath + \"\\n\")\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print(\"Error info: \" + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find(\"yes/no\") == -1:\n channel.send(\"yes\\n\")\n buff = \"\"\n\nchannel.send(password + \"\\n\")\n\nbuff = \"\"\nwhile not buff.endswith(\"# \"):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print(\"Error info: Auth failed.\")\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n\nprint(buff)\nchannel.close()\nssh.close()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
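The record above copies the archive to a jump host over SFTP and then drives an interactive `scp` session to reach the final host, parsing the password prompt by hand. A hedged alternative sketch, run from the jump host, does the last hop with paramiko's own SFTP client and avoids prompt parsing; the host, credentials and paths in the usage comment are the record's values, reused only as placeholders.

import paramiko


def sftp_put(host, user, passwd, local, remote, port=22):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, port=port, username=user, password=passwd)
    sftp = ssh.open_sftp()
    sftp.put(local, remote)  # single call, no prompt scraping
    sftp.close()
    ssh.close()


# e.g. on the jump host:
# sftp_put('192.168.10.2', 'root', '654321',
#          '/tmp/nginx_access.tar.gz', '/data/nginx_access_hd.tar.gz')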
<|reserved_special_token_0|>
class DirectorySearchHandler(BaseHandler):
def initialize(self):
super(DirectorySearchHandler, self).initialize()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20,
startposition=0, distributedsearch=True, hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {'title': '', 'uid': '', 'sizhi': ''}
self.render('../torcms_dde/search/show_rec.html', kws=out_dict,
meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=
self.userinfo)
class MyXML:
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DirectorySearchHandler(BaseHandler):
def initialize(self):
super(DirectorySearchHandler, self).initialize()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def search(self, keyw, isweb, ldrt, max_num):
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
startposition = int(startnum) * int(max_num) + 1
print(',' * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
csw.getrecords2(constraints=[bbox_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(
keyw))
csw.getrecords2(constraints=[birds_query, bbox_query],
maxrecords=max_num, startposition=startposition,
distributedsearch=True, hopcount=2)
elif isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num,
startposition=startposition, distributedsearch=True, hopcount=2
)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
self.render('../torcms_dde/search/show_result.html', meta_results=
csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum
)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20,
startposition=0, distributedsearch=True, hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {'title': '', 'uid': '', 'sizhi': ''}
self.render('../torcms_dde/search/show_rec.html', kws=out_dict,
meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=
self.userinfo)
class MyXML:
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DirectorySearchHandler(BaseHandler):
def initialize(self):
super(DirectorySearchHandler, self).initialize()
<|reserved_special_token_0|>
def list(self, keyw):
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
for rec in csw.results:
print(rec)
self.render('../torcms_dde/search/meta_index.html', meta_results=
csw.records, userinfo=self.userinfo)
def search(self, keyw, isweb, ldrt, max_num):
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
startposition = int(startnum) * int(max_num) + 1
print(',' * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
csw.getrecords2(constraints=[bbox_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(
keyw))
csw.getrecords2(constraints=[birds_query, bbox_query],
maxrecords=max_num, startposition=startposition,
distributedsearch=True, hopcount=2)
elif isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num,
startposition=startposition, distributedsearch=True, hopcount=2
)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
self.render('../torcms_dde/search/show_result.html', meta_results=
csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum
)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20,
startposition=0, distributedsearch=True, hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {'title': '', 'uid': '', 'sizhi': ''}
self.render('../torcms_dde/search/show_rec.html', kws=out_dict,
meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=
self.userinfo)
class MyXML:
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
<|reserved_special_token_1|>
import tornado.web
import tornado.escape
from torcms.core.base_handler import BaseHandler
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox
class DirectorySearchHandler(BaseHandler):
def initialize(self):
super(DirectorySearchHandler, self).initialize()
def get(self, url_str=''):
url_arr = self.parse_url(url_str)
if len(url_str) > 0:
url_arr = url_str.split('/')
if url_str == '':
self.list('')
elif url_arr[0] == 'search':
if len(url_arr[0]) >= 3:
self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])
else:
self.search(url_arr[1], url_arr[2], '', 10)
elif url_arr[0] == 'view':
self.ajax_get(url_arr[1], url_arr[2])
def list(self, keyw):
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
for rec in csw.results:
print(rec)
self.render('../torcms_dde/search/meta_index.html', meta_results=
csw.records, userinfo=self.userinfo)
def search(self, keyw, isweb, ldrt, max_num):
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
startposition = int(startnum) * int(max_num) + 1
print(',' * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
csw.getrecords2(constraints=[bbox_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(
keyw))
csw.getrecords2(constraints=[birds_query, bbox_query],
maxrecords=max_num, startposition=startposition,
distributedsearch=True, hopcount=2)
elif isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=
startposition, maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num,
startposition=startposition, distributedsearch=True, hopcount=2
)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
self.render('../torcms_dde/search/show_result.html', meta_results=
csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum
)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20,
startposition=0, distributedsearch=True, hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {'title': '', 'uid': '', 'sizhi': ''}
self.render('../torcms_dde/search/show_rec.html', kws=out_dict,
meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=
self.userinfo)
class MyXML:
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
<|reserved_special_token_1|>
import tornado.web
import tornado.escape
from torcms.core.base_handler import BaseHandler
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox
class DirectorySearchHandler(BaseHandler):
def initialize(self):
super(DirectorySearchHandler, self).initialize()
def get(self, url_str=''):
url_arr = self.parse_url(url_str)
if len(url_str) > 0:
url_arr = url_str.split('/')
# if url_str == '':
# self.render('metadata/meta_index.html')
if url_str == '':
self.list('')
elif url_arr[0] == 'search':
if len(url_arr[0]) >= 3:
self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])
else:
self.search(url_arr[1], url_arr[2], '', 10)
elif url_arr[0] == 'view':
self.ajax_get(url_arr[1], url_arr[2])
# def post(self, *args, **kwargs):
# post_data = self.get_request_arguments()
# keyword = post_data.get('keyw9', '')
# isweb = post_data.get('isweb', '1')
# ldrt = post_data.get('ldrt', '')
# maxrecords = post_data.get('maxrecords', 20)
#
# self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))
# def search(self, keyw):
# # print('====' * 40)
# # print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def list(self, keyw):
# print('====' * 40)
# print(post_data)
keyw = 'data'
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query_like], maxrecords=20)
print('-' * 20)
print(csw.results)
for rec in csw.results:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/meta_index.html',
meta_results=csw.records,
userinfo=self.userinfo)
# self.parseXML(r.text.encode(encoding='UTF-8'))
def search(self, keyw, isweb, ldrt, max_num):
# print('=' * 40)
# print(ldrt)
post_data = self.get_request_arguments()
startnum = post_data.get('startnum', 0)
startposition = int(startnum) * int(max_num) +1
print("," * 50)
print(startnum)
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
if ldrt:
print('=' * 40)
print(type(ldrt))
print(ldrt)
print('=' * 40)
xx_ldrt = [float(x) for x in ldrt.split(',')]
xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]
print(xx_ldrt)
bbox_query = BBox(xx_ldrt)
if isweb == '1':
# birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,
distributedsearch=True,
hopcount=2)
else:
if isweb == '1':
birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)
else:
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))
csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,
hopcount=2)
print('-' * 20)
print(isweb)
print(csw.results)
for rec in csw.records:
print(rec)
# out_dic = {}
# for rec in csw.records:
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&
# maximumRecords=5&startRecord=5&outputFormat=application/json'.format(
# keyw)
# r = requests.get(url)
# pprint.pprint(r.text)
self.render('../torcms_dde/search/show_result.html',
meta_results=csw.records,
userinfo=self.userinfo,
isweb=isweb,
startnum = startnum
)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# def get_result(self, post_data):
# print('====' * 40)
# print(post_data)
# url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}
# &maximumRecords=5&startRecord=5'.format(
# post_data['keyw'][0])
# r = requests.get(url)
# pprint.pprint(r.text)
# self.parseXML(r.text.encode(encoding='UTF-8'))
# # data = urllib.request.Request(url)
def ajax_get(self, uuid, isweb):
print('=' * 20)
print(uuid)
# uuid = uuid.split(':')[-1]
csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')
# birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))
csw.getrecordbyid(id=[uuid])
print('-' * 20)
print(csw.getrecordbyid(id=[uuid]))
if isweb == '1':
rec = csw.records.get(uuid)
else:
birds_query = PropertyIsLike('csw:AnyText', uuid)
csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,
hopcount=2)
print(csw.results)
for key in csw.records:
rec = csw.records[key]
out_dict = {
'title': '',
'uid': '',
'sizhi': '',
}
self.render('../torcms_dde/search/show_rec.html',
kws=out_dict,
# meta_rec=csw.records.get(uuid),
meta_rec=rec,
unescape=tornado.escape.xhtml_unescape,
userinfo=self.userinfo
)
# #
# def parseXML(self, data):
#
# tree = etree.fromstring(data)
# # root = tree.getroot()
# uu = tree.findall('zs:record', tree.nsmap)
#
# meta_arr = []
# for x in uu:
# meta_arr.append(MyXML(x))
# # print(x.element('ows:LowerCorner'))
# # uu = etree.SubElement(x, "LowerCorner")
# # for sub_ele in x.iter():
# # print(sub_ele.tag)
# # if 'title' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
# # if 'LowerCorner' == sub_ele.tag.split('}')[1]:
# # print(sub_ele.text)
#
# self.render('metadata/show_result.html',
# meta_arr=meta_arr)
class MyXML():
def __init__(self, in_ele):
self.element = in_ele
def uid(self):
for sub_ele in self.element.iter():
if 'identifier' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def recordPosition(self):
for sub_ele in self.element.iter():
if 'recordPosition' == sub_ele.tag.split('}')[1]:
return sub_ele.text
def sizhi(self):
out_arr = [0, 0, 0, 0]
for sub_ele in self.element.iter():
if 'LowerCorner' == sub_ele.tag.split('}')[1]:
t1 = sub_ele.text.split(' ')
out_arr[0] = float(t1[0])
out_arr[2] = float(t1[1])
if 'UpperCorner' == sub_ele.tag.split('}')[1]:
t2 = sub_ele.text.split(' ')
out_arr[1] = float(t2[0])
out_arr[3] = float(t2[1])
return out_arr
def title(self):
for sub_ele in self.element.iter():
if 'title' == sub_ele.tag.split('}')[1]:
return sub_ele.text
|
flexible
|
{
"blob_id": "72ce7c48c9d1a7bcdbaead12648d03970663a11e",
"index": 3227,
"step-1": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n <mask token>\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-2": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n <mask token>\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-3": "<mask token>\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n <mask token>\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in 
self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-4": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n def list(self, keyw):\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n for rec in csw.results:\n print(rec)\n self.render('../torcms_dde/search/meta_index.html', meta_results=\n csw.records, userinfo=self.userinfo)\n\n def search(self, keyw, isweb, ldrt, max_num):\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n startposition = int(startnum) * int(max_num) + 1\n print(',' * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n print(xx_ldrt)\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n csw.getrecords2(constraints=[bbox_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(\n keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query],\n maxrecords=max_num, startposition=startposition,\n distributedsearch=True, hopcount=2)\n elif isweb == '1':\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=\n startposition, maxrecords=max_num)\n else:\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num,\n startposition=startposition, distributedsearch=True, hopcount=2\n )\n print('-' * 20)\n print(isweb)\n print(csw.results)\n for rec in csw.records:\n print(rec)\n self.render('../torcms_dde/search/show_result.html', meta_results=\n csw.records, userinfo=self.userinfo, isweb=isweb, startnum=startnum\n )\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20,\n startposition=0, distributedsearch=True, hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n out_dict = {'title': '', 'uid': '', 'sizhi': ''}\n self.render('../torcms_dde/search/show_rec.html', kws=out_dict,\n meta_rec=rec, unescape=tornado.escape.xhtml_unescape, userinfo=\n self.userinfo)\n\n\nclass MyXML:\n\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return 
sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-5": "import tornado.web\nimport tornado.escape\nfrom torcms.core.base_handler import BaseHandler\nfrom owslib.csw import CatalogueServiceWeb\nfrom owslib.fes import PropertyIsEqualTo, PropertyIsLike, BBox\n\n\nclass DirectorySearchHandler(BaseHandler):\n def initialize(self):\n super(DirectorySearchHandler, self).initialize()\n\n def get(self, url_str=''):\n url_arr = self.parse_url(url_str)\n if len(url_str) > 0:\n url_arr = url_str.split('/')\n # if url_str == '':\n # self.render('metadata/meta_index.html')\n\n if url_str == '':\n self.list('')\n elif url_arr[0] == 'search':\n if len(url_arr[0]) >= 3:\n self.search(url_arr[1], url_arr[2], url_arr[3], url_arr[4])\n else:\n self.search(url_arr[1], url_arr[2], '', 10)\n\n elif url_arr[0] == 'view':\n self.ajax_get(url_arr[1], url_arr[2])\n\n # def post(self, *args, **kwargs):\n # post_data = self.get_request_arguments()\n # keyword = post_data.get('keyw9', '')\n # isweb = post_data.get('isweb', '1')\n # ldrt = post_data.get('ldrt', '')\n # maxrecords = post_data.get('maxrecords', 20)\n #\n # self.redirect('/directory_search/search/{0}/{1}/{2}/{3}'.format(keyword, isweb, ldrt, maxrecords))\n\n # def search(self, keyw):\n # # print('====' * 40)\n # # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n def list(self, keyw):\n # print('====' * 40)\n # print(post_data)\n keyw = 'data'\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query_like], maxrecords=20)\n print('-' * 20)\n print(csw.results)\n\n for rec in csw.results:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\\\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/meta_index.html',\n meta_results=csw.records,\n userinfo=self.userinfo)\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n def search(self, keyw, isweb, ldrt, max_num):\n # print('=' * 40)\n # print(ldrt)\n post_data = self.get_request_arguments()\n startnum = post_data.get('startnum', 0)\n\n startposition = int(startnum) * int(max_num) +1\n print(\",\" * 50)\n print(startnum)\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n\n if ldrt:\n print('=' * 40)\n print(type(ldrt))\n print(ldrt)\n print('=' * 40)\n\n xx_ldrt = [float(x) for x in ldrt.split(',')]\n\n xx_ldrt = [xx_ldrt[1], xx_ldrt[0], xx_ldrt[3], xx_ldrt[2]]\n\n print(xx_ldrt)\n\n bbox_query = BBox(xx_ldrt)\n if isweb == '1':\n\n # birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[bbox_query], startposition=startposition,maxrecords=max_num)\n\n else:\n\n birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query, bbox_query], maxrecords=max_num, startposition=startposition,\n distributedsearch=True,\n hopcount=2)\n else:\n if isweb == '1':\n\n birds_query = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], startposition=startposition,maxrecords=max_num)\n else:\n 
birds_query = PropertyIsLike('csw:AnyText', '%{0}%'.format(keyw))\n csw.getrecords2(constraints=[birds_query], maxrecords=max_num, startposition=startposition, distributedsearch=True,\n hopcount=2)\n print('-' * 20)\n print(isweb)\n print(csw.results)\n\n for rec in csw.records:\n print(rec)\n\n # out_dic = {}\n # for rec in csw.records:\n\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}&\n # maximumRecords=5&startRecord=5&outputFormat=application/json'.format(\n # keyw)\n # r = requests.get(url)\n # pprint.pprint(r.text)\n\n self.render('../torcms_dde/search/show_result.html',\n meta_results=csw.records,\n userinfo=self.userinfo,\n isweb=isweb,\n startnum = startnum\n )\n\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n\n # def get_result(self, post_data):\n # print('====' * 40)\n # print(post_data)\n # url = 'http://meta.osgeo.cn/pycsw/csw.py?mode=sru&operation=searchRetrieve&query={0}\n # &maximumRecords=5&startRecord=5'.format(\n # post_data['keyw'][0])\n # r = requests.get(url)\n # pprint.pprint(r.text)\n # self.parseXML(r.text.encode(encoding='UTF-8'))\n # # data = urllib.request.Request(url)\n\n def ajax_get(self, uuid, isweb):\n print('=' * 20)\n print(uuid)\n # uuid = uuid.split(':')[-1]\n csw = CatalogueServiceWeb('https://drr.ikcest.org/csw')\n # birds_query_like = PropertyIsLike('dc:title', '%{0}%'.format(keyw))\n\n csw.getrecordbyid(id=[uuid])\n print('-' * 20)\n print(csw.getrecordbyid(id=[uuid]))\n if isweb == '1':\n rec = csw.records.get(uuid)\n else:\n birds_query = PropertyIsLike('csw:AnyText', uuid)\n csw.getrecords2(constraints=[birds_query], maxrecords=20, startposition=0, distributedsearch=True,\n hopcount=2)\n print(csw.results)\n for key in csw.records:\n rec = csw.records[key]\n\n out_dict = {\n 'title': '',\n 'uid': '',\n 'sizhi': '',\n\n }\n\n self.render('../torcms_dde/search/show_rec.html',\n kws=out_dict,\n # meta_rec=csw.records.get(uuid),\n meta_rec=rec,\n unescape=tornado.escape.xhtml_unescape,\n userinfo=self.userinfo\n )\n\n # #\n # def parseXML(self, data):\n #\n # tree = etree.fromstring(data)\n # # root = tree.getroot()\n # uu = tree.findall('zs:record', tree.nsmap)\n #\n # meta_arr = []\n # for x in uu:\n # meta_arr.append(MyXML(x))\n # # print(x.element('ows:LowerCorner'))\n # # uu = etree.SubElement(x, \"LowerCorner\")\n # # for sub_ele in x.iter():\n # # print(sub_ele.tag)\n # # if 'title' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n # # if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n # # print(sub_ele.text)\n #\n # self.render('metadata/show_result.html',\n # meta_arr=meta_arr)\n\n\nclass MyXML():\n def __init__(self, in_ele):\n self.element = in_ele\n\n def uid(self):\n for sub_ele in self.element.iter():\n if 'identifier' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def recordPosition(self):\n for sub_ele in self.element.iter():\n if 'recordPosition' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n\n def sizhi(self):\n out_arr = [0, 0, 0, 0]\n for sub_ele in self.element.iter():\n if 'LowerCorner' == sub_ele.tag.split('}')[1]:\n t1 = sub_ele.text.split(' ')\n out_arr[0] = float(t1[0])\n out_arr[2] = float(t1[1])\n if 'UpperCorner' == sub_ele.tag.split('}')[1]:\n t2 = sub_ele.text.split(' ')\n out_arr[1] = float(t2[0])\n out_arr[3] = float(t2[1])\n return out_arr\n\n def title(self):\n for sub_ele in self.element.iter():\n if 'title' == sub_ele.tag.split('}')[1]:\n return sub_ele.text\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
from bs4 import BeautifulSoup
from pprint import pprint
from scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage
from scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result
from scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS
from scraper import db
from datetime import datetime
import urllib.request
import urllib.error
import json
import time
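# High-level flow (descriptive comment): scrape_sas() first harvests MTB events
# via get_mtb_events(), then fetches each event's categories and stages, and
# finally writes results for event stages, category stages and stand-alone
# categories through the shared SQLAlchemy session `db` from the scraper package.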
def scrape_sas():
    pprint("Scraping Events")
    get_mtb_events()
    pprint("Getting categories and stages")
    for event in db.session.query(SASEvent):
        pprint(event.event_id)
        get_categories_and_stages(event.event_reference, event.event_id)
        #time.sleep(2)
    for event_stage in db.session.query(SASEventStage):
        pprint("Getting event stage results")
        base_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()
        if (base_event_stage.results):
            pprint("Event has results")
        else:
            write_stage_results(event_stage.stage_reference, event_stage.event_stage_id, "event")
    for category_stage in db.session.query(SASCategoryStage):
        pprint("Getting category stage results")
        base_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()
        if (base_category_stage.results):
            pprint("Category stage has results")
        else:
            write_stage_results(category_stage.stage_reference, category_stage.category_stage_id, "category")
    for category in db.session.query(SASCategory):
        pprint("Getting category results")
        base_category = db.session.query(Category).filter(Category.id==category.category_id).first()
        if (base_category.results):
            pprint("Category has results")
        else:
            if (not base_category.category_stages):
                write_category_results(category.stage_reference, category.id)
            else:
                pprint("No results but has category stages")
    pprint("Scrape Complete")

def get_mtb_events():
    for year in YEARS:
        url = ("%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d" %
               (DESTINATION_URL, MTB_EVENT_TYPE, year))
        try:
            page = urllib.request.urlopen(url)
            content = page.read().decode("utf-8")
            json_content = json.loads(content)
            soup = BeautifulSoup(json_content['HTML'], "html.parser")
            anchors = soup.find_all('a')
        except (urllib.error.HTTPError, ConnectionResetError):
            #Skip this year if the request fails, otherwise anchors is undefined below
            continue
        for anchor in anchors:
            event_reference = anchor["href"]
            divs = anchor.find_all('div')
            for div in divs:
                if ("event-date" in div["class"]):
                    event_date = (div.find(text=True))
                elif ("event-title" in div["class"]):
                    event_name = (div.find(text=True))
            db_date = datetime.strptime(event_date, '%d %b %Y')
            db_event = Event(event_name, db_date)
            db_check = db.session.query(Event.title).filter(Event.title==event_name)
            if not (db.session.query(db_check.exists()).scalar()):
                db.session.add(db_event)
                db.session.flush()
                sas_event = SASEvent(db_event.id, event_reference)
                db.session.add(sas_event)
                db.session.commit()

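# Note: get_mtb_events() only iterates by year (YEARS from sas_config); each
# response is a JSON payload whose 'HTML' field carries the anchor list parsed above.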
def get_categories_and_stages(event_reference, event_id):
    event = db.session.query(Event).filter(Event.id==event_id).first()
    if (event.categories or event.event_stages):
        pprint("Event Exists")
    else:
        url = (DESTINATION_URL + event_reference)
        try:
            page = urllib.request.urlopen(url)
        except (urllib.error.HTTPError, urllib.error.URLError):
            return
        soup = BeautifulSoup(page, "html.parser")
        check_stages = get_categories(soup, event_id)

def get_categories(soup, event_id):
    category_div = soup.find('div', attrs={"id" : "category_container"})
    #Check to see if event has categories first
    if category_div:
        divs = category_div.find_all('div')
        for div in divs:
            if div.has_attr("data-event-category-id"):
                #Event has categories
                category_reference = div["data-event-category-id"]
                category_name = div["data-loading-text"]
                category_own_stage_reference = div["data-event-stage-id"]
                db_category = Category(category_name, event_id)
                #Check both name and event id to allow duplicate names
                db_category_check = db.session.query(Category.name).filter(
                    (Category.name==category_name) &
                    (Category.event_id==event_id))
                #Check SAS category for duplicates as well
                db_sas_category_check = db.session.query(SASCategory).filter(
                    (SASCategory.category_reference==category_reference) &
                    (SASCategory.stage_reference==category_own_stage_reference))
                if not (db.session.query(db_category_check.exists()).scalar()):
                    db.session.add(db_category)
                    db.session.flush()
                    if not (db.session.query(db_sas_category_check.exists()).scalar()):
                        db_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)
                        db.session.add(db_sas_category)
                        db.session.flush()
                        db.session.commit()
                    if (div["data-multiple-event-stages"] == "1"):
                        #Event has stages with their own categories
                        get_category_stages(soup, db_category.id, category_reference)
    else:
        #Event does not have categories
        get_event_stages(soup, event_id)

def get_category_stages(soup, category_id, category_reference):
    stage_group_div = soup.find('div', attrs={"id" : ("ec_" + category_reference)})
    stage_divs = stage_group_div.find_all('div')
    for stage_div in stage_divs:
        if stage_div.has_attr("data-stage-id"):
            category_stage_reference = stage_div["data-stage-id"]
            category_stage_name = stage_div["data-loading-text"]
            db_category_stage = CategoryStage(category_stage_name, category_id)
            #Check both name and category id to allow duplicate names
            db_category_stage_check = db.session.query(CategoryStage.name).filter(
                (CategoryStage.name==category_stage_name) &
                (CategoryStage.category_id==category_id))
            if not (db.session.query(db_category_stage_check.exists()).scalar()):
                db.session.add(db_category_stage)
                db.session.flush()
                db_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)
                db.session.add(db_sas_category_stage)
                db.session.flush()
                db.session.commit()

def get_event_stages(soup, event_id):
    all_event_stage_divs = soup.find('div', class_ = "row categories_stages event-sub-types")
    #Check if event has stages
    if all_event_stage_divs:
        event_stage_divs = all_event_stage_divs.find_all('div')
        for event_stage_div in event_stage_divs:
            if event_stage_div.has_attr("data-stage-id"):
                #Event has stages and no categories
                event_stage_reference = event_stage_div["data-stage-id"]
                event_stage_name = event_stage_div["data-loading-text"]
                db_event_stage = EventStage(event_stage_name, event_id)
                #Check if it exists by name and ID and add if it doesn't
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name==event_stage_name) &
                    (EventStage.event_id==event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.flush()
                    db.session.commit()
    else:
        #Event has no stages or categories
        #create new stage for just the overall results, unless event has no results
        event_stage_reference_div = soup.find('div', class_ = "result-row load-results")
        if event_stage_reference_div:
            if event_stage_reference_div.has_attr("data-stage"):
                event_stage_reference = event_stage_reference_div["data-stage"]
                sas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()
                db_event_stage_check = db.session.query(EventStage.name).filter(
                    (EventStage.name=="Overall Results") &
                    (EventStage.event_id==sas_event.event_id))
                if not (db.session.query(db_event_stage_check.exists()).scalar()):
                    db_event_stage = EventStage("Overall Results", sas_event.event_id)
                    db.session.add(db_event_stage)
                    db.session.flush()
                    db_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)
                    db.session.add(db_sas_event_stage)
                    db.session.commit()

def get_results(event_reference):
    url = ("%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999" %
           (DESTINATION_URL, event_reference))
    pprint(url)
    try:
        page = urllib.request.urlopen(url)
    except (urllib.error.HTTPError, ConnectionResetError):
        #Returns None when the request fails
        return
    content = page.read().decode("utf-8")
    json_content = json.loads(content)
    json_results = json_content['rows']
    return json_results

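# Each row returned by get_results() is consumed below and is expected to carry
# at least 'overall_pos', 'gender_pos', 'time_taken_seconds', 'first_name',
# 'last_name', 'person_sex' and 'date_of_birth'. Callers must handle the None
# returned on a failed request.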
def write_stage_results(stage_reference, stage_id, stage_type):
    results = get_results(stage_reference)
    category_stage_id = None
    event_stage_id = None
    if (stage_type=="event"):
        event_stage_id = stage_id
    elif (stage_type=="category"):
        category_stage_id = stage_id
    if results:
        for result in results:
            participant_id = get_participant(result)
            db_result_check = db.session.query(Result).filter(
                (Result.position==result['overall_pos']) &
                (Result.gender_position==result['gender_pos']) &
                (Result.time==result['time_taken_seconds']) &
                (Result.event_stage_id==event_stage_id) &
                (Result.category_stage_id==category_stage_id))
            if not (db.session.query(db_result_check.exists()).scalar()):
                if (stage_type=="category"):
                    db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                                       result['time_taken_seconds'], None, category_stage_id, None)
                elif (stage_type=="event"):
                    db_result = Result(result['overall_pos'], participant_id, result['gender_pos'],
                                       result['time_taken_seconds'], event_stage_id, None, None)
                db.session.add(db_result)
                db.session.commit()

def write_category_results(category_reference, category_id):
    results = get_results(category_reference)
    if not results:
        #Guard against a failed request (get_results returns None)
        return
    for result in results:
        participant_id = get_participant(result)
        db_result_check = db.session.query(Result).filter(
            (Result.position==result['overall_pos']) &
            (Result.gender_position==result['gender_pos']) &
            (Result.time==result['time_taken_seconds']) &
            (Result.category_id==category_id)).first()
        if not db_result_check:
            db_category_result = Result(result['overall_pos'], participant_id,
                                        result['gender_pos'], result['time_taken_seconds'], None, None, category_id)
            db.session.add(db_category_result)
            db.session.commit()

def get_participant(result):
    if result['date_of_birth']:
        birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()
    else:
        birth_date = None
    db_participant_check = db.session.query(Participant).filter(
        (Participant.first_name==result['first_name']) &
        (Participant.last_name==result['last_name']) &
        (Participant.sex==result['person_sex']) &
        (Participant.birth_date==birth_date))
    if not (db.session.query(db_participant_check.exists()).scalar()):
        db_participant = Participant(result['first_name'], result['last_name'],
                                     result['person_sex'], birth_date)
        db.session.add(db_participant)
        db.session.commit()
        return db_participant.id
    else:
        return db_participant_check.first().id
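
# Minimal usage sketch (an assumption, not shown in the original sources): the
# scraper is assumed to be driven by calling scrape_sas() once the scraper
# package's `db` session is bound to a configured database.
if __name__ == "__main__":
    scrape_sas()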
|
normal
|
{
"blob_id": "ecc351cf95254e0bbc5021eff11c500fa0950bd3",
"index": 2653,
"step-1": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\n<mask token>\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\n<mask token>\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if 
event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n 
participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-2": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\n<mask token>\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = 
stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 
'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-3": "<mask token>\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\ndef get_mtb_events():\n for year in YEARS:\n url = (\n '%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d'\n % (DESTINATION_URL, MTB_EVENT_TYPE, year))\n try:\n page = urllib.request.urlopen(url)\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n soup = BeautifulSoup(json_content['HTML'], 'html.parser')\n anchors = soup.find_all('a')\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n pass\n for anchor in anchors:\n event_reference = anchor['href']\n divs = anchor.find_all('div')\n for div in divs:\n if 'event-date' in div['class']:\n event_date = div.find(text=True)\n elif 'event-title' in div['class']:\n event_name = div.find(text=True)\n db_date = datetime.strptime(event_date, '%d %b %Y')\n db_event = Event(event_name, db_date)\n db_check = db.session.query(Event.title).filter(Event.title ==\n event_name)\n if not db.session.query(db_check.exists()).scalar():\n db.session.add(db_event)\n db.session.flush()\n sas_event = SASEvent(db_event.id, event_reference)\n db.session.add(sas_event)\n db.session.commit()\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n 
(Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n 
'%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-4": "from bs4 import BeautifulSoup\nfrom pprint import pprint\nfrom scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage\nfrom scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result\nfrom scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS\nfrom scraper import db\nfrom datetime import datetime\nimport urllib\nimport json\nimport time\n\n\ndef scrape_sas():\n pprint('Scraping Events')\n get_mtb_events()\n pprint('Getting categories and stages')\n for event in db.session.query(SASEvent):\n pprint(event.event_id)\n get_categories_and_stages(event.event_reference, event.event_id)\n for event_stage in db.session.query(SASEventStage):\n pprint('Getting event stage results')\n base_event_stage = db.session.query(EventStage).filter(EventStage.\n id == event_stage.event_stage_id).first()\n if base_event_stage.results:\n pprint('Event has results')\n else:\n write_stage_results(event_stage.stage_reference, event_stage.\n event_stage_id, 'event')\n for category_stage in db.session.query(SASCategoryStage):\n pprint('Getting category stage results')\n base_category_stage = db.session.query(CategoryStage).filter(\n CategoryStage.id == category_stage.category_stage_id).first()\n if base_category_stage.results:\n pprint('Category stage has results')\n else:\n write_stage_results(category_stage.stage_reference,\n category_stage.category_stage_id, 'category')\n for category in db.session.query(SASCategory):\n pprint('Getting category results')\n base_category = db.session.query(Category).filter(Category.id ==\n category.category_id).first()\n if base_category.results:\n pprint('Category has results')\n elif not base_category.category_stages:\n write_category_results(category.stage_reference, category.id)\n else:\n pprint('No results but has category stages')\n pprint('Scrape Complete')\n\n\ndef get_mtb_events():\n for year in YEARS:\n url = (\n '%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d'\n % (DESTINATION_URL, MTB_EVENT_TYPE, year))\n try:\n page = urllib.request.urlopen(url)\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n soup = BeautifulSoup(json_content['HTML'], 'html.parser')\n anchors = soup.find_all('a')\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n pass\n for anchor in anchors:\n event_reference = anchor['href']\n divs = anchor.find_all('div')\n for div in divs:\n if 'event-date' in div['class']:\n event_date = div.find(text=True)\n elif 'event-title' in div['class']:\n event_name = div.find(text=True)\n db_date = datetime.strptime(event_date, '%d %b %Y')\n db_event = Event(event_name, db_date)\n db_check = db.session.query(Event.title).filter(Event.title ==\n event_name)\n if not db.session.query(db_check.exists()).scalar():\n db.session.add(db_event)\n db.session.flush()\n sas_event = SASEvent(db_event.id, event_reference)\n db.session.add(sas_event)\n db.session.commit()\n\n\ndef get_categories_and_stages(event_reference, event_id):\n event = db.session.query(Event).filter(Event.id == event_id).first()\n if event.categories or event.event_stages:\n pprint('Event Exists')\n else:\n url = DESTINATION_URL + event_reference\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.URLError):\n return\n soup = BeautifulSoup(page, 'html.parser')\n check_stages = get_categories(soup, event_id)\n\n\ndef get_categories(soup, event_id):\n category_div = soup.find('div', attrs={'id': 
'category_container'})\n if category_div:\n divs = category_div.find_all('div')\n for div in divs:\n if div.has_attr('data-event-category-id'):\n category_reference = div['data-event-category-id']\n category_name = div['data-loading-text']\n category_own_stage_reference = div['data-event-stage-id']\n db_category = Category(category_name, event_id)\n db_category_check = db.session.query(Category.name).filter(\n (Category.name == category_name) & (Category.event_id ==\n event_id))\n db_sas_category_check = db.session.query(SASCategory).filter(\n (SASCategory.category_reference == category_reference) &\n (SASCategory.stage_reference ==\n category_own_stage_reference))\n if not db.session.query(db_category_check.exists()).scalar():\n db.session.add(db_category)\n db.session.flush()\n if not db.session.query(db_sas_category_check.exists()\n ).scalar():\n db_sas_category = SASCategory(category_reference,\n category_own_stage_reference, db_category.id)\n db.session.add(db_sas_category)\n db.session.flush()\n db.session.commit()\n if div['data-multiple-event-stages'] == '1':\n get_category_stages(soup, db_category.id,\n category_reference)\n else:\n get_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n stage_group_div = soup.find('div', attrs={'id': 'ec_' + category_reference}\n )\n stage_divs = stage_group_div.find_all('div')\n for stage_div in stage_divs:\n if stage_div.has_attr('data-stage-id'):\n category_stage_reference = stage_div['data-stage-id']\n category_stage_name = stage_div['data-loading-text']\n db_category_stage = CategoryStage(category_stage_name, category_id)\n db_category_stage_check = db.session.query(CategoryStage.name\n ).filter((CategoryStage.name == category_stage_name) & (\n CategoryStage.category_id == category_id))\n if not db.session.query(db_category_stage_check.exists()).scalar():\n db.session.add(db_category_stage)\n db.session.flush()\n db_sas_category_stage = SASCategoryStage(db_category_stage.\n id, category_stage_reference)\n db.session.add(db_sas_category_stage)\n db.session.flush()\n db.session.commit()\n\n\ndef get_event_stages(soup, event_id):\n all_event_stage_divs = soup.find('div', class_=\n 'row categories_stages event-sub-types')\n if all_event_stage_divs:\n event_stage_divs = all_event_stage_divs.find_all('div')\n for event_stage_div in event_stage_divs:\n if event_stage_div.has_attr('data-stage-id'):\n event_stage_reference = event_stage_div['data-stage-id']\n event_stage_name = event_stage_div['data-loading-text']\n db_event_stage = EventStage(event_stage_name, event_id)\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == event_stage_name) & (\n EventStage.event_id == event_id))\n if not db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.flush()\n db.session.commit()\n else:\n event_stage_reference_div = soup.find('div', class_=\n 'result-row load-results')\n if event_stage_reference_div:\n if event_stage_reference_div.has_attr('data-stage'):\n event_stage_reference = event_stage_reference_div['data-stage']\n sas_event = db.session.query(SASEvent).filter(SASEvent.\n event_id == event_id).first()\n db_event_stage_check = db.session.query(EventStage.name\n ).filter((EventStage.name == 'Overall Results') & (\n EventStage.event_id == sas_event.event_id))\n if not 
db.session.query(db_event_stage_check.exists()).scalar(\n ):\n db_event_stage = EventStage('Overall Results',\n sas_event.event_id)\n db.session.add(db_event_stage)\n db.session.flush()\n db_sas_event_stage = SASEventStage(db_event_stage.id,\n event_stage_reference)\n db.session.add(db_sas_event_stage)\n db.session.commit()\n\n\ndef get_results(event_reference):\n url = (\n '%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999'\n % (DESTINATION_URL, event_reference))\n pprint(url)\n try:\n page = urllib.request.urlopen(url)\n except (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n return\n content = page.read().decode('utf-8')\n json_content = json.loads(content)\n json_results = json_content['rows']\n return json_results\n\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n results = get_results(stage_reference)\n category_stage_id = None\n event_stage_id = None\n if stage_type == 'event':\n event_stage_id = stage_id\n elif stage_type == 'category':\n category_stage_id = stage_id\n if results:\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.\n position == result['overall_pos']) & (Result.\n gender_position == result['gender_pos']) & (Result.time ==\n result['time_taken_seconds']) & (Result.event_stage_id ==\n event_stage_id) & (Result.category_stage_id ==\n category_stage_id))\n if not db.session.query(db_result_check.exists()).scalar():\n if stage_type == 'category':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, category_stage_id, None)\n elif stage_type == 'event':\n db_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], event_stage_id, None, None)\n db.session.add(db_result)\n db.session.commit()\n\n\ndef write_category_results(category_reference, category_id):\n results = get_results(category_reference)\n for result in results:\n participant_id = get_participant(result)\n db_result_check = db.session.query(Result).filter((Result.position ==\n result['overall_pos']) & (Result.gender_position == result[\n 'gender_pos']) & (Result.time == result['time_taken_seconds']) &\n (Result.category_id == category_id)).first()\n if not db_result_check:\n db_category_result = Result(result['overall_pos'],\n participant_id, result['gender_pos'], result[\n 'time_taken_seconds'], None, None, category_id)\n db.session.add(db_category_result)\n db.session.commit()\n\n\ndef get_participant(result):\n if result['date_of_birth']:\n birth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d'\n ).date()\n else:\n birth_date = None\n db_participant_check = db.session.query(Participant).filter((\n Participant.first_name == result['first_name']) & (Participant.\n last_name == result['last_name']) & (Participant.sex == result[\n 'person_sex']) & (Participant.birth_date == birth_date))\n if not db.session.query(db_participant_check.exists()).scalar():\n db_participant = Participant(result['first_name'], result[\n 'last_name'], result['person_sex'], birth_date)\n db.session.add(db_participant)\n db.session.commit()\n return db_participant.id\n else:\n return db_participant_check.first().id\n",
"step-5": "from bs4 import BeautifulSoup\nfrom pprint import pprint \nfrom scraper.sas.sas_models import SASEvent, SASCategory, SASCategoryStage, SASEventStage\nfrom scraper.base_models.models import Event, Category, CategoryStage, EventStage, Participant, Result\nfrom scraper.sas.sas_config import DESTINATION_URL, MTB_EVENT_TYPE, YEARS\nfrom scraper import db\nfrom datetime import datetime\nimport urllib\nimport json \nimport time\n\ndef scrape_sas():\n\tpprint(\"Scraping Events\")\n\tget_mtb_events()\n\tpprint(\"Getting categories and stages\")\n\tfor event in db.session.query(SASEvent):\n\t\tpprint(event.event_id)\n\t\tget_categories_and_stages(event.event_reference, event.event_id)\n\t\t#time.sleep(2)\n\tfor event_stage in db.session.query(SASEventStage):\n\t\tpprint(\"Getting event stage results\")\n\t\tbase_event_stage = db.session.query(EventStage).filter(EventStage.id==event_stage.event_stage_id).first()\n\t\tif (base_event_stage.results):\n\t\t\tpprint(\"Event has results\")\n\t\telse:\n\t\t\twrite_stage_results(event_stage.stage_reference, event_stage.event_stage_id, \"event\")\n\tfor category_stage in db.session.query(SASCategoryStage):\n\t\tpprint(\"Getting category stage results\")\n\t\tbase_category_stage = db.session.query(CategoryStage).filter(CategoryStage.id==category_stage.category_stage_id).first()\n\t\tif (base_category_stage.results):\n\t\t\tpprint(\"Category stage has results\")\n\t\telse: \n\t\t\twrite_stage_results(category_stage.stage_reference, category_stage.category_stage_id, \"category\")\n\tfor category in db.session.query(SASCategory):\n\t\tpprint(\"Getting category results\")\n\t\tbase_category = db.session.query(Category).filter(Category.id==category.category_id).first()\n\t\tif (base_category.results):\n\t\t\tpprint(\"Category has results\")\n\t\telse: \n\t\t\tif (not base_category.category_stages):\n\t\t\t\twrite_category_results(category.stage_reference, category.id)\n\t\t\telse:\n\t\t\t\tpprint(\"No results but has category stages\")\n\tpprint(\"Scrape Complete\")\n\ndef get_mtb_events(): \n\tfor year in YEARS: \n\t\turl = (\"%s/participants/event-results/fetch-series-by-type?event_type=%s&event_year=%d\" % \n\t\t\t (DESTINATION_URL, MTB_EVENT_TYPE, year))\n\t\ttry: \n\t\t\tpage = urllib.request.urlopen(url)\n\t\t\tcontent = page.read().decode(\"utf-8\")\n\t\t\tjson_content = json.loads(content)\n\t\t\tsoup = BeautifulSoup(json_content['HTML'], \"html.parser\")\n\t\t\tanchors = soup.find_all('a')\n\t\texcept (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n\t\t\tpass\n\t\tfor anchor in anchors: \n\t\t\tevent_reference = anchor[\"href\"]\n\t\t\tdivs = anchor.find_all('div')\n\t\t\tfor div in divs:\n\t\t\t\tif (\"event-date\" in div[\"class\"]):\n\t\t\t\t\tevent_date = (div.find(text=True))\n\t\t\t\telif (\"event-title\" in div[\"class\"]):\n\t\t\t\t\tevent_name = (div.find(text=True))\n\t\t\tdb_date = datetime.strptime(event_date, '%d %b %Y')\n\t\t\tdb_event = Event(event_name, db_date)\n\t\t\tdb_check = db.session.query(Event.title).filter(Event.title==event_name)\n\t\t\tif not (db.session.query(db_check.exists()).scalar()):\n\t\t\t\tdb.session.add(db_event)\n\t\t\t\tdb.session.flush()\n\t\t\t\tsas_event = SASEvent(db_event.id, event_reference)\n\t\t\t\tdb.session.add(sas_event)\n\t\t\t\tdb.session.commit()\n\ndef get_categories_and_stages(event_reference, event_id):\n\tevent = db.session.query(Event).filter(Event.id==event_id).first()\n\tif (event.categories or event.event_stages):\n\t\tpprint(\"Event Exists\")\n\telse: \n\t\turl = 
(DESTINATION_URL + event_reference)\n\t\ttry: \n\t\t\tpage = urllib.request.urlopen(url)\n\t\texcept (urllib.error.HTTPError, urllib.error.URLError):\n\t\t\treturn\n\t\tsoup = BeautifulSoup(page, \"html.parser\")\n\t\tcheck_stages = get_categories(soup, event_id)\n\ndef get_categories(soup, event_id):\n\tcategory_div = soup.find('div', attrs={\"id\" : \"category_container\"})\n\t#Check to see if event has categories first\n\tif category_div:\n\t\tdivs = category_div.find_all('div')\n\t\tfor div in divs: \n\t\t\tif div.has_attr(\"data-event-category-id\"):\n\t\t\t\t#Event has categories\n\t\t\t\tcategory_reference = div[\"data-event-category-id\"]\n\t\t\t\tcategory_name = div[\"data-loading-text\"]\n\t\t\t\tcategory_own_stage_reference = div[\"data-event-stage-id\"]\n\t\t\t\tdb_category = Category(category_name, event_id)\n\t\t\t\t#Check both name and event id to allow duplicate names \n\t\t\t\tdb_category_check = db.session.query(Category.name).filter(\n\t\t\t\t(Category.name==category_name) &\n\t\t\t\t(Category.event_id==event_id))\n\t\t\t\t#Check SAS category for duplicates as well \n\t\t\t\tdb_sas_category_check = db.session.query(SASCategory).filter(\n\t\t\t\t(SASCategory.category_reference==category_reference) &\n\t\t\t\t(SASCategory.stage_reference==category_own_stage_reference))\n\t\t\t\tif not (db.session.query(db_category_check.exists()).scalar()):\n\t\t\t\t\tdb.session.add(db_category)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tif not (db.session.query(db_sas_category_check.exists()).scalar()):\n\t\t\t\t\t\tdb_sas_category = SASCategory(category_reference, category_own_stage_reference, db_category.id)\n\t\t\t\t\t\tdb.session.add(db_sas_category)\n\t\t\t\t\t\tdb.session.flush()\n\t\t\t\t\t\tdb.session.commit()\t\t\t\n\t\t\t\t\tif (div[\"data-multiple-event-stages\"] == \"1\"):\n\t\t\t\t\t\t#Event has stages with their own categories\n\t\t\t\t\t\tget_category_stages(soup, db_category.id, category_reference)\n\telse:\n\t\t#Event does not have categories\n\t\tget_event_stages(soup, event_id)\n\n\ndef get_category_stages(soup, category_id, category_reference):\n\tstage_group_div = soup.find('div', attrs={\"id\" : (\"ec_\" + category_reference)})\n\tstage_divs = stage_group_div.find_all('div')\n\tfor stage_div in stage_divs: \n\t\tif stage_div.has_attr(\"data-stage-id\"):\n\t\t\tcategory_stage_reference = stage_div[\"data-stage-id\"]\n\t\t\tcategory_stage_name = stage_div[\"data-loading-text\"]\n\t\t\tdb_category_stage = CategoryStage(category_stage_name, category_id)\n\t\t\t#Check both name and category id to allow duplicate names \n\t\t\tdb_category_stage_check = db.session.query(CategoryStage.name).filter(\n\t\t\t\t(CategoryStage.name==category_stage_name) &\n\t\t\t\t(CategoryStage.category_id==category_id))\n\t\t\tif not (db.session.query(db_category_stage_check.exists()).scalar()):\n\t\t\t\tdb.session.add(db_category_stage)\n\t\t\t\tdb.session.flush()\n\t\t\t\tdb_sas_category_stage = SASCategoryStage(db_category_stage.id, category_stage_reference)\n\t\t\t\tdb.session.add(db_sas_category_stage)\n\t\t\t\tdb.session.flush()\n\t\t\t\tdb.session.commit()\n\ndef get_event_stages(soup, event_id):\n\tall_event_stage_divs = soup.find('div', class_ = \"row categories_stages event-sub-types\")\n\t#Check if event has stages\n\tif all_event_stage_divs:\n\t\tevent_stage_divs = all_event_stage_divs.find_all ('div')\n\t\tfor event_stage_div in event_stage_divs: \n\t\t\tif event_stage_div.has_attr(\"data-stage-id\"):\n\t\t\t\t#Event has stages and no categories\n\t\t\t\tevent_stage_reference = 
event_stage_div[\"data-stage-id\"]\n\t\t\t\tevent_stage_name = event_stage_div[\"data-loading-text\"]\n\t\t\t\tdb_event_stage = EventStage(event_stage_name, event_id)\n\t\t\t\t#Check if it exists by name and ID and add if it doesn't\n\t\t\t\tdb_event_stage_check = db.session.query(EventStage.name).filter(\n\t\t\t\t\t(EventStage.name==event_stage_name) &\n\t\t\t\t\t(EventStage.event_id==event_id))\n\t\t\t\tif not (db.session.query(db_event_stage_check.exists()).scalar()):\n\t\t\t\t\tdb.session.add(db_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)\n\t\t\t\t\tdb.session.add(db_sas_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb.session.commit()\n\telse: \n\t\t#Event has no stages or categories\n\t\t#create new stage for just the overall results, unless event has no results\n\t\tevent_stage_reference_div = soup.find('div', class_ = \"result-row load-results\")\n\t\tif event_stage_reference_div:\n\t\t\tif event_stage_reference_div.has_attr(\"data-stage\"):\n\t\t\t\tevent_stage_reference = event_stage_reference_div[\"data-stage\"]\n\t\t\t\tsas_event = db.session.query(SASEvent).filter(SASEvent.event_id==event_id).first()\n\t\t\t\tdb_event_stage_check = db.session.query(EventStage.name).filter(\n\t\t\t\t\t(EventStage.name==\"Overall Results\") &\n\t\t\t\t\t(EventStage.event_id==sas_event.event_id))\n\t\t\t\tif not (db.session.query(db_event_stage_check.exists()).scalar()):\n\t\t\t\t\tdb_event_stage = EventStage(\"Overall Results\", sas_event.event_id)\n\t\t\t\t\tdb.session.add(db_event_stage)\n\t\t\t\t\tdb.session.flush()\n\t\t\t\t\tdb_sas_event_stage = SASEventStage(db_event_stage.id, event_stage_reference)\n\t\t\t\t\tdb.session.add(db_sas_event_stage)\n\t\t\t\t\tdb.session.commit()\n\ndef get_results(event_reference): \n\turl = (\"%s/participants/event-results/add-results?stage_id=%s&from=0&count=9999\" % \n\t\t\t (DESTINATION_URL, event_reference))\n\tpprint(url)\n\ttry: \n\t\tpage = urllib.request.urlopen(url)\n\texcept (urllib.error.HTTPError, urllib.error.ConnectionResetError):\n\t\treturn\n\tcontent = page.read().decode(\"utf-8\")\n\tjson_content = json.loads(content)\n\tjson_results = json_content['rows']\n\treturn json_results\n\ndef write_stage_results(stage_reference, stage_id, stage_type):\n\tresults = get_results(stage_reference)\n\tcategory_stage_id = None\n\tevent_stage_id = None\n\tif (stage_type==\"event\"):\n\t\tevent_stage_id = stage_id\n\telif (stage_type==\"category\"):\n\t\tcategory_stage_id = stage_id\n\tif results:\n\t\tfor result in results: \n\t\t\tparticipant_id = get_participant(result)\n\t\t\tdb_result_check = db.session.query(Result).filter(\n\t\t\t\t(Result.position==result['overall_pos']) &\n\t\t\t\t(Result.gender_position==result['gender_pos']) & \n\t\t\t\t(Result.time==result['time_taken_seconds']) & \n\t\t\t\t(Result.event_stage_id==event_stage_id) &\n\t\t\t\t(Result.category_stage_id==category_stage_id))\n\t\t\tif not (db.session.query(db_result_check.exists()).scalar()):\n\t\t\t\tif (stage_type==\"category\"): \n\t\t\t\t\tdb_result = Result(result['overall_pos'], participant_id, result['gender_pos'],\n\t\t\t\t\tresult['time_taken_seconds'], None, category_stage_id, None)\n\t\t\t\telif (stage_type==\"event\"):\n\t\t\t\t\tdb_result = Result(result['overall_pos'], participant_id, result['gender_pos'],\n\t\t\t\t result['time_taken_seconds'], event_stage_id, None, None)\n\t\t\t\tdb.session.add(db_result)\n\t\t\t\tdb.session.commit()\n\ndef 
write_category_results(category_reference, category_id):\n\tresults = get_results(category_reference)\n\tfor result in results: \n\t\tparticipant_id = get_participant(result)\n\n\t\tdb_result_check = db.session.query(Result).filter(\n\t\t\t(Result.position==result['overall_pos']) &\n\t\t\t(Result.gender_position==result['gender_pos']) & \n\t\t\t(Result.time==result['time_taken_seconds']) & \n\t\t\t(Result.category_id==category_id)).first()\n\t\tif not db_result_check:\n\t\t\tdb_category_result = Result(result['overall_pos'], participant_id,\n\t\t\tresult['gender_pos'], result['time_taken_seconds'], None, None, category_id)\n\t\t\tdb.session.add(db_category_result)\n\t\t\tdb.session.commit()\n\ndef get_participant(result):\n\tif result['date_of_birth']:\n\t\tbirth_date = datetime.strptime(result['date_of_birth'], '%Y-%m-%d').date()\n\telse:\n\t\tbirth_date = None\n\tdb_participant_check = db.session.query(Participant).filter(\n\t\t(Participant.first_name==result['first_name']) &\n\t\t(Participant.last_name==result['last_name']) & \n\t\t(Participant.sex==result['person_sex']) & \n\t\t(Participant.birth_date==birth_date))\n\tif not (db.session.query(db_participant_check.exists()).scalar()):\n\t\tdb_participant = Participant(result['first_name'], result['last_name'],\n\t\tresult['person_sex'], birth_date)\n\t\tdb.session.add(db_participant)\n\t\tdb.session.commit()\n\t\treturn db_participant.id\n\telse: \n\t\treturn db_participant_check.first().id\n\n\n\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert len(kwic.kwic(mystr)) == 3
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mystr = """hello world
my test
apples oranges"""
assert len(kwic.kwic(mystr)) == 3
<|reserved_special_token_1|>
import kwic
mystr = """hello world
my test
apples oranges"""
assert len(kwic.kwic(mystr)) == 3
<|reserved_special_token_1|>
import kwic
mystr = "hello world\nmy test\napples oranges"
#asseirt(kwic0.kwic(mystr) == [])
#assert(kwic1.kwic(mystr) == [mystr])
#assert(len(kwic3.kwic(mystr))==2)
assert len(kwic.kwic(mystr)) == 3
|
flexible
|
{
"blob_id": "1f21fdc9a198b31bb0d5bd6dd8f46a1b3b28ec94",
"index": 6773,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert len(kwic.kwic(mystr)) == 3\n",
"step-3": "<mask token>\nmystr = \"\"\"hello world\nmy test\napples oranges\"\"\"\nassert len(kwic.kwic(mystr)) == 3\n",
"step-4": "import kwic\nmystr = \"\"\"hello world\nmy test\napples oranges\"\"\"\nassert len(kwic.kwic(mystr)) == 3\n",
"step-5": "import kwic\n\n\nmystr = \"hello world\\nmy test\\napples oranges\"\n#asseirt(kwic0.kwic(mystr) == [])\n#assert(kwic1.kwic(mystr) == [mystr])\n#assert(len(kwic3.kwic(mystr))==2)\nassert len(kwic.kwic(mystr)) == 3\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# import tensorflow as tf
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)
# def build_CNN_clasifier(x):
# x_image = tf.reshape (x, [-1,28,28,1])
#
# #layer1
# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))
# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))
# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)
# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')
#
# #layer2
# w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))
# b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))
# h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)
#
# h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')
#
# #fully-connected layer
# w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))
# b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))
# h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])
# h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)
#
#
#
#
# with tf.Session() as sess:
# sess.run(x_image, feed_dict={x:mnist})
# print(x_image)
# print(x_image.shape)
import numpy as np
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x)+2*p-len(w))/s)+1):
        j = s*i
res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))
return np.array(res)
## Testing:
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
import tensorflow as tf
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME'))
#res = tf.nn.conv1d(data, kernel, 2, 'SAME')
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
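# Note: tf.nn.conv1d, like most deep-learning "convolution" ops, computes
# cross-correlation and does not flip the kernel, so the VALID run above
# should yield [8., 11., 7., 9., 4.], while conv1d()/np.convolve (which do
# flip w) give [7, 8, 9, 11, 3]. Reverse the kernel (k[::-1]) before
# reshaping it if an exact match with np.convolve is wanted.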
|
normal
|
{
"blob_id": "a336434abc526357db0536955885cf076ee60f59",
"index": 7220,
"step-1": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\nprint(i, '\\n', k, '\\n')\n<mask token>\nprint(data, '\\n', kernel, '\\n')\n<mask token>\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-3": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-4": "import numpy as np\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-5": "# import tensorflow as tf\n\n# from tensorflow.examples.tutorials.mnist import input_data\n# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)\n# def build_CNN_clasifier(x):\n# x_image = tf.reshape (x, [-1,28,28,1])\n#\n# #layer1\n# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))\n# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))\n# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)\n# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')\n#\n# #layer2\n # w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))\n # b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))\n # h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)\n #\n # h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')\n #\n # #fully-connected layer\n # w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))\n # b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))\n # h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])\n # h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)\n #\n #\n #\n #\n # with tf.Session() as sess:\n # sess.run(x_image, feed_dict={x:mnist})\n # print(x_image)\n # print(x_image.shape)\n\n\nimport numpy as np\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x)+2*p-len(w))/s)+1):\n j = s*i;\n res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))\n\n return np.array(res)\n## Testing:\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n\n\n\n\n\n\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME’))\n#res = tf.nn.conv1d(data, kernel, 2, 'SAME')\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Instruction(QWidget):
<|reserved_special_token_0|>
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Instruction(QWidget):
def __init__(self):
super().__init__()
uic.loadUi('../ui/instruction.ui', self)
self.OK_btn.clicked.connect(self.show_game)
self.set_background_instruction()
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Instruction(QWidget):
def __init__(self):
super().__init__()
uic.loadUi('../ui/instruction.ui', self)
self.OK_btn.clicked.connect(self.show_game)
self.set_background_instruction()
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
def show_game(self):
self.parent().show_game()
<|reserved_special_token_1|>
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
class Instruction(QWidget):
def __init__(self):
super().__init__()
uic.loadUi('../ui/instruction.ui', self)
self.OK_btn.clicked.connect(self.show_game)
self.set_background_instruction()
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
def show_game(self):
self.parent().show_game()
<|reserved_special_token_1|>
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
class Instruction(QWidget):
def __init__(self):
super().__init__()
# Set UI file
uic.loadUi('../ui/instruction.ui', self)
# Connect handlers of buttons
self.OK_btn.clicked.connect(self.show_game)
self.set_background_instruction()
# Set background of the windows
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
# Show window of the game
def show_game(self):
self.parent().show_game()
|
flexible
|
{
"blob_id": "da30cea4cfb1ffccabe708fe15e5a633b06d299f",
"index": 2265,
"step-1": "<mask token>\n\n\nclass Instruction(QWidget):\n <mask token>\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n def show_game(self):\n self.parent().show_game()\n",
"step-4": "import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPixmap\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n def show_game(self):\n self.parent().show_game()\n",
"step-5": "import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPixmap\n\n\nclass Instruction(QWidget):\n def __init__(self):\n super().__init__()\n\n # Set UI file\n uic.loadUi('../ui/instruction.ui', self)\n\n # Connect handlers of buttons\n self.OK_btn.clicked.connect(self.show_game)\n\n self.set_background_instruction()\n\n # Set background of the windows\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n # Show window of the game\n def show_game(self):\n self.parent().show_game()\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ycombinatorParser:
<|reserved_special_token_0|>
def getNextPage(pageurl):
response = requests.get(pageurl)
parsed_body = html.fromstring(response.text)
nextpage = parsed_body.xpath('//a[@class="morelink"]')
try:
nexthref = nextpage[0].get('href')
except IndexError:
nexthref = ''
return nexthref
def parsePage(parsed_body, rownumber):
def jsonWriteLine(rownumber, title, autor, url, site):
line = (
"""{"Rownumber": %d,
"title": "%s",
"autor": "%s",
"url": "%s",
"site": "%s",
}
"""
% (rownumber, title, autor, url, site))
return line
def getNews(rownews):
newsdict = {}
for news in rownews:
newsdict['title'] = ''.join(news.xpath('./a/text()'))
for i in news.xpath('./a'):
newsdict['url'] = i.get('href')
newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))
return newsdict
def getAuthor(rowautor):
authordict = {}
for author in rowautor:
authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))
return authordict
for row in parsed_body.xpath('//tr'):
rownews = row.xpath('./td[@class="title"][2]')
rowautor = row.xpath('./td[@class="subtext"][1]')
datadict = {}
rowdata = {}
if rownews:
datadict = getNews(rownews)
if rowautor:
for author in rowautor:
datadict = getAuthor(rowautor)
if datadict:
autor = ''
try:
title = datadict['title']
url = datadict['url']
site = datadict['site']
except KeyError:
autor = datadict['autor']
if autor:
rowdata['rownumber'] = str(rownumber)
rowdata['title'] = str(title)
rowdata['autor'] = str(autor)
rowdata['url'] = str(url)
rowdata['site'] = str(site)
with open('nix.json', mode='a') as f:
json.dump(rowdata, f)
rownumber += 1
if rownumber > 2:
exit()
return rownumber
def __unicode__(self):
return unicode(self.rowdata)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
with open('nix.json', mode='w') as f:
json.dump('', f)
while pageflag:
response = requests.get(pageparse)
parsed_body = html.fromstring(response.text)
rownumber = parsePage(parsed_body, rownumber) - 1
pageparse = siteurl + getNextPage(pageparse)
if pageparse == siteurl:
pageflag = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ycombinatorParser:
siteurl = 'https://news.ycombinator.com/'
def getNextPage(pageurl):
response = requests.get(pageurl)
parsed_body = html.fromstring(response.text)
nextpage = parsed_body.xpath('//a[@class="morelink"]')
try:
nexthref = nextpage[0].get('href')
except IndexError:
nexthref = ''
return nexthref
def parsePage(parsed_body, rownumber):
def jsonWriteLine(rownumber, title, autor, url, site):
line = (
"""{"Rownumber": %d,
"title": "%s",
"autor": "%s",
"url": "%s",
"site": "%s",
}
"""
% (rownumber, title, autor, url, site))
return line
def getNews(rownews):
newsdict = {}
for news in rownews:
newsdict['title'] = ''.join(news.xpath('./a/text()'))
for i in news.xpath('./a'):
newsdict['url'] = i.get('href')
newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))
return newsdict
def getAuthor(rowautor):
authordict = {}
for author in rowautor:
authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))
return authordict
for row in parsed_body.xpath('//tr'):
rownews = row.xpath('./td[@class="title"][2]')
rowautor = row.xpath('./td[@class="subtext"][1]')
datadict = {}
rowdata = {}
if rownews:
datadict = getNews(rownews)
if rowautor:
for author in rowautor:
datadict = getAuthor(rowautor)
if datadict:
autor = ''
try:
title = datadict['title']
url = datadict['url']
site = datadict['site']
except KeyError:
autor = datadict['autor']
if autor:
rowdata['rownumber'] = str(rownumber)
rowdata['title'] = str(title)
rowdata['autor'] = str(autor)
rowdata['url'] = str(url)
rowdata['site'] = str(site)
with open('nix.json', mode='a') as f:
json.dump(rowdata, f)
rownumber += 1
if rownumber > 2:
exit()
return rownumber
def __unicode__(self):
return unicode(self.rowdata)
pageflag = True
rownumber = 1
pageparse = siteurl
with open('nix.json', mode='w') as f:
json.dump('', f)
while pageflag:
response = requests.get(pageparse)
parsed_body = html.fromstring(response.text)
rownumber = parsePage(parsed_body, rownumber) - 1
pageparse = siteurl + getNextPage(pageparse)
if pageparse == siteurl:
pageflag = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ycombinatorParser:
siteurl = 'https://news.ycombinator.com/'
def getNextPage(pageurl):
response = requests.get(pageurl)
parsed_body = html.fromstring(response.text)
nextpage = parsed_body.xpath('//a[@class="morelink"]')
try:
nexthref = nextpage[0].get('href')
except IndexError:
nexthref = ''
return nexthref
def parsePage(parsed_body, rownumber):
def jsonWriteLine(rownumber, title, autor, url, site):
line = (
"""{"Rownumber": %d,
"title": "%s",
"autor": "%s",
"url": "%s",
"site": "%s",
}
"""
% (rownumber, title, autor, url, site))
return line
def getNews(rownews):
newsdict = {}
for news in rownews:
newsdict['title'] = ''.join(news.xpath('./a/text()'))
for i in news.xpath('./a'):
newsdict['url'] = i.get('href')
newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))
return newsdict
def getAuthor(rowautor):
authordict = {}
for author in rowautor:
authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))
return authordict
for row in parsed_body.xpath('//tr'):
rownews = row.xpath('./td[@class="title"][2]')
rowautor = row.xpath('./td[@class="subtext"][1]')
datadict = {}
rowdata = {}
if rownews:
datadict = getNews(rownews)
if rowautor:
for author in rowautor:
datadict = getAuthor(rowautor)
if datadict:
autor = ''
try:
title = datadict['title']
url = datadict['url']
site = datadict['site']
except KeyError:
autor = datadict['autor']
if autor:
rowdata['rownumber'] = str(rownumber)
rowdata['title'] = str(title)
rowdata['autor'] = str(autor)
rowdata['url'] = str(url)
rowdata['site'] = str(site)
with open('nix.json', mode='a') as f:
json.dump(rowdata, f)
rownumber += 1
if rownumber > 2:
exit()
return rownumber
def __unicode__(self):
return unicode(self.rowdata)
pageflag = True
rownumber = 1
pageparse = siteurl
with open('nix.json', mode='w') as f:
json.dump('', f)
while pageflag:
response = requests.get(pageparse)
parsed_body = html.fromstring(response.text)
rownumber = parsePage(parsed_body, rownumber) - 1
pageparse = siteurl + getNextPage(pageparse)
if pageparse == siteurl:
pageflag = False
if __name__ == '__main__':
ycombinatorParser()
<|reserved_special_token_1|>
import requests
import csv
from lxml import html
import json
class ycombinatorParser:
siteurl = 'https://news.ycombinator.com/'
def getNextPage(pageurl):
response = requests.get(pageurl)
parsed_body = html.fromstring(response.text)
nextpage = parsed_body.xpath('//a[@class="morelink"]')
try:
nexthref = nextpage[0].get('href')
except IndexError:
nexthref = ''
return nexthref
def parsePage(parsed_body, rownumber):
def jsonWriteLine(rownumber, title, autor, url, site):
line = (
"""{"Rownumber": %d,
"title": "%s",
"autor": "%s",
"url": "%s",
"site": "%s",
}
"""
% (rownumber, title, autor, url, site))
return line
def getNews(rownews):
newsdict = {}
for news in rownews:
newsdict['title'] = ''.join(news.xpath('./a/text()'))
for i in news.xpath('./a'):
newsdict['url'] = i.get('href')
newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))
return newsdict
def getAuthor(rowautor):
authordict = {}
for author in rowautor:
authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))
return authordict
for row in parsed_body.xpath('//tr'):
rownews = row.xpath('./td[@class="title"][2]')
rowautor = row.xpath('./td[@class="subtext"][1]')
datadict = {}
rowdata = {}
if rownews:
datadict = getNews(rownews)
if rowautor:
for author in rowautor:
datadict = getAuthor(rowautor)
if datadict:
autor = ''
try:
title = datadict['title']
url = datadict['url']
site = datadict['site']
except KeyError:
autor = datadict['autor']
if autor:
rowdata['rownumber'] = str(rownumber)
rowdata['title'] = str(title)
rowdata['autor'] = str(autor)
rowdata['url'] = str(url)
rowdata['site'] = str(site)
with open('nix.json', mode='a') as f:
json.dump(rowdata, f)
rownumber += 1
if rownumber > 2:
exit()
return rownumber
def __unicode__(self):
return unicode(self.rowdata)
pageflag = True
rownumber = 1
pageparse = siteurl
with open('nix.json', mode='w') as f:
json.dump('', f)
while pageflag:
response = requests.get(pageparse)
parsed_body = html.fromstring(response.text)
rownumber = parsePage(parsed_body, rownumber) - 1
pageparse = siteurl + getNextPage(pageparse)
if pageparse == siteurl:
pageflag = False
if __name__ == '__main__':
ycombinatorParser()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import requests
import csv
from lxml import html
import json
class ycombinatorParser():
siteurl = 'https://news.ycombinator.com/'
def getNextPage(pageurl):
response = requests.get(pageurl)
parsed_body = html.fromstring(response.text)
nextpage=parsed_body.xpath('//a[@class="morelink"]')
try:
nexthref=nextpage[0].get('href')
except IndexError:
nexthref = ''
return nexthref
def parsePage(parsed_body,rownumber):
def jsonWriteLine(rownumber,title,autor,url,site):
line = '{"Rownumber": %d,\n "title": "%s",\n "autor": "%s",\n "url": "%s",\n "site": "%s",\n }\n' %(rownumber,title,autor,url,site)
#print line
return line
def getNews(rownews):
newsdict = {}
for news in rownews:
newsdict["title"] = ''.join(news.xpath('./a/text()'))
for i in news.xpath('./a'):
newsdict["url"] = i.get('href')
newsdict["site"] = ''.join(news.xpath('./span/a/span/text()'))
return newsdict
def getAuthor(rowautor):
authordict = {}
for author in rowautor:
authordict["autor"] = ''.join(author.xpath('./a[1]/text()'))
return authordict
for row in parsed_body.xpath('//tr'):
rownews = row.xpath('./td[@class="title"][2]')
rowautor = row.xpath('./td[@class="subtext"][1]')
datadict = {}
rowdata = {}
if rownews:
datadict = getNews(rownews)
if rowautor:
for author in rowautor:
datadict = getAuthor(rowautor)
if datadict:
autor = ''
try:
title=datadict["title"]
url=datadict["url"]
site=datadict["site"]
except KeyError:
autor = datadict["autor"]
if autor:
rowdata['rownumber'] = str(rownumber)
rowdata['title'] = str(title)
rowdata['autor'] = str(autor)
rowdata['url'] = str(url)
rowdata['site'] = str(site)
with open('nix.json',mode='a') as f:
json.dump(rowdata,f)
#outputfile.write(jsonWriteLine(rownumber,title,autor,url,site))
#print jsonWriteLine(rownumber,title,autor,url,site)
rownumber += 1
if rownumber>2:
exit()
return rownumber
def __unicode__(self):
return unicode(self.rowdata)
pageflag = True
rownumber = 1
pageparse = siteurl
with open('nix.json',mode='w') as f:
json.dump('',f)
while pageflag:
response = requests.get(pageparse)
parsed_body = html.fromstring(response.text)
rownumber = parsePage(parsed_body,rownumber)-1
pageparse = siteurl+getNextPage(pageparse)
if pageparse == siteurl:
pageflag = False
if __name__ == '__main__':
ycombinatorParser()
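# Note: the fetching/parsing loop above sits directly in the class body, so it
# runs once while the `class ycombinatorParser` statement itself is evaluated
# (i.e. at import time); the call under __main__ only instantiates the class
# and does not trigger another scrape.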
|
flexible
|
{
"blob_id": "87c27711c0089ca2c7e5c7d0e9edb51b9d4008d9",
"index": 6717,
"step-1": "<mask token>\n\n\nclass ycombinatorParser:\n <mask token>\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n <mask token>\n <mask token>\n <mask token>\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\nif __name__ == '__main__':\n ycombinatorParser()\n",
"step-4": "import requests\nimport csv\nfrom lxml import html\nimport json\n\n\nclass ycombinatorParser:\n siteurl = 'https://news.ycombinator.com/'\n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage = parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref = nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref\n\n def parsePage(parsed_body, rownumber):\n\n def jsonWriteLine(rownumber, title, autor, url, site):\n line = (\n \"\"\"{\"Rownumber\": %d,\n \"title\": \"%s\",\n \"autor\": \"%s\",\n \"url\": \"%s\",\n \"site\": \"%s\",\n }\n\"\"\"\n % (rownumber, title, autor, url, site))\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict['title'] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict['url'] = i.get('href')\n newsdict['site'] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict['autor'] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n if datadict:\n autor = ''\n try:\n title = datadict['title']\n url = datadict['url']\n site = datadict['site']\n except KeyError:\n autor = datadict['autor']\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n with open('nix.json', mode='a') as f:\n json.dump(rowdata, f)\n rownumber += 1\n if rownumber > 2:\n exit()\n return rownumber\n\n def __unicode__(self):\n return unicode(self.rowdata)\n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json', mode='w') as f:\n json.dump('', f)\n while pageflag:\n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text)\n rownumber = parsePage(parsed_body, rownumber) - 1\n pageparse = siteurl + getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\n\n\nif __name__ == '__main__':\n ycombinatorParser()\n",
"step-5": "# -*- coding: utf-8 -*-\nimport requests\nimport csv\nfrom lxml import html\nimport json\n\nclass ycombinatorParser():\n siteurl = 'https://news.ycombinator.com/' \n\n def getNextPage(pageurl):\n response = requests.get(pageurl)\n parsed_body = html.fromstring(response.text)\n nextpage=parsed_body.xpath('//a[@class=\"morelink\"]')\n try:\n nexthref=nextpage[0].get('href')\n except IndexError:\n nexthref = ''\n return nexthref \n\n\n def parsePage(parsed_body,rownumber):\n def jsonWriteLine(rownumber,title,autor,url,site):\n line = '{\"Rownumber\": %d,\\n \"title\": \"%s\",\\n \"autor\": \"%s\",\\n \"url\": \"%s\",\\n \"site\": \"%s\",\\n }\\n' %(rownumber,title,autor,url,site)\n #print line\n return line\n\n def getNews(rownews):\n newsdict = {}\n for news in rownews:\n newsdict[\"title\"] = ''.join(news.xpath('./a/text()'))\n for i in news.xpath('./a'):\n newsdict[\"url\"] = i.get('href')\n newsdict[\"site\"] = ''.join(news.xpath('./span/a/span/text()'))\n return newsdict\n\n def getAuthor(rowautor):\n authordict = {}\n for author in rowautor:\n authordict[\"autor\"] = ''.join(author.xpath('./a[1]/text()'))\n return authordict\n\n for row in parsed_body.xpath('//tr'):\n rownews = row.xpath('./td[@class=\"title\"][2]')\n rowautor = row.xpath('./td[@class=\"subtext\"][1]')\n datadict = {}\n rowdata = {}\n if rownews:\n datadict = getNews(rownews)\n if rowautor:\n for author in rowautor:\n datadict = getAuthor(rowautor)\n\n if datadict:\n autor = ''\n try:\n title=datadict[\"title\"]\n url=datadict[\"url\"]\n site=datadict[\"site\"]\n except KeyError:\n autor = datadict[\"autor\"]\n\n if autor:\n rowdata['rownumber'] = str(rownumber)\n rowdata['title'] = str(title)\n rowdata['autor'] = str(autor)\n rowdata['url'] = str(url)\n rowdata['site'] = str(site)\n \n with open('nix.json',mode='a') as f:\n json.dump(rowdata,f)\n \n #outputfile.write(jsonWriteLine(rownumber,title,autor,url,site)) \n \n #print jsonWriteLine(rownumber,title,autor,url,site)\n rownumber += 1\n if rownumber>2:\n exit()\n return rownumber\n \n def __unicode__(self):\n return unicode(self.rowdata)\n \n pageflag = True\n rownumber = 1\n pageparse = siteurl\n with open('nix.json',mode='w') as f:\n json.dump('',f)\n while pageflag: \n response = requests.get(pageparse)\n parsed_body = html.fromstring(response.text) \n\n rownumber = parsePage(parsed_body,rownumber)-1\n\n pageparse = siteurl+getNextPage(pageparse)\n if pageparse == siteurl:\n pageflag = False\nif __name__ == '__main__':\n ycombinatorParser()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
# description:
# author:jack
# create_time: 2017/12/30
"""
Card base class
"""
import logging
class BaseCard(object):
def __init__(self, field=[]):
self.data = {}
self.support_set_field = field
def add_cue_words(self, arr):
"""
        Add "cue words" to the card to prompt the user for input
:param arr:
:return:
"""
if arr:
if isinstance(arr, str):
arr = [arr]
if 'cueWords' in self.data:
self.data['cueWords'] = self.data['cueWords']
else:
self.data['cueWords'] = []
self.data['cueWords'].extend(arr)
return self
def set_anchor(self, url, anchor_text):
"""
        Set the card's link
        :param url: e.g. http(s)://....
        :param anchor_text: the text displayed for the link
:return:
"""
if url:
self.data['url'] = url
if anchor_text:
self.data['anchorText'] = anchor_text
return self
def get_data(self):
return self.data
def __getattr__(self, item):
"""
        Magic method that resolves dynamic set_<field> setters
:param item:
:return:
"""
        # Get the operation type, e.g. "set"
operation = item[0:3]
        # Get the field being operated on: set_xxxx -> xxxx
field = item[4:]
if operation == 'set' and field and (field.lower() in self.support_set_field):
def function(*args):
self.data[field.lower()] = args[0]
return function
else:
def function(*args):
logging.info("不支持 %s_%s" % (operation, field))
print('不支持', operation, field)
return function
if __name__ == '__main__':
pass
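# Illustrative usage (a sketch; the values below are examples only):
#   card = BaseCard(field=['title'])
#   card.set_title('weather')            # resolved dynamically by __getattr__
#   card.add_cue_words(['ask about tomorrow'])
#   card.get_data()
#   # -> {'title': 'weather', 'cueWords': ['ask about tomorrow']}
#   card.set_foo('x')                    # 'foo' not in support_set_field: only logged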
|
normal
|
{
"blob_id": "93e5852df00733c024a59d37699bae58bd893030",
"index": 112,
"step-1": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseCard(object):\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n <mask token>\n <mask token>\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n if 'cueWords' in self.data:\n self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n operation = item[0:3]\n field = item[4:]\n if operation == 'set' and field and field.lower(\n ) in self.support_set_field:\n\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n\n def function(*args):\n logging.info('不支持 %s_%s' % (operation, field))\n print('不支持', operation, field)\n return function\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\n# description:\n# author:jack\n# create_time: 2017/12/30\n\"\"\"\n卡片基类\n\"\"\"\nimport logging\n\n\nclass BaseCard(object):\n\n def __init__(self, field=[]):\n self.data = {}\n self.support_set_field = field\n\n def add_cue_words(self, arr):\n \"\"\"\n 为卡片添加cue words 提示用户输入\n :param arr:\n :return:\n \"\"\"\n\n if arr:\n if isinstance(arr, str):\n arr = [arr]\n\n if 'cueWords' in self.data:\n self.data['cueWords'] = self.data['cueWords']\n else:\n self.data['cueWords'] = []\n\n self.data['cueWords'].extend(arr)\n return self\n\n def set_anchor(self, url, anchor_text):\n \"\"\"\n 设置卡片链接\n :param url: 比如:http(s)://....\n :param anchor_text: 链接显示的文字\n :return:\n \"\"\"\n\n if url:\n self.data['url'] = url\n if anchor_text:\n self.data['anchorText'] = anchor_text\n return self\n\n def get_data(self):\n return self.data\n\n def __getattr__(self, item):\n \"\"\"\n 添加魔术方法\n :param item:\n :return:\n \"\"\"\n # 获取操作类型 set\n operation = item[0:3]\n # 获取被操作的属性 set_xxxx 获取xxxx\n field = item[4:]\n if operation == 'set' and field and (field.lower() in self.support_set_field):\n def function(*args):\n self.data[field.lower()] = args[0]\n return function\n else:\n def function(*args):\n logging.info(\"不支持 %s_%s\" % (operation, field))\n print('不支持', operation, field)\n\n return function\n\n\nif __name__ == '__main__':\n pass\n",
"step-ids": [
2,
3,
4,
7,
9
]
}
|
[
2,
3,
4,
7,
9
] |
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
# Init app
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
    os.path.join(basedir, 'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)
# Product Class/Model
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
description = db.Column(db.String(200))
price = db.Column(db.Float)
qty = db.Column(db.Integer)
# Product Schema
class ProductSchema(ma.Schema):
class Meta:
fields = ('id', 'name', 'description', 'price', 'qty')
# Init schema
product_schema = ProductSchema(strict=True)
products_schema = ProductSchema(many=True, strict=True)
# Run Server
if __name__ == '__main__':
app.run(debug=True)
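# A minimal create-route sketch (illustrative; assumes db.create_all() has been
# run so the table exists; route name and field handling are examples only):
#
#   @app.route('/product', methods=['POST'])
#   def add_product():
#       product = Product(name=request.json['name'],
#                         description=request.json['description'],
#                         price=request.json['price'],
#                         qty=request.json['qty'])
#       db.session.add(product)
#       db.session.commit()
#       return product_schema.jsonify(product)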
|
normal
|
{
"blob_id": "ccb131171472d0a92d571e94453be97b323b4484",
"index": 7081,
"step-1": "<mask token>\n\n\nclass ProductSchema(ma.Schema):\n\n\n class Meta:\n fields = 'id', 'name', 'description', 'price', 'qty'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n description = db.Column(db.String(200))\n price = db.Column(db.Float)\n qty = db.Column(db.Integer)\n\n\nclass ProductSchema(ma.Schema):\n\n\n class Meta:\n fields = 'id', 'name', 'description', 'price', 'qty'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n description = db.Column(db.String(200))\n price = db.Column(db.Float)\n qty = db.Column(db.Integer)\n\n\nclass ProductSchema(ma.Schema):\n\n\n class Meta:\n fields = 'id', 'name', 'description', 'price', 'qty'\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "<mask token>\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEM_DATABASE_URI'] = 'sqlite///' + os.path.join(basedir,\n 'db.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n description = db.Column(db.String(200))\n price = db.Column(db.Float)\n qty = db.Column(db.Integer)\n\n\nclass ProductSchema(ma.Schema):\n\n\n class Meta:\n fields = 'id', 'name', 'description', 'price', 'qty'\n\n\nproduct_schema = ProductSchema(strict=True)\nproduct_schema = ProductSchema(many=True, strict=True)\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\n\n# Init app\napp = Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\n# Database\napp.config['SQLALCHEM_DATABASE_URI'] = 'sqlite///' + \\\n os.path.join(basedir, 'db.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# Init db\ndb = SQLAlchemy(app)\n# Init ma\nma = Marshmallow(app)\n\n# Product Class/Model\n\n\nclass Product(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), unique=True)\n description = db.Column(db.String(200))\n price = db.Column(db.Float)\n qty = db.Column(db.Integer)\n\n \n\n# Product Schema\n\n\nclass ProductSchema(ma.Schema):\n class Meta:\n fields = ('id', 'name', 'description', 'price', 'qty')\n\n\n# Init schema\nproduct_schema = ProductSchema(strict=True)\nproduct_schema = ProductSchema(many=True, strict=True)\n\n\n\n# Run Server\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
1,
3,
4,
5,
7
]
}
|
[
1,
3,
4,
5,
7
] |
from joecceasy import Easy
def main():
paths = ['..','.']
absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]
for i in absOfEntries:
print( i )
if __name__=='__main__':
main()
"""
def main(maxEntries = 99):
i = -1
print( "Walker test, Walking current directory:" )
for entry in Easy.WalkAnIter( ['.'] ):
i += 1 ## because i start at -1, 1st run of line will be 0
if i > maxEntries:
break
print(entry.abs)
print( ' \n ' )
"""
#isFileByPython = os.path.isfile(entry.abs)
# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,
# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, sep=' ')
#end='' )
#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )
#print( entry.__dict__ )
|
normal
|
{
"blob_id": "b720a52f1c2e6e6be7c0887cd94441d248382242",
"index": 1836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "from joecceasy import Easy\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "from joecceasy import Easy\r\n\r\ndef main():\r\n \r\n paths = ['..','.']\r\n absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]\r\n for i in absOfEntries:\r\n print( i )\r\n \r\nif __name__=='__main__':\r\n main()\r\n \r\n \r\n\"\"\"\r\ndef main(maxEntries = 99):\r\n i = -1\r\n print( \"Walker test, Walking current directory:\" )\r\n for entry in Easy.WalkAnIter( ['.'] ):\r\n i += 1 ## because i start at -1, 1st run of line will be 0\r\n if i > maxEntries:\r\n break\r\n print(entry.abs)\r\n print( ' \\n ' )\r\n\"\"\"\r\n\r\n#isFileByPython = os.path.isfile(entry.abs)\r\n# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,\r\n# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')\r\n#end='' )\r\n#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )\r\n#print( entry.__dict__ )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@app.route('/verify', methods=['GET', 'POST'])
def verify():
content = request.get_json(silent=True, force=True)
print(content)
if content == None:
return jsonify('No json data is sent.')
sig = content.get('sig')
payload = content.get('payload')
message = payload.get('message')
pk = payload.get('pk')
platform = payload.get('platform')
if platform == 'Ethereum':
encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(
payload))
result = eth_account.Account.recover_message(encoded_msg, signature=sig
) == pk
else:
result = algosdk.util.verify_bytes(json.dumps(payload).encode(
'utf-8'), sig, pk)
return jsonify(result)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/verify', methods=['GET', 'POST'])
def verify():
content = request.get_json(silent=True, force=True)
print(content)
if content == None:
return jsonify('No json data is sent.')
sig = content.get('sig')
payload = content.get('payload')
message = payload.get('message')
pk = payload.get('pk')
platform = payload.get('platform')
if platform == 'Ethereum':
encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(
payload))
result = eth_account.Account.recover_message(encoded_msg, signature=sig
) == pk
else:
result = algosdk.util.verify_bytes(json.dumps(payload).encode(
'utf-8'), sig, pk)
return jsonify(result)
if __name__ == '__main__':
app.run(port='5002')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
api = Api(app)
app.url_map.strict_slashes = False
@app.route('/verify', methods=['GET', 'POST'])
def verify():
content = request.get_json(silent=True, force=True)
print(content)
if content == None:
return jsonify('No json data is sent.')
sig = content.get('sig')
payload = content.get('payload')
message = payload.get('message')
pk = payload.get('pk')
platform = payload.get('platform')
if platform == 'Ethereum':
encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(
payload))
result = eth_account.Account.recover_message(encoded_msg, signature=sig
) == pk
else:
result = algosdk.util.verify_bytes(json.dumps(payload).encode(
'utf-8'), sig, pk)
return jsonify(result)
if __name__ == '__main__':
app.run(port='5002')
<|reserved_special_token_1|>
from flask import Flask, request, jsonify
from flask_restful import Api
import json
import eth_account
import algosdk
app = Flask(__name__)
api = Api(app)
app.url_map.strict_slashes = False
@app.route('/verify', methods=['GET', 'POST'])
def verify():
content = request.get_json(silent=True, force=True)
print(content)
if content == None:
return jsonify('No json data is sent.')
sig = content.get('sig')
payload = content.get('payload')
message = payload.get('message')
pk = payload.get('pk')
platform = payload.get('platform')
if platform == 'Ethereum':
encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(
payload))
result = eth_account.Account.recover_message(encoded_msg, signature=sig
) == pk
else:
result = algosdk.util.verify_bytes(json.dumps(payload).encode(
'utf-8'), sig, pk)
return jsonify(result)
if __name__ == '__main__':
app.run(port='5002')
<|reserved_special_token_1|>
from flask import Flask, request, jsonify
from flask_restful import Api
import json
import eth_account
import algosdk
app = Flask(__name__)
api = Api(app)
app.url_map.strict_slashes = False
@app.route('/verify', methods=['GET','POST'])
def verify():
content = request.get_json(silent=True, force=True)
#Check if signature is valid
print(content)
if content == None:
return jsonify("No json data is sent.")
sig = content.get('sig')
payload = content.get('payload')
message = payload.get('message')
pk = payload.get('pk')
platform = payload.get('platform')
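    # Ethereum payloads are checked with eth_account; anything else is verified as an Algorand signature via algosdk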
if platform == "Ethereum":
encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(payload))
result = eth_account.Account.recover_message(encoded_msg,signature=sig) == pk
else:
result = algosdk.util.verify_bytes(json.dumps(payload).encode('utf-8'), sig, pk)
return jsonify(result)
if __name__ == '__main__':
app.run(port='5002')
|
flexible
|
{
"blob_id": "8bae45de54535e7b0788aa12717645ae9f193664",
"index": 8113,
"step-1": "<mask token>\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-3": "<mask token>\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = False\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-4": "from flask import Flask, request, jsonify\nfrom flask_restful import Api\nimport json\nimport eth_account\nimport algosdk\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = False\n\n\[email protected]('/verify', methods=['GET', 'POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n print(content)\n if content == None:\n return jsonify('No json data is sent.')\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == 'Ethereum':\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(\n payload))\n result = eth_account.Account.recover_message(encoded_msg, signature=sig\n ) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode(\n 'utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-5": "from flask import Flask, request, jsonify\nfrom flask_restful import Api\nimport json\nimport eth_account\nimport algosdk\n\napp = Flask(__name__)\napi = Api(app)\napp.url_map.strict_slashes = False\n\[email protected]('/verify', methods=['GET','POST'])\ndef verify():\n content = request.get_json(silent=True, force=True)\n #Check if signature is valid\n print(content)\n if content == None:\n return jsonify(\"No json data is sent.\")\n sig = content.get('sig')\n payload = content.get('payload')\n message = payload.get('message')\n pk = payload.get('pk')\n platform = payload.get('platform')\n if platform == \"Ethereum\":\n encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(payload))\n result = eth_account.Account.recover_message(encoded_msg,signature=sig) == pk\n else:\n result = algosdk.util.verify_bytes(json.dumps(payload).encode('utf-8'), sig, pk)\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
STOP_WORDS = set(
"""
あそこ
あたり
あちら
あっち
あと
あな
あなた
あれ
いくつ
いつ
いま
いや
いろいろ
うち
おおまか
おまえ
おれ
がい
かく
かたち
かやの
から
がら
きた
くせ
ここ
こっち
こと
ごと
こちら
ごっちゃ
これ
これら
ごろ
さまざま
さらい
さん
しかた
しよう
すか
ずつ
すね
すべて
ぜんぶ
そう
そこ
そちら
そっち
そで
それ
それぞれ
それなり
たくさん
たち
たび
ため
だめ
ちゃ
ちゃん
てん
とおり
とき
どこ
どこか
ところ
どちら
どっか
どっち
どれ
なか
なかば
なに
など
なん
はじめ
はず
はるか
ひと
ひとつ
ふく
ぶり
べつ
へん
ぺん
ほう
ほか
まさ
まし
まとも
まま
みたい
みつ
みなさん
みんな
もと
もの
もん
やつ
よう
よそ
わけ
わたし
ハイ
上
中
下
字
年
月
日
時
分
秒
週
火
水
木
金
土
国
都
道
府
県
市
区
町
村
各
第
方
何
的
度
文
者
性
体
人
他
今
部
課
係
外
類
達
気
室
口
誰
用
界
会
首
男
女
別
話
私
屋
店
家
場
等
見
際
観
段
略
例
系
論
形
間
地
員
線
点
書
品
力
法
感
作
元
手
数
彼
彼女
子
内
楽
喜
怒
哀
輪
頃
化
境
俺
奴
高
校
婦
伸
紀
誌
レ
行
列
事
士
台
集
様
所
歴
器
名
情
連
毎
式
簿
回
匹
個
席
束
歳
目
通
面
円
玉
枚
前
後
左
右
次
先
春
夏
秋
冬
一
二
三
四
五
六
七
八
九
十
百
千
万
億
兆
下記
上記
時間
今回
前回
場合
一つ
年生
自分
ヶ所
ヵ所
カ所
箇所
ヶ月
ヵ月
カ月
箇月
名前
本当
確か
時点
全部
関係
近く
方法
我々
違い
多く
扱い
新た
その後
半ば
結局
様々
以前
以後
以降
未満
以上
以下
幾つ
毎日
自体
向こう
何人
手段
同じ
感じ
"""
.split())
<|reserved_special_token_1|>
"""Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt"""
STOP_WORDS = set(
"""
あそこ
あたり
あちら
あっち
あと
あな
あなた
あれ
いくつ
いつ
いま
いや
いろいろ
うち
おおまか
おまえ
おれ
がい
かく
かたち
かやの
から
がら
きた
くせ
ここ
こっち
こと
ごと
こちら
ごっちゃ
これ
これら
ごろ
さまざま
さらい
さん
しかた
しよう
すか
ずつ
すね
すべて
ぜんぶ
そう
そこ
そちら
そっち
そで
それ
それぞれ
それなり
たくさん
たち
たび
ため
だめ
ちゃ
ちゃん
てん
とおり
とき
どこ
どこか
ところ
どちら
どっか
どっち
どれ
なか
なかば
なに
など
なん
はじめ
はず
はるか
ひと
ひとつ
ふく
ぶり
べつ
へん
ぺん
ほう
ほか
まさ
まし
まとも
まま
みたい
みつ
みなさん
みんな
もと
もの
もん
やつ
よう
よそ
わけ
わたし
ハイ
上
中
下
字
年
月
日
時
分
秒
週
火
水
木
金
土
国
都
道
府
県
市
区
町
村
各
第
方
何
的
度
文
者
性
体
人
他
今
部
課
係
外
類
達
気
室
口
誰
用
界
会
首
男
女
別
話
私
屋
店
家
場
等
見
際
観
段
略
例
系
論
形
間
地
員
線
点
書
品
力
法
感
作
元
手
数
彼
彼女
子
内
楽
喜
怒
哀
輪
頃
化
境
俺
奴
高
校
婦
伸
紀
誌
レ
行
列
事
士
台
集
様
所
歴
器
名
情
連
毎
式
簿
回
匹
個
席
束
歳
目
通
面
円
玉
枚
前
後
左
右
次
先
春
夏
秋
冬
一
二
三
四
五
六
七
八
九
十
百
千
万
億
兆
下記
上記
時間
今回
前回
場合
一つ
年生
自分
ヶ所
ヵ所
カ所
箇所
ヶ月
ヵ月
カ月
箇月
名前
本当
確か
時点
全部
関係
近く
方法
我々
違い
多く
扱い
新た
その後
半ば
結局
様々
以前
以後
以降
未満
以上
以下
幾つ
毎日
自体
向こう
何人
手段
同じ
感じ
""".split()
)
|
flexible
|
{
"blob_id": "254afebcc909c805d1e4972a0910eb4451d1e64e",
"index": 8704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\"\n .split())\n",
"step-3": "\"\"\"Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt\"\"\"\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\".split()\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# csv URL
url = "https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv"
# read csv from URL
import pandas as pd
import geopandas as gpd
import numpy as np
df=pd.read_csv(url,sep=";")
df.to_csv("/var/www/FlaskApp/FlaskApp/data/covid_data.csv",sep=";",index=False)
# transforming timestamps to proper DateTime format
import datetime as dt
from datetime import datetime
import time
timestamps = []
for i in df["MeldeDatum"]:
i = i.replace(".","")
i = i.replace(":","")
timestamps.append(dt.datetime.strptime(i, "%d%m%Y %H%M%S"))
df["MeldeDatum"] = timestamps
df = df.drop(["Meldedat"], axis=1)
# get List of State Names
states = list(df["Bundesland"].unique())
# append total hospitalizations to DF
l_temp = []
for a,b in zip(df["FZHosp"],df["FZICU"]):
l_temp.append(a+b)
df["Hospitalizations_total"] = l_temp
# append total ICU capacity to DF
l_temp = []
for a,b in zip(df["FZICU"],df["FZICUFree"]):
l_temp.append(a+b)
df["ICU_capacity"] = l_temp
# append ICU occupancy percentages to DF
l_temp = []
for a,b in zip(df["FZICU"],df["ICU_capacity"]):
try:
l_temp.append(100.0 * float(a)/float(b))
except ZeroDivisionError:
l_temp.append(0.0)
df["ICU_perc"] = l_temp
# create list of dataframes by Bundesland
ls_df = []
for i in states:
temp = df[df["Bundesland"]==i]
ls_df.append(temp)
# importing adm0 and adm1 shapefiles as geopandas dataframes
adm1 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp")
adm0 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp")
#writing to json
#adm1.to_file("data/austria_adm1.geojson", driver="GeoJSON")
#adm0.to_file("data/austria_adm0.geojson", driver="GeoJSON")
# save CSV after manipulating & rounding
df = df.round(1)
df.to_csv("/var/www/FlaskApp/FlaskApp/data/ICU_data.csv")
# create most recent DF for map
most_recent_date = df['MeldeDatum'].max()
df2 = df.loc[df['MeldeDatum'] == most_recent_date]
df2.to_pickle("/var/www/FlaskApp/FlaskApp/data/df2.pkl")
# join geometries with most recent data per state
df_map =gpd.read_file("/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson")
df_map["Bundesland"] = df_map["NAME_1"]
df_map = pd.merge(df2,df_map,on="Bundesland")
df_map = gpd.GeoDataFrame(df_map, geometry="geometry")
df_map.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_map.pkl")
# drop unused columns and save file in data folder
df_map.drop(["BundeslandID","GID_0","NAME_0","NAME_1","GID_1","VARNAME_1","NL_NAME_1","TYPE_1","ENGTYPE_1","CC_1","HASC_1","test_value"],axis=1).to_csv("/var/www/FlaskApp/FlaskApp/data/df_map.csv",index=False)
"""
CREATE DFs FOR UPDATE GRAPHS
"""
df_perc = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["ICU_perc"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["ICU_perc"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["ICU_perc"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["ICU_perc"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["ICU_perc"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["ICU_perc"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["ICU_perc"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["ICU_perc"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["ICU_perc"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["ICU_perc"]),
})
df_perc.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_perc.pkl")
df_FZICU = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["FZICU"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["FZICU"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["FZICU"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["FZICU"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["FZICU"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["FZICU"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["FZICU"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["FZICU"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["FZICU"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["FZICU"]),
})
df_FZICU.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl")
df_ICU_cap = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["ICU_capacity"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["ICU_capacity"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["ICU_capacity"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["ICU_capacity"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["ICU_capacity"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["ICU_capacity"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["ICU_capacity"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["ICU_capacity"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["ICU_capacity"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["ICU_capacity"]),
})
df_ICU_cap.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl")
# Writing to logfile
file_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
file_object.write('Success: '+date_time+"\n")
file_object.close()
"""
DB CONNECTOR
"""
# DB create string from csv for COVID data
import csv
with open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:
instr = ""
reader = csv.reader(f,delimiter=";")
#print(reader)
next(reader) # Skip the header row.
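    # build one INSERT statement per CSV row by splicing the raw column values into the SQL string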
for row in reader:
instr=instr+("INSERT INTO icu_data VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"');" )
# DB create string from csv for MAP data
import csv
import sys
csv.field_size_limit(sys.maxsize)
with open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:
instr_map = ""
reader = csv.reader(f,delimiter=",")
#print(reader)
next(reader) # Skip the header row.
for row in reader:
instr_map=instr_map+("INSERT INTO icu_map VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"','"+str(row[9])+"','"+str(row[10])+"');" )
""" connecting to DB, parsing SQL statements """
def csv_parser(statement):
import psycopg2
    return_ls = []
    connection = None
    cursor = None
    try:
connection = psycopg2.connect(user="icu_bot",
password="5B2xwP8h4Ln4Y8Xs",
host="85.214.150.208",
port="5432",
database="ICU")
cursor = connection.cursor()
sql_Query = statement
#print(sql_Query)
cursor.execute(sql_Query)
connection.commit()
#print("Selecting rows from mobile table using cursor.fetchall")
#mobile_records = cursor.fetchall()
#print("Print each row and it's columns values")
#for row in mobile_records:
# return_ls.append(list(row))
except (Exception, psycopg2.Error) as error :
print ("Error while fetching data from PostgreSQL: ", error)
    finally:
        # closing database connection (skipped if the connection never opened)
        if connection:
            if cursor:
                cursor.close()
            connection.close()
            #print("PostgreSQL connection is closed")
return return_ls
# update database in postgis
csv_parser("DELETE FROM icu_data")
csv_parser(instr)
# Update map data in server
csv_parser("DELETE FROM icu_map")
csv_parser(instr_map)
"""
GeoServer Connector
"""
try:
    df_geojson = pd.read_json("https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson")
    df_geojson.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl")
except Exception:
    print("an exception occurred connecting to the geoserver")
|
normal
|
{
"blob_id": "516ea681a55255e4c98e7106393180f9ad2e0250",
"index": 8455,
"step-1": "<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\n<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\n<mask token>\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\n<mask token>\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\n<mask token>\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\n<mask token>\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\n<mask token>\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\n<mask token>\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\n<mask token>\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\n<mask token>\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\n<mask token>\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\n<mask token>\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\n<mask token>\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\n<mask token>\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-3": "url = 'https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv'\n<mask token>\ndf = pd.read_csv(url, sep=';')\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\n<mask token>\ntimestamps = []\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\ndf['MeldeDatum'] = timestamps\ndf = df.drop(['Meldedat'], axis=1)\nstates = list(df['Bundesland'].unique())\nl_temp = []\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\ndf['Hospitalizations_total'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\ndf['ICU_capacity'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf['ICU_perc'] = l_temp\nls_df = []\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\nadm1 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp')\nadm0 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp')\ndf = df.round(1)\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\ndf_map = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson')\ndf_map['Bundesland'] = df_map['NAME_1']\ndf_map = pd.merge(df2, df_map, on='Bundesland')\ndf_map = gpd.GeoDataFrame(df_map, geometry='geometry')\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_perc']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['ICU_perc']), 'Kärnten': np.asarray(df.loc[df[\n 'Bundesland'] == 'Kärnten']['ICU_perc']), 'Niederösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Niederösterreich']['ICU_perc']),\n 'Oberösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Oberösterreich']['ICU_perc']), 'Salzburg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Salzburg']['ICU_perc']), 'Steiermark': np.asarray(df.\n loc[df['Bundesland'] == 'Steiermark']['ICU_perc']), 'Tirol': np.asarray\n (df.loc[df['Bundesland'] == 'Tirol']['ICU_perc']), 'Vorarlberg': np.\n asarray(df.loc[df['Bundesland'] == 'Vorarlberg']['ICU_perc']), 'Wien':\n np.asarray(df.loc[df['Bundesland'] == 'Wien']['ICU_perc'])})\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\ndf_FZICU = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['FZICU']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['FZICU']), 'Kärnten': np.asarray(df.loc[df['Bundesland'] ==\n 'Kärnten']['FZICU']), 'Niederösterreich': np.asarray(df.loc[df[\n 'Bundesland'] == 'Niederösterreich']['FZICU']), 'Oberösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Oberösterreich']['FZICU']),\n 'Salzburg': np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['FZICU']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'FZICU']), 'Tirol': 
np.asarray(df.loc[df['Bundesland'] == 'Tirol'][\n 'FZICU']), 'Vorarlberg': np.asarray(df.loc[df['Bundesland'] ==\n 'Vorarlberg']['FZICU']), 'Wien': np.asarray(df.loc[df['Bundesland'] ==\n 'Wien']['FZICU'])})\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\ndf_ICU_cap = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_capacity']), 'Burgenland': np.asarray(df.loc[df[\n 'Bundesland'] == 'Burgenland']['ICU_capacity']), 'Kärnten': np.asarray(\n df.loc[df['Bundesland'] == 'Kärnten']['ICU_capacity']),\n 'Niederösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Niederösterreich']['ICU_capacity']), 'Oberösterreich': np.asarray(df.\n loc[df['Bundesland'] == 'Oberösterreich']['ICU_capacity']), 'Salzburg':\n np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['ICU_capacity']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'ICU_capacity']), 'Tirol': np.asarray(df.loc[df['Bundesland'] ==\n 'Tirol']['ICU_capacity']), 'Vorarlberg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Vorarlberg']['ICU_capacity']), 'Wien': np.asarray(df.\n loc[df['Bundesland'] == 'Wien']['ICU_capacity'])})\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now()\ndate_time = now.strftime('%m/%d/%Y, %H:%M:%S')\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\n<mask token>\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-4": "url = 'https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv'\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\ndf = pd.read_csv(url, sep=';')\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\nimport datetime as dt\nfrom datetime import datetime\nimport time\ntimestamps = []\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\ndf['MeldeDatum'] = timestamps\ndf = df.drop(['Meldedat'], axis=1)\nstates = list(df['Bundesland'].unique())\nl_temp = []\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\ndf['Hospitalizations_total'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\ndf['ICU_capacity'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf['ICU_perc'] = l_temp\nls_df = []\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\nadm1 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp')\nadm0 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp')\ndf = df.round(1)\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\ndf_map = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson')\ndf_map['Bundesland'] = df_map['NAME_1']\ndf_map = pd.merge(df2, df_map, on='Bundesland')\ndf_map = gpd.GeoDataFrame(df_map, geometry='geometry')\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_perc']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['ICU_perc']), 'Kärnten': np.asarray(df.loc[df[\n 'Bundesland'] == 'Kärnten']['ICU_perc']), 'Niederösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Niederösterreich']['ICU_perc']),\n 'Oberösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Oberösterreich']['ICU_perc']), 'Salzburg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Salzburg']['ICU_perc']), 'Steiermark': np.asarray(df.\n loc[df['Bundesland'] == 'Steiermark']['ICU_perc']), 'Tirol': np.asarray\n (df.loc[df['Bundesland'] == 'Tirol']['ICU_perc']), 'Vorarlberg': np.\n asarray(df.loc[df['Bundesland'] == 'Vorarlberg']['ICU_perc']), 'Wien':\n np.asarray(df.loc[df['Bundesland'] == 'Wien']['ICU_perc'])})\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\ndf_FZICU = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['FZICU']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['FZICU']), 'Kärnten': np.asarray(df.loc[df['Bundesland'] ==\n 'Kärnten']['FZICU']), 'Niederösterreich': np.asarray(df.loc[df[\n 'Bundesland'] == 'Niederösterreich']['FZICU']), 'Oberösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Oberösterreich']['FZICU']),\n 'Salzburg': np.asarray(df.loc[df['Bundesland'] == 
'Salzburg']['FZICU']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'FZICU']), 'Tirol': np.asarray(df.loc[df['Bundesland'] == 'Tirol'][\n 'FZICU']), 'Vorarlberg': np.asarray(df.loc[df['Bundesland'] ==\n 'Vorarlberg']['FZICU']), 'Wien': np.asarray(df.loc[df['Bundesland'] ==\n 'Wien']['FZICU'])})\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\ndf_ICU_cap = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_capacity']), 'Burgenland': np.asarray(df.loc[df[\n 'Bundesland'] == 'Burgenland']['ICU_capacity']), 'Kärnten': np.asarray(\n df.loc[df['Bundesland'] == 'Kärnten']['ICU_capacity']),\n 'Niederösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Niederösterreich']['ICU_capacity']), 'Oberösterreich': np.asarray(df.\n loc[df['Bundesland'] == 'Oberösterreich']['ICU_capacity']), 'Salzburg':\n np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['ICU_capacity']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'ICU_capacity']), 'Tirol': np.asarray(df.loc[df['Bundesland'] ==\n 'Tirol']['ICU_capacity']), 'Vorarlberg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Vorarlberg']['ICU_capacity']), 'Wien': np.asarray(df.\n loc[df['Bundesland'] == 'Wien']['ICU_capacity'])})\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now()\ndate_time = now.strftime('%m/%d/%Y, %H:%M:%S')\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nimport csv\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\nimport csv\nimport sys\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n 
df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-5": "# csv URL\nurl = \"https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv\"\n\n# read csv from URL\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\ndf=pd.read_csv(url,sep=\";\")\ndf.to_csv(\"/var/www/FlaskApp/FlaskApp/data/covid_data.csv\",sep=\";\",index=False)\n\n# transforming timestamps to proper DateTime format\nimport datetime as dt\nfrom datetime import datetime\nimport time\ntimestamps = []\nfor i in df[\"MeldeDatum\"]:\n i = i.replace(\".\",\"\")\n i = i.replace(\":\",\"\")\n timestamps.append(dt.datetime.strptime(i, \"%d%m%Y %H%M%S\"))\ndf[\"MeldeDatum\"] = timestamps\ndf = df.drop([\"Meldedat\"], axis=1)\n\n# get List of State Names\nstates = list(df[\"Bundesland\"].unique())\n\n# append total hospitalizations to DF\nl_temp = []\nfor a,b in zip(df[\"FZHosp\"],df[\"FZICU\"]):\n l_temp.append(a+b)\ndf[\"Hospitalizations_total\"] = l_temp\n\n# append total ICU capacity to DF\nl_temp = []\nfor a,b in zip(df[\"FZICU\"],df[\"FZICUFree\"]):\n l_temp.append(a+b)\ndf[\"ICU_capacity\"] = l_temp\n\n# append ICU occupancy percentages to DF\nl_temp = []\nfor a,b in zip(df[\"FZICU\"],df[\"ICU_capacity\"]):\n try:\n l_temp.append(100.0 * float(a)/float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf[\"ICU_perc\"] = l_temp\n\n# create list of dataframes by Bundesland\nls_df = []\nfor i in states:\n temp = df[df[\"Bundesland\"]==i]\n ls_df.append(temp)\n \n# importing adm0 and adm1 shapefilesas geopandas dataframes\nadm1 = gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp\")\nadm0 = gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp\")\n\n#writing to json\n#adm1.to_file(\"data/austria_adm1.geojson\", driver=\"GeoJSON\")\n#adm0.to_file(\"data/austria_adm0.geojson\", driver=\"GeoJSON\") \n\n# save CSV after manipulating & rounding\ndf = df.round(1)\ndf.to_csv(\"/var/www/FlaskApp/FlaskApp/data/ICU_data.csv\")\n\n# create most recent DF for map\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df2.pkl\")\n\n# join geometries with most recent data per state\ndf_map =gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson\")\ndf_map[\"Bundesland\"] = df_map[\"NAME_1\"]\ndf_map = pd.merge(df2,df_map,on=\"Bundesland\")\ndf_map = gpd.GeoDataFrame(df_map, geometry=\"geometry\")\ndf_map.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_map.pkl\")\n# drop unused columns and save file in data folder\ndf_map.drop([\"BundeslandID\",\"GID_0\",\"NAME_0\",\"NAME_1\",\"GID_1\",\"VARNAME_1\",\"NL_NAME_1\",\"TYPE_1\",\"ENGTYPE_1\",\"CC_1\",\"HASC_1\",\"test_value\"],axis=1).to_csv(\"/var/www/FlaskApp/FlaskApp/data/df_map.csv\",index=False)\n\n\n\"\"\"\nCREATE DFs FOR UPDATE GRAPHS\n\"\"\"\ndf_perc = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"ICU_perc\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"ICU_perc\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"ICU_perc\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"ICU_perc\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"ICU_perc\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"ICU_perc\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"ICU_perc\"]),\n \"Tirol\": 
np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"ICU_perc\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"ICU_perc\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"ICU_perc\"]),\n})\ndf_perc.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_perc.pkl\")\n\ndf_FZICU = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"FZICU\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"FZICU\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"FZICU\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"FZICU\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"FZICU\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"FZICU\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"FZICU\"]),\n \"Tirol\": np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"FZICU\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"FZICU\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"FZICU\"]),\n})\ndf_FZICU.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl\")\n\ndf_ICU_cap = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"ICU_capacity\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"ICU_capacity\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"ICU_capacity\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"ICU_capacity\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"ICU_capacity\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"ICU_capacity\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"ICU_capacity\"]),\n \"Tirol\": np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"ICU_capacity\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"ICU_capacity\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"ICU_capacity\"]),\n})\ndf_ICU_cap.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl\")\n\n# Writing to logfile\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now() # current date and time\ndate_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\nfile_object.write('Success: '+date_time+\"\\n\")\nfile_object.close()\n\n\n\n\"\"\"\n\nDB CONNECTOR\n\n\"\"\"\n\n# DB create string from csv for COVID data\nimport csv\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = \"\"\n reader = csv.reader(f,delimiter=\";\")\n #print(reader)\n next(reader) # Skip the header row.\n for row in reader:\n instr=instr+(\"INSERT INTO icu_data VALUES ('\"+str(row[0])+\"','\"+str(row[1])+\"','\"+str(row[2])+\"','\"+str(row[3])+\"','\"+str(row[4])+\"','\"+str(row[5])+\"','\"+str(row[6])+\"','\"+str(row[7])+\"','\"+str(row[8])+\"');\" ) \n\n# DB create string from csv for MAP data\nimport csv\nimport sys\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = \"\"\n reader = csv.reader(f,delimiter=\",\")\n #print(reader)\n next(reader) # Skip the header row.\n for row in reader:\n instr_map=instr_map+(\"INSERT 
INTO icu_map VALUES ('\"+str(row[0])+\"','\"+str(row[1])+\"','\"+str(row[2])+\"','\"+str(row[3])+\"','\"+str(row[4])+\"','\"+str(row[5])+\"','\"+str(row[6])+\"','\"+str(row[7])+\"','\"+str(row[8])+\"','\"+str(row[9])+\"','\"+str(row[10])+\"');\" )\n\n\"\"\" connecting to DB, parsing SQL statements \"\"\"\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user=\"icu_bot\",\n password=\"5B2xwP8h4Ln4Y8Xs\",\n host=\"85.214.150.208\",\n port=\"5432\",\n database=\"ICU\")\n cursor = connection.cursor()\n sql_Query = statement\n #print(sql_Query)\n cursor.execute(sql_Query)\n connection.commit()\n #print(\"Selecting rows from mobile table using cursor.fetchall\")\n #mobile_records = cursor.fetchall() \n \n #print(\"Print each row and it's columns values\")\n #for row in mobile_records:\n # return_ls.append(list(row))\n \n except (Exception, psycopg2.Error) as error :\n print (\"Error while fetching data from PostgreSQL: \", error)\n \n finally:\n #closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n #print(\"PostgreSQL connection is closed\")\n \n return return_ls\n\n\n# update database in postgis\ncsv_parser(\"DELETE FROM icu_data\")\ncsv_parser(instr)\n\n# Update map data in server\ncsv_parser(\"DELETE FROM icu_map\")\ncsv_parser(instr_map)\n\n\n\n\"\"\"\nGeoServer Connector\n\"\"\"\ntry:\n\tdf_geojson = pd.read_json(\"https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson\")\n\tdf_geojson.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl\")\nexcept:\n\tprint(\"an exception occured connecting to the geoserver\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(files):
ans = []
for i, file in enumerate(files):
head, number, tail = divide(file)
ans.append((head, number, i))
ans.sort(key=lambda x: [x[0], x[1], x[2]])
answer = []
for h, n, i in ans:
answer.append(files[i])
return answer
<|reserved_special_token_1|>
def divide(file):
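    # split the filename into a non-digit HEAD (lower-cased), the first run of
    # digits as NUMBER, and whatever follows as TAIL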
index = 0
head = ''
while True:
if file[index].isnumeric():
head_index = index
break
if file[index].isalpha():
head += file[index].lower()
else:
head += file[index]
index += 1
while True:
if index >= len(file):
number = int(file[head_index:])
tail = ''
break
if not file[index].isnumeric():
number = int(file[head_index:index])
tail = file[index:]
break
index += 1
return head, number, tail
def solution(files):
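    # sort by (head, number, original index); the index keeps equal entries in input order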
ans = []
for i, file in enumerate(files):
head, number, tail = divide(file)
ans.append((head, number, i))
ans.sort(key=lambda x: [x[0], x[1], x[2]])
answer = []
for h, n, i in ans:
answer.append(files[i])
return answer
|
flexible
|
{
"blob_id": "75837ab778e94693151de1c17b59e12f8b2336d3",
"index": 8341,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(files):\n ans = []\n for i, file in enumerate(files):\n head, number, tail = divide(file)\n ans.append((head, number, i))\n ans.sort(key=lambda x: [x[0], x[1], x[2]])\n answer = []\n for h, n, i in ans:\n answer.append(files[i])\n return answer\n",
"step-3": "def divide(file):\n index = 0\n head = ''\n while True:\n if file[index].isnumeric():\n head_index = index\n break\n if file[index].isalpha():\n head += file[index].lower()\n else:\n head += file[index]\n index += 1\n while True:\n if index >= len(file):\n number = int(file[head_index:])\n tail = ''\n break\n if not file[index].isnumeric():\n number = int(file[head_index:index])\n tail = file[index:]\n break\n index += 1\n return head, number, tail\n\n\ndef solution(files):\n ans = []\n for i, file in enumerate(files):\n head, number, tail = divide(file)\n ans.append((head, number, i))\n ans.sort(key=lambda x: [x[0], x[1], x[2]])\n answer = []\n for h, n, i in ans:\n answer.append(files[i])\n return answer\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, os.path.abspath('adjust_schedule_function'))
<|reserved_special_token_1|>
import sys, os
sys.path.insert(0, os.path.abspath('adjust_schedule_function'))
<|reserved_special_token_1|>
import sys, os
sys.path.insert(0, os.path.abspath("adjust_schedule_function"))
|
flexible
|
{
"blob_id": "19126e5041841ab1320730ae82d66c6900cf31bd",
"index": 9145,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.abspath('adjust_schedule_function'))\n",
"step-3": "import sys, os\nsys.path.insert(0, os.path.abspath('adjust_schedule_function'))\n",
"step-4": "import sys, os\n\nsys.path.insert(0, os.path.abspath(\"adjust_schedule_function\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
if login('michelle', 'michelle'):
print('Login Successfull[+]')
<|reserved_special_token_1|>
import requests
def login(username, password):
data = {'login': username, 'pwd': password, 'lang': ''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',
data=data, allow_redirects=False)
if (r.headers['Location'] ==
'../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'
):
return False
return True
if login('michelle', 'michelle'):
print('Login Successfull[+]')
<|reserved_special_token_1|>
import requests
def login(username, password):
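    # POST the credentials; SeedDMS redirects to its error page on a failed login, so any other Location means the login worked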
data = {'login':username,'pwd':password,'lang':''}
r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php', data=data, allow_redirects=False)
if r.headers['Location'] == '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect':
return False
return True
# import pdb;pdb.set_trace()
if login("michelle", "michelle"):
    print("Login Successful[+]")
|
flexible
|
{
"blob_id": "ae84b449c8919f14954633b14993e6291501bc24",
"index": 1019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\nif login('michelle', 'michelle'):\n print('Login Successfull[+]')\n",
"step-4": "import requests\n\n\ndef login(username, password):\n data = {'login': username, 'pwd': password, 'lang': ''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php',\n data=data, allow_redirects=False)\n if (r.headers['Location'] ==\n '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect'\n ):\n return False\n return True\n\n\nif login('michelle', 'michelle'):\n print('Login Successfull[+]')\n",
"step-5": "import requests\n\ndef login(username, password):\n data = {'login':username,'pwd':password,'lang':''}\n r = requests.post('http://dms-pit.htb/seeddms51x/seeddms/op/op.Login.php', data=data, allow_redirects=False)\n if r.headers['Location'] == '../out/out.Login.php?msg=Error+signing+in.+User+ID+or+password+incorrect':\n return False\n return True\n # import pdb;pdb.set_trace()\n\n\nif login(\"michelle\", \"michelle\"):\n print(\"Login Successfull[+]\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
finalImg.save('Q2.jpg')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filename = 'hw0_data/westbrook.jpg'
im = Image.open(filename)
imgs = np.array(im)
imgsDiv2 = np.trunc(imgs / 2)
imgInt = imgsDiv2.astype(np.int)
imgInt = imgInt[:, :, :3]
finalImg = Image.fromarray(np.uint8(imgInt))
finalImg.save('Q2.jpg')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from PIL import Image
import numpy as np
filename = 'hw0_data/westbrook.jpg'
im = Image.open(filename)
imgs = np.array(im)
imgsDiv2 = np.trunc(imgs / 2)
imgInt = imgsDiv2.astype(np.int)
imgInt = imgInt[:, :, :3]
finalImg = Image.fromarray(np.uint8(imgInt))
finalImg.save('Q2.jpg')
<|reserved_special_token_1|>
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
"""
@version: python3.7
@author: ‘v-enshi‘
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@file: Images_fade.py
@time: 2019/1/16 17:17
"""
from PIL import Image
import numpy as np
filename = "hw0_data/westbrook.jpg"
im=Image.open(filename) #open the image
imgs = np.array(im) #transform to array
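# halve every pixel value (truncated) to fade the image, then keep only the RGB channels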
imgsDiv2 = np.trunc(imgs/2)
imgInt = imgsDiv2.astype(np.int)
imgInt = imgInt[:,:,:3]
finalImg = Image.fromarray(np.uint8(imgInt))
finalImg.save("Q2.jpg")
# Note: if img is a uint16 array and is not converted to uint8 first, Image.fromarray will raise an error
|
flexible
|
{
"blob_id": "6e78d1fb2364d334f47fea89b065d859c025ca2f",
"index": 5648,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfinalImg.save('Q2.jpg')\n",
"step-3": "<mask token>\nfilename = 'hw0_data/westbrook.jpg'\nim = Image.open(filename)\nimgs = np.array(im)\nimgsDiv2 = np.trunc(imgs / 2)\nimgInt = imgsDiv2.astype(np.int)\nimgInt = imgInt[:, :, :3]\nfinalImg = Image.fromarray(np.uint8(imgInt))\nfinalImg.save('Q2.jpg')\n",
"step-4": "<mask token>\nfrom PIL import Image\nimport numpy as np\nfilename = 'hw0_data/westbrook.jpg'\nim = Image.open(filename)\nimgs = np.array(im)\nimgsDiv2 = np.trunc(imgs / 2)\nimgInt = imgsDiv2.astype(np.int)\nimgInt = imgInt[:, :, :3]\nfinalImg = Image.fromarray(np.uint8(imgInt))\nfinalImg.save('Q2.jpg')\n",
"step-5": "#!/usr/bin/env python\n#!-*-coding:utf-8 -*-\n\"\"\"\n@version: python3.7\n@author: ‘v-enshi‘\n@license: Apache Licence \n@contact: [email protected]\n@site: \n@software: PyCharm\n@file: Images_fade.py\n@time: 2019/1/16 17:17\n\"\"\"\nfrom PIL import Image\nimport numpy as np\n\nfilename = \"hw0_data/westbrook.jpg\"\nim=Image.open(filename) #open the image\n\nimgs = np.array(im) #transform to array\n\n\nimgsDiv2 = np.trunc(imgs/2)\nimgInt = imgsDiv2.astype(np.int)\nimgInt = imgInt[:,:,:3]\n\nfinalImg = Image.fromarray(np.uint8(imgInt))\nfinalImg.save(\"Q2.jpg\")\n#注意img如果是uint16的矩阵而不转为uint8的话,Image.fromarray这句会报错\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(proto_images_se_list.shape)
print(proto_images_bse_list.shape)
np.save('Data/SE_prototypes.npy', proto_images_se_list)
np.save('Data/BSE_prototypes.npy', proto_images_bse_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
IMG_WIDTH = 768
IMG_HEIGHT = 768
proto_images_se = glob.glob('Clonky-prototypy/*_3*')
proto_images_bse = glob.glob('Clonky-prototypy/*_4*')
proto_images_se_list = crop_reshape(proto_images_se)
proto_images_bse_list = crop_reshape(proto_images_bse)
proto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH,
IMG_HEIGHT)
proto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH,
IMG_HEIGHT)
print(proto_images_se_list.shape)
print(proto_images_bse_list.shape)
np.save('Data/SE_prototypes.npy', proto_images_se_list)
np.save('Data/BSE_prototypes.npy', proto_images_bse_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import glob
import numpy as np
import cv2
from reshape_util import crop_reshape
from reshape_util import reshape_normalize
IMG_WIDTH = 768
IMG_HEIGHT = 768
proto_images_se = glob.glob('Clonky-prototypy/*_3*')
proto_images_bse = glob.glob('Clonky-prototypy/*_4*')
proto_images_se_list = crop_reshape(proto_images_se)
proto_images_bse_list = crop_reshape(proto_images_bse)
proto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH,
IMG_HEIGHT)
proto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH,
IMG_HEIGHT)
print(proto_images_se_list.shape)
print(proto_images_bse_list.shape)
np.save('Data/SE_prototypes.npy', proto_images_se_list)
np.save('Data/BSE_prototypes.npy', proto_images_bse_list)
<|reserved_special_token_1|>
'''
Copyright (c) 2021, Štěpán Beneš
The purpose of this script it to take the 5 BSE and 5 SE hand-picked prototype
images and turn them into the same shape and format as the rest of the data.
Prototype images are resized to 768x768, the info bar is cropped off. Afterwards
the images are normalized to float32 in range [0,1] and reshaped into Keras Input
shape of (len(images), width, height, 1). Finally they are saved for further use
during anomaly detection with siamese networks.
'''
import glob
import numpy as np
import cv2
from reshape_util import crop_reshape
from reshape_util import reshape_normalize
IMG_WIDTH = 768
IMG_HEIGHT = 768
proto_images_se = glob.glob('Clonky-prototypy/*_3*')
proto_images_bse = glob.glob('Clonky-prototypy/*_4*')
proto_images_se_list = crop_reshape(proto_images_se)
proto_images_bse_list = crop_reshape(proto_images_bse)
proto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH, IMG_HEIGHT)
proto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH, IMG_HEIGHT)
print(proto_images_se_list.shape)
print(proto_images_bse_list.shape)
np.save("Data/SE_prototypes.npy", proto_images_se_list)
np.save("Data/BSE_prototypes.npy", proto_images_bse_list)
|
flexible
|
{
"blob_id": "af7af5d1048d2b0968e831aad89d5baf30cab608",
"index": 3210,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(proto_images_se_list.shape)\nprint(proto_images_bse_list.shape)\nnp.save('Data/SE_prototypes.npy', proto_images_se_list)\nnp.save('Data/BSE_prototypes.npy', proto_images_bse_list)\n",
"step-3": "<mask token>\nIMG_WIDTH = 768\nIMG_HEIGHT = 768\nproto_images_se = glob.glob('Clonky-prototypy/*_3*')\nproto_images_bse = glob.glob('Clonky-prototypy/*_4*')\nproto_images_se_list = crop_reshape(proto_images_se)\nproto_images_bse_list = crop_reshape(proto_images_bse)\nproto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH,\n IMG_HEIGHT)\nproto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH,\n IMG_HEIGHT)\nprint(proto_images_se_list.shape)\nprint(proto_images_bse_list.shape)\nnp.save('Data/SE_prototypes.npy', proto_images_se_list)\nnp.save('Data/BSE_prototypes.npy', proto_images_bse_list)\n",
"step-4": "<mask token>\nimport glob\nimport numpy as np\nimport cv2\nfrom reshape_util import crop_reshape\nfrom reshape_util import reshape_normalize\nIMG_WIDTH = 768\nIMG_HEIGHT = 768\nproto_images_se = glob.glob('Clonky-prototypy/*_3*')\nproto_images_bse = glob.glob('Clonky-prototypy/*_4*')\nproto_images_se_list = crop_reshape(proto_images_se)\nproto_images_bse_list = crop_reshape(proto_images_bse)\nproto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH,\n IMG_HEIGHT)\nproto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH,\n IMG_HEIGHT)\nprint(proto_images_se_list.shape)\nprint(proto_images_bse_list.shape)\nnp.save('Data/SE_prototypes.npy', proto_images_se_list)\nnp.save('Data/BSE_prototypes.npy', proto_images_bse_list)\n",
"step-5": "'''\nCopyright (c) 2021, Štěpán Beneš\n\n\nThe purpose of this script it to take the 5 BSE and 5 SE hand-picked prototype\nimages and turn them into the same shape and format as the rest of the data.\n\nPrototype images are resized to 768x768, the info bar is cropped off. Afterwards\nthe images are normalized to float32 in range [0,1] and reshaped into Keras Input\nshape of (len(images), width, height, 1). Finally they are saved for further use\nduring anomaly detection with siamese networks.\n'''\nimport glob\nimport numpy as np\nimport cv2\n\nfrom reshape_util import crop_reshape\nfrom reshape_util import reshape_normalize\n\n\nIMG_WIDTH = 768\nIMG_HEIGHT = 768\n\nproto_images_se = glob.glob('Clonky-prototypy/*_3*')\nproto_images_bse = glob.glob('Clonky-prototypy/*_4*')\n\nproto_images_se_list = crop_reshape(proto_images_se)\nproto_images_bse_list = crop_reshape(proto_images_bse)\n\nproto_images_se_list = reshape_normalize(proto_images_se_list, IMG_WIDTH, IMG_HEIGHT)\nproto_images_bse_list = reshape_normalize(proto_images_bse_list, IMG_WIDTH, IMG_HEIGHT)\n\nprint(proto_images_se_list.shape)\nprint(proto_images_bse_list.shape)\n\nnp.save(\"Data/SE_prototypes.npy\", proto_images_se_list)\nnp.save(\"Data/BSE_prototypes.npy\", proto_images_bse_list)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_string(pessoa):
for linha in pessoa:
print('id: {}\nNome: {}'.format(linha[0], linha[1]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_string(pessoa):
for linha in pessoa:
print('id: {}\nNome: {}'.format(linha[0], linha[1]))
if __name__ == '__main__':
con = sqlite3.connect('lab05-ex01.sqlite')
cursor = con.cursor()
cursor.execute('SELECT * FROM Pessoa')
print(cursor.fetchall())
nome = input('Nome da pessoa: ')
clausula = nome,
cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)
pessoa = cursor.fetchall()
to_string(pessoa)
cursor.close()
con.close()
<|reserved_special_token_1|>
import sqlite3
def to_string(pessoa):
for linha in pessoa:
print('id: {}\nNome: {}'.format(linha[0], linha[1]))
if __name__ == '__main__':
con = sqlite3.connect('lab05-ex01.sqlite')
cursor = con.cursor()
cursor.execute('SELECT * FROM Pessoa')
print(cursor.fetchall())
nome = input('Nome da pessoa: ')
clausula = nome,
cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)
pessoa = cursor.fetchall()
to_string(pessoa)
cursor.close()
con.close()
<|reserved_special_token_1|>
import sqlite3
def to_string(pessoa):
for linha in pessoa:
print('id: {}\nNome: {}'.format(linha[0], linha[1]))
if __name__ == '__main__':
con = sqlite3.connect('lab05-ex01.sqlite')
cursor = con.cursor()
cursor.execute("SELECT * FROM Pessoa")
print(cursor.fetchall())
nome = input("Nome da pessoa: ")
clausula = (nome,)
cursor.execute("SELECT * FROM Pessoa WHERE nome = ?", clausula)
pessoa = cursor.fetchall()
to_string(pessoa)
cursor.close()
con.close()
|
flexible
|
{
"blob_id": "4246773a8da61ff21d5faa8ab8ad2d7e75fafb60",
"index": 3058,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-4": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\n\nif __name__ == '__main__':\n con = sqlite3.connect('lab05-ex01.sqlite')\n cursor = con.cursor()\n cursor.execute('SELECT * FROM Pessoa')\n print(cursor.fetchall())\n nome = input('Nome da pessoa: ')\n clausula = nome,\n cursor.execute('SELECT * FROM Pessoa WHERE nome = ?', clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n cursor.close()\n con.close()\n",
"step-5": "import sqlite3\n\n\ndef to_string(pessoa):\n for linha in pessoa:\n print('id: {}\\nNome: {}'.format(linha[0], linha[1]))\n\nif __name__ == '__main__':\n\n con = sqlite3.connect('lab05-ex01.sqlite')\n\n cursor = con.cursor()\n\n cursor.execute(\"SELECT * FROM Pessoa\")\n print(cursor.fetchall())\n\n nome = input(\"Nome da pessoa: \")\n clausula = (nome,)\n\n cursor.execute(\"SELECT * FROM Pessoa WHERE nome = ?\", clausula)\n pessoa = cursor.fetchall()\n to_string(pessoa)\n\n\n cursor.close()\n con.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from .models import User,Profile
from django.contrib.auth.forms import UserCreationForm
class ProfileForm(forms.ModelForm):
''' Form for the profile '''
class Meta:
model = Profile
exclude = ('user',) ## we will create the user with the signals
class SignUpForm(UserCreationForm):
''' Sign up form fetching form the User creation form
and the email and password is necessary not the user '''
class Meta:
model = User
fields = ('email','password1','password2')
|
normal
|
{
"blob_id": "7c3569c43d27ba605c0dba420690e18d7f849965",
"index": 7372,
"step-1": "<mask token>\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-2": "<mask token>\n\n\nclass ProfileForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-3": "<mask token>\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\" Form for the profile \"\"\"\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-4": "from django import forms\nfrom .models import User, Profile\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\" Form for the profile \"\"\"\n\n\n class Meta:\n model = Profile\n exclude = 'user',\n\n\nclass SignUpForm(UserCreationForm):\n \"\"\" Sign up form fetching form the User creation form\n and the email and password is necessary not the user \"\"\"\n\n\n class Meta:\n model = User\n fields = 'email', 'password1', 'password2'\n",
"step-5": "from django import forms\nfrom .models import User,Profile\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass ProfileForm(forms.ModelForm):\n ''' Form for the profile '''\n class Meta:\n model = Profile\n exclude = ('user',) ## we will create the user with the signals\n\n\n\n\nclass SignUpForm(UserCreationForm):\n ''' Sign up form fetching form the User creation form\n and the email and password is necessary not the user '''\n class Meta:\n model = User\n fields = ('email','password1','password2')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
import glob
import string
import os
ALLOWED_CHARS = string.ascii_letters + "-,. \"()'"
def concat_all_data(path : str = 'Data/*.csv', save_path : str = 'Data/final.csv'):
csvs = glob.glob(path)
li = []
for csv in csvs:
df = pd.read_csv(csv)
li.append(df)
final_df = pd.concat(li)
final_df.to_csv(save_path)
def clean_csv(path : str, save_pth : str):
df = pd.read_csv(path)
df = remove_dups_df(df)
df = remove_invalid_rows_df(df)
df.to_csv(save_pth)
def remove_dups_df(df : pd.DataFrame):
df.sort_values("name", inplace = True)
df.drop_duplicates(subset="name", keep=False, inplace=True)
return df
def remove_invalid_rows_df(df : pd.DataFrame):
return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]
df = pd.DataFrame(columns=['count', 'name'])
f = open("fbnames.txt", "r")
count = 0
save_every = 2000
for line in f:
count += 1
split = line.split()
df = df.append({'count':split[0], 'name':split[1].capitalize()}, ignore_index=True)
if count % save_every == 0:
df.to_csv("fbnames.csv")
df.to_csv("fbnames.csv")
files = os.listdir("namesbystate/")
df = pd.DataFrame(columns=['count', 'name'])
count = 0
save_every = 2000
for file in files:
f = open(f"namesbystate\{file}", "r")
count = 0
for line in f:
count += 1
split = line.split(",")
df = df.append({"count":int(split[4]),"name":split[3]}, ignore_index=True)
if save_every % count == 0:
df = df.groupby(['name']).sum()
df.to_csv("namesbystates.csv")
df.groupby(['name']).sum()
df.to_csv("namesbystates.csv")
|
normal
|
{
"blob_id": "0a5e30483c1fde10410c442a1ccd1f79bfb329c8",
"index": 8457,
"step-1": "<mask token>\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\n<mask token>\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\n<mask token>\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-3": "<mask token>\nALLOWED_CHARS = string.ascii_letters + '-,. \"()\\''\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\ndf = pd.DataFrame(columns=['count', 'name'])\nf = open('fbnames.txt', 'r')\ncount = 0\nsave_every = 2000\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\nfiles = os.listdir('namesbystate/')\ndf = pd.DataFrame(columns=['count', 'name'])\ncount = 0\nsave_every = 2000\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-4": "import pandas as pd\nimport glob\nimport string\nimport os\nALLOWED_CHARS = string.ascii_letters + '-,. \"()\\''\n\n\ndef concat_all_data(path: str='Data/*.csv', save_path: str='Data/final.csv'):\n csvs = glob.glob(path)\n li = []\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n final_df = pd.concat(li)\n final_df.to_csv(save_path)\n\n\ndef clean_csv(path: str, save_pth: str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n df.to_csv(save_pth)\n\n\ndef remove_dups_df(df: pd.DataFrame):\n df.sort_values('name', inplace=True)\n df.drop_duplicates(subset='name', keep=False, inplace=True)\n return df\n\n\ndef remove_invalid_rows_df(df: pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\n\ndf = pd.DataFrame(columns=['count', 'name'])\nf = open('fbnames.txt', 'r')\ncount = 0\nsave_every = 2000\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count': split[0], 'name': split[1].capitalize()},\n ignore_index=True)\n if count % save_every == 0:\n df.to_csv('fbnames.csv')\ndf.to_csv('fbnames.csv')\nfiles = os.listdir('namesbystate/')\ndf = pd.DataFrame(columns=['count', 'name'])\ncount = 0\nsave_every = 2000\nfor file in files:\n f = open(f'namesbystate\\\\{file}', 'r')\n count = 0\n for line in f:\n count += 1\n split = line.split(',')\n df = df.append({'count': int(split[4]), 'name': split[3]},\n ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv('namesbystates.csv')\ndf.groupby(['name']).sum()\ndf.to_csv('namesbystates.csv')\n",
"step-5": "import pandas as pd \nimport glob\nimport string \nimport os\n\nALLOWED_CHARS = string.ascii_letters + \"-,. \\\"()'\"\n\ndef concat_all_data(path : str = 'Data/*.csv', save_path : str = 'Data/final.csv'):\n csvs = glob.glob(path)\n\n li = []\n\n for csv in csvs:\n df = pd.read_csv(csv)\n li.append(df)\n\n final_df = pd.concat(li)\n\n final_df.to_csv(save_path)\n\ndef clean_csv(path : str, save_pth : str):\n df = pd.read_csv(path)\n df = remove_dups_df(df)\n df = remove_invalid_rows_df(df)\n\n df.to_csv(save_pth)\n\ndef remove_dups_df(df : pd.DataFrame):\n df.sort_values(\"name\", inplace = True)\n df.drop_duplicates(subset=\"name\", keep=False, inplace=True)\n\n return df\n\ndef remove_invalid_rows_df(df : pd.DataFrame):\n return df[df['name'].apply(lambda x: set(x).issubset(ALLOWED_CHARS))]\n\ndf = pd.DataFrame(columns=['count', 'name'])\n\nf = open(\"fbnames.txt\", \"r\")\ncount = 0\nsave_every = 2000\n\nfor line in f:\n count += 1\n split = line.split()\n df = df.append({'count':split[0], 'name':split[1].capitalize()}, ignore_index=True)\n \n if count % save_every == 0:\n df.to_csv(\"fbnames.csv\")\n\ndf.to_csv(\"fbnames.csv\")\n\n\nfiles = os.listdir(\"namesbystate/\")\n\ndf = pd.DataFrame(columns=['count', 'name'])\n\n\n\ncount = 0\nsave_every = 2000\n\nfor file in files:\n f = open(f\"namesbystate\\{file}\", \"r\")\n count = 0\n for line in f:\n count += 1\n split = line.split(\",\")\n df = df.append({\"count\":int(split[4]),\"name\":split[3]}, ignore_index=True)\n if save_every % count == 0:\n df = df.groupby(['name']).sum()\n df.to_csv(\"namesbystates.csv\")\n\ndf.groupby(['name']).sum()\ndf.to_csv(\"namesbystates.csv\")",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/python
from setuptools import setup, find_packages
import os
EXTRAS_REQUIRES = dict(
test=[
'pytest>=2.2.4',
'mock>=0.8.0',
'tempdirs>=0.0.8',
],
dev=[
'ipython>=0.13',
],
)
# Tests always depend on all other requirements, except dev
for k,v in EXTRAS_REQUIRES.iteritems():
if k == 'test' or k == 'dev':
continue
EXTRAS_REQUIRES['test'] += v
# Pypi package documentation
root = os.path.dirname(__file__)
path = os.path.join(root, 'README.rst')
with open(path) as fp:
long_description = fp.read()
setup(
name='linkins',
version='0.0.7.4',
description=(
'Links a directory structure and optionally executes '
'user-defined scripts at each level of the directory '
'hierarchy'
),
long_description=long_description,
author='Andres Buritica',
author_email='[email protected]',
maintainer='Andres Buritica',
maintainer_email='[email protected]',
url='https://github.com/thelinuxkid/linkins',
license='MIT',
packages = find_packages(),
test_suite='nose.collector',
install_requires=[
'setuptools',
],
extras_require=EXTRAS_REQUIRES,
entry_points={
'console_scripts': [
'linkins = linkins.cli:main',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7'
],
)
|
normal
|
{
"blob_id": "f531af47431055866db72f6a7181580da461853d",
"index": 6780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k, v in EXTRAS_REQUIRES.iteritems():\n if k == 'test' or k == 'dev':\n continue\n EXTRAS_REQUIRES['test'] += v\n<mask token>\nwith open(path) as fp:\n long_description = fp.read()\nsetup(name='linkins', version='0.0.7.4', description=\n 'Links a directory structure and optionally executes user-defined scripts at each level of the directory hierarchy'\n , long_description=long_description, author='Andres Buritica',\n author_email='[email protected]', maintainer='Andres Buritica',\n maintainer_email='[email protected]', url=\n 'https://github.com/thelinuxkid/linkins', license='MIT', packages=\n find_packages(), test_suite='nose.collector', install_requires=[\n 'setuptools'], extras_require=EXTRAS_REQUIRES, entry_points={\n 'console_scripts': ['linkins = linkins.cli:main']}, classifiers=[\n 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2.7'])\n",
"step-3": "<mask token>\nEXTRAS_REQUIRES = dict(test=['pytest>=2.2.4', 'mock>=0.8.0',\n 'tempdirs>=0.0.8'], dev=['ipython>=0.13'])\nfor k, v in EXTRAS_REQUIRES.iteritems():\n if k == 'test' or k == 'dev':\n continue\n EXTRAS_REQUIRES['test'] += v\nroot = os.path.dirname(__file__)\npath = os.path.join(root, 'README.rst')\nwith open(path) as fp:\n long_description = fp.read()\nsetup(name='linkins', version='0.0.7.4', description=\n 'Links a directory structure and optionally executes user-defined scripts at each level of the directory hierarchy'\n , long_description=long_description, author='Andres Buritica',\n author_email='[email protected]', maintainer='Andres Buritica',\n maintainer_email='[email protected]', url=\n 'https://github.com/thelinuxkid/linkins', license='MIT', packages=\n find_packages(), test_suite='nose.collector', install_requires=[\n 'setuptools'], extras_require=EXTRAS_REQUIRES, entry_points={\n 'console_scripts': ['linkins = linkins.cli:main']}, classifiers=[\n 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2.7'])\n",
"step-4": "from setuptools import setup, find_packages\nimport os\nEXTRAS_REQUIRES = dict(test=['pytest>=2.2.4', 'mock>=0.8.0',\n 'tempdirs>=0.0.8'], dev=['ipython>=0.13'])\nfor k, v in EXTRAS_REQUIRES.iteritems():\n if k == 'test' or k == 'dev':\n continue\n EXTRAS_REQUIRES['test'] += v\nroot = os.path.dirname(__file__)\npath = os.path.join(root, 'README.rst')\nwith open(path) as fp:\n long_description = fp.read()\nsetup(name='linkins', version='0.0.7.4', description=\n 'Links a directory structure and optionally executes user-defined scripts at each level of the directory hierarchy'\n , long_description=long_description, author='Andres Buritica',\n author_email='[email protected]', maintainer='Andres Buritica',\n maintainer_email='[email protected]', url=\n 'https://github.com/thelinuxkid/linkins', license='MIT', packages=\n find_packages(), test_suite='nose.collector', install_requires=[\n 'setuptools'], extras_require=EXTRAS_REQUIRES, entry_points={\n 'console_scripts': ['linkins = linkins.cli:main']}, classifiers=[\n 'Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2.7'])\n",
"step-5": "#!/usr/bin/python\nfrom setuptools import setup, find_packages\nimport os\n\nEXTRAS_REQUIRES = dict(\n test=[\n 'pytest>=2.2.4',\n 'mock>=0.8.0',\n 'tempdirs>=0.0.8',\n ],\n dev=[\n 'ipython>=0.13',\n ],\n )\n\n# Tests always depend on all other requirements, except dev\nfor k,v in EXTRAS_REQUIRES.iteritems():\n if k == 'test' or k == 'dev':\n continue\n EXTRAS_REQUIRES['test'] += v\n\n# Pypi package documentation\nroot = os.path.dirname(__file__)\npath = os.path.join(root, 'README.rst')\nwith open(path) as fp:\n long_description = fp.read()\n\nsetup(\n name='linkins',\n version='0.0.7.4',\n description=(\n 'Links a directory structure and optionally executes '\n 'user-defined scripts at each level of the directory '\n 'hierarchy'\n ),\n long_description=long_description,\n author='Andres Buritica',\n author_email='[email protected]',\n maintainer='Andres Buritica',\n maintainer_email='[email protected]',\n url='https://github.com/thelinuxkid/linkins',\n license='MIT',\n packages = find_packages(),\n test_suite='nose.collector',\n install_requires=[\n 'setuptools',\n ],\n extras_require=EXTRAS_REQUIRES,\n entry_points={\n 'console_scripts': [\n 'linkins = linkins.cli:main',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7'\n ],\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
8-6. 도시 이름
도시와 국가 이름을 받는 city_country() 함수를 만드세요. 이 함수는 다음과 같은 문자열을 반환해야 합니다.
'Santiago, Chile'
- 최소한 세 개의 도시-국가 쌍으로 함수를 호출하고 반환값을 출력하세요.
Output:
santiago, chile
ushuaia, argentina
longyearbyen, svalbard
'''
|
flexible
|
{
"blob_id": "2d5abcd75dcbeb1baa3f387035bdcc3b7adbfe3f",
"index": 7856,
"step-1": "<mask token>\n",
"step-2": "'''\n8-6. 도시 이름\n도시와 국가 이름을 받는 city_country() 함수를 만드세요. 이 함수는 다음과 같은 문자열을 반환해야 합니다.\n'Santiago, Chile'\n- 최소한 세 개의 도시-국가 쌍으로 함수를 호출하고 반환값을 출력하세요.\n\nOutput:\nsantiago, chile\nushuaia, argentina\nlongyearbyen, svalbard\n'''\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""Gaussian mixture model, with Stochastic EM algorithm."""
import numpy as np
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky
from Core.gllim import MyGMM
class SEMGaussianMixture(MyGMM):
"""Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente."""
def _compute_Z_conditionnal_density(self,Y):
"""
Calcule les proba conditionnelles de Z_i sachant Y_i
:param Y: Observations (n_samples,n_features)
:return: matrice stochastique (en ligne) (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)
s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))
return proba_cond / s #On normalise
def _draw_conditionnal_Z(self,Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:,np.newaxis]
zi = (s < r).sum(axis=1)[:,np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self,Z,n_features):
pik = Z.sum(axis=0)
return (pik >= (n_features + 1)).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z,Y.shape[1]): #Condition de seuil
Z = self._draw_conditionnal_Z(Y)
print("Ajustement au seuil")
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter,diff_ll)
self.current_iter = n_iter + 1 #Prochaine itération
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]): # Condition de seuil
Z = self._draw_conditionnal_Z(Y)
i += 1
print("Ajustement au seuil")
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = (
_estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,
self.covariance_type))
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt( r + 1)
|
normal
|
{
"blob_id": "39475626b7e3e0f4c8143b300c002a2eb50cc23a",
"index": 9341,
"step-1": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n <mask token>\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-2": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-3": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-5": "\"\"\"Gaussian mixture model, with Stochastic EM algorithm.\"\"\"\n\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\n\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self,Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)\n s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))\n return proba_cond / s #On normalise\n\n def _draw_conditionnal_Z(self,Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:,np.newaxis]\n zi = (s < r).sum(axis=1)[:,np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self,Z,n_features):\n pik = Z.sum(axis=0)\n return (pik >= (n_features + 1)).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z,Y.shape[1]): #Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n self._m_step_callback(Y)\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter,diff_ll)\n self.current_iter = n_iter + 1 #Prochaine itération\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]): # Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n SEMweights_ /= n_samples\n\n EMweights_, EMmeans_, EMcovariances_ = (\n _estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,\n self.covariance_type))\n EMweights_ /= n_samples\n\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n 
self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt( r + 1)\n\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |