max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
website/rooms/forms.py | KiOui/bussen | 0 | 12791451 | <filename>website/rooms/forms.py
from django import forms

from .models import Room


class RoomCreationForm(forms.Form):
    """Room Creation form."""

    room_name = forms.CharField(max_length=128, required=True)

    def clean_room_name(self):
        """Clean room name."""
        # Raising ValidationError from clean_<field> registers the error on
        # the field, so no separate add_error() call is needed.
        if Room.objects.filter(name=self.cleaned_data.get("room_name")).exists():
            raise forms.ValidationError("This room already exists")
        return self.cleaned_data.get("room_name")


class PlayerCreationForm(forms.Form):
    """Player Creation form."""

    player_name = forms.CharField(max_length=128, required=True)
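
# A minimal usage sketch (an illustration, not part of the original module):
# binding RoomCreationForm to request data in a view, assuming a POST that
# carries a "room_name" field and a Room model with a `name` column.
#
#   form = RoomCreationForm(request.POST)
#   if form.is_valid():
#       Room.objects.create(name=form.cleaned_data["room_name"])
#   else:
#       print(form.errors["room_name"])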
| 2.90625 | 3 |
clara/transpiler/tree/__init__.py | asergiobranco/clara | 4 | 12791452 | import numpy as np


class DecisionTreeClassifierTranspiler(object):
    def __init__(self, model):
        self.model = model
        self.build_classes()
        self.build_feature_idx()
        self.build_right_nodes()
        self.build_thresholds()

    def build_feature_idx(self):
        # Feature index tested at each node; scikit-learn stores a negative
        # value (-2) for leaves, which the generated loop uses as its exit.
        self.features_idx = ','.join(self.model.tree_.feature.astype(str))

    def build_classes(self):
        # Majority class per node, so a leaf's prediction can be read off directly.
        class_aux = list(map(lambda x: x[0], self.model.tree_.value))
        self.classes = np.argmax(class_aux, axis=1)
        self.classes = ','.join(self.classes.astype(str))

    def build_right_nodes(self):
        # scikit-learn marks "no right child" with -1; remap it to 0, an index
        # the generated predict() never follows because leaves end the loop.
        self.right_nodes = ','.join(self.model.tree_.children_right.astype(str)).replace('-1', '0')

    def build_thresholds(self):
        self.thresholds = ','.join(self.model.tree_.threshold.astype(str))
    def generate_code(self):
        return """
/*
The following code was generated using Clara.Transpiler. For more information please visit: https://github.com/asergiobranco/clara
*/

#define NO_NODES %s

unsigned char classes[NO_NODES] = {%s};
int FEATURE_IDX_NODE[NO_NODES] = {%s};
int RIGHT_CHILDS[NO_NODES] = {%s};
float THRESHOLDS[NO_NODES] = {%s};

int predict(double * sample){
    unsigned int current_node = 0;
    int feature_idx = FEATURE_IDX_NODE[0];

    while(feature_idx >= 0){ /* a negative feature index marks a leaf */
        if(sample[feature_idx] <= THRESHOLDS[current_node]){
            /* scikit-learn stores nodes in preorder, so the left child
               always sits at the next index */
            current_node++;
        }
        else{
            current_node = RIGHT_CHILDS[current_node];
        }
        feature_idx = FEATURE_IDX_NODE[current_node];
    }
    return classes[current_node];
}
""" % (self.model.tree_.node_count, self.classes, self.features_idx, self.right_nodes, self.thresholds)
| 2.84375 | 3 |
ordenenumeros.py | EBERTONSCHIPPNIK/Pequenos-codigospy | 0 | 12791453 | lista = [3, 2, 1]
# sorted() returns a new ascending copy and leaves lista unchanged.
print(sorted(lista))
| 3.0625 | 3 |
test/test_fakeservertest.py | yimuniao/collectd-cloudwatch | 220 | 12791454 | import unittest
import requests
from helpers.fake_http_server import FakeServer
class FakeServerTest(unittest.TestCase):
SERVER = None
@classmethod
def setUpClass(cls):
cls.SERVER = FakeServer()
cls.SERVER.start_server()
cls.SERVER.serve_forever()
def setUp(self):
self.server = FakeServerTest.SERVER
def test_is_server_alive(self):
self.assertTrue(self.server.is_alive())
self.assertTrue(self.server.is_ready_to_process())
def test_server_process_forever(self):
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request1")
self.assertTrue(self.server.is_ready_to_process())
send_and_check_request(self.server.get_url(), "request2")
self.assertTrue(self.server.is_ready_to_process())
def test_server_overlapped_listeners(self):
self.assertTrue(self.server.is_ready_to_process())
self.assertRaises(FakeServer.ServerStateException, self.server.serve_once)
self.assertRaises(FakeServer.ServerStateException, self.server.serve_forever)
def test_server_start_overlapped_instances(self):
self.assertRaises(FakeServer.ServerStateException, self.server.start_server)
def test_timeout_triggers_only_once_per_call(self):
timeout = 0.3
self.server.set_timeout_delay(timeout)
        with self.assertRaises(requests.exceptions.ReadTimeout):
            requests.get(self.server.get_url(), timeout=timeout)
        # The second request must succeed: the injected delay fires only once.
        requests.get(self.server.get_url(), timeout=timeout)
def test_server_stop_multiple_times(self):
self.server.stop_server()
self.assertRaises(FakeServer.ServerStateException, self.server.stop_server)
self.server.start_server()
self.server.serve_forever()
def test_set_custom_response(self):
expected_response = "Expected Response"
expected_response_code = 404
self.server.set_expected_response(expected_response, expected_response_code)
response = requests.get(self.server.get_url() + "request")
        self.assertEqual(expected_response, response.text)
        self.assertEqual(expected_response_code, response.status_code)
@classmethod
def tearDownClass(cls):
        try:
            cls.SERVER.stop_server()
        except Exception:
            pass
def send_and_check_request(url, request):
url = url + request
response = requests.get(url)
received_request = open(FakeServer.REQUEST_FILE).read()
assert request in received_request[1:] # skip first character which always is '/'
assert response.status_code == FakeServer.DEFAULT_RESPONSE_CODE
    assert response.text == FakeServer.DEFAULT_RESPONSE
| 2.921875 | 3 |
MPKTA_Final_Project.py | Iqrar99/MPKT-A_Final_Project | 0 | 12791455 | # MPKT-A final project
# hope it goes smoothly
# amen
from tkinter import *
lst = []
def readf():
    """Read the 68 food descriptions from all.txt into lst."""
    with open('all.txt', 'r') as f:
        line = ''
        for i in range(68):
            # Skip ahead to the <deskripsi> tag that opens the next record.
            while '<deskripsi>' not in line:
                line = f.readline()
            cmp = ''
            txt = ''
            # Accumulate every line up to (but not including) the <end> tag.
            while '<end>' not in cmp:
                txt += cmp
                cmp = f.readline()
            lst.append(txt)
            line = f.readline()
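
# The record layout readf() expects in all.txt (inferred from the parser
# above; the real file is not shown here):
#
#   <deskripsi>
#   ...free-form description lines for one food...
#   <end>
#
# repeated 68 times; each description becomes one entry of lst.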
class DirektoriMakanan():
    def __init__(self, master=None):
        # Default arguments are evaluated once at import time, so the root
        # window is created lazily here instead of as a Tk() default.
        self.master = master if master is not None else Tk()
master.minsize(width = 500, height = 600)
master.maxsize(width = 500, height = 600)
self.master.title("Selamat Datang di McDones (Direktori Macanan Tradisional)")
self.master.judul = Label(self.master, text = "Pilih provinsi yang ingin anda ketahui", font = "Arial 16 bold")
self.master.judul.grid(row = 0, column = 3, columnspan = 8)
self.bprov1 = Button(self.master, text='Aceh', command=self.__prov1, width = 25)
self.bprov1.grid(row = 1, column = 3, columnspan = 4)
self.bprov2 = Button(self.master, text='Sumatera Utara', command=self.__prov2, width = 25)
self.bprov2.grid(row = 2, column = 3, columnspan = 4)
self.bprov3 = Button(self.master, text='Sumatera Barat', command=self.__prov3, width = 25)
self.bprov3.grid(row = 3, column = 3, columnspan = 4)
self.bprov4 = Button(self.master, text='Riau', command=self.__prov4, width = 25)
self.bprov4.grid(row = 4, column = 3, columnspan = 4)
self.bprov5 = Button(self.master, text='Kepulauan Riau', command=self.__prov5, width = 25)
self.bprov5.grid(row = 5, column = 3, columnspan = 4)
self.bprov6 = Button(self.master, text='Jambi', command=self.__prov6, width = 25)
self.bprov6.grid(row = 6, column = 3, columnspan = 4)
        self.bprov7 = Button(self.master, text='Bengkulu', command=self.__prov7, width = 25)
self.bprov7.grid(row = 7, column = 3, columnspan = 4)
self.bprov8 = Button(self.master, text='Sumatera Selatan', command=self.__prov8, width = 25)
self.bprov8.grid(row = 8, column = 3, columnspan = 4)
self.bprov9 = Button(self.master, text='Kepulauan Bangka Belitung', command=self.__prov9, width = 25)
self.bprov9.grid(row = 9, column = 3, columnspan = 4)
self.bprov10 = Button(self.master, text='Lampung', command=self.__prov10, width = 25)
self.bprov10.grid(row = 10, column = 3, columnspan = 4)
        self.bprov11 = Button(self.master, text='Banten', command=self.__prov12, width = 25)
        self.bprov11.grid(row = 11, column = 3, columnspan = 4)
        self.bprov12 = Button(self.master, text='Jawa Barat', command=self.__prov13, width = 25)
        self.bprov12.grid(row = 12, column = 3, columnspan = 4)
        self.bprov13 = Button(self.master, text='DKI Jakarta', command=self.__prov11, width = 25)
        self.bprov13.grid(row = 13, column = 3, columnspan = 4)
        self.bprov14 = Button(self.master, text='Jawa Tengah', command=self.__prov15, width = 25)
        self.bprov14.grid(row = 14, column = 3, columnspan = 4)
        self.bprov15 = Button(self.master, text='DI Yogyakarta', command=self.__prov14, width = 25)
        self.bprov15.grid(row = 15, column = 3, columnspan = 4)
self.bprov16 = Button(self.master, text='Jawa Timur', command=self.__prov16, width = 25)
self.bprov16.grid(row = 16, column = 3, columnspan = 4)
self.bprov17 = Button(self.master, text='Bali', command=self.__prov17, width = 25)
self.bprov17.grid(row = 17, column = 3, columnspan = 4)
self.bprov18 = Button(self.master, text='NTB', command=self.__prov18, width = 25)
self.bprov18.grid(row = 1, column = 7, columnspan = 4)
self.bprov19 = Button(self.master, text='NTT', command=self.__prov19, width = 25)
self.bprov19.grid(row = 2, column = 7, columnspan = 4)
self.bprov20 = Button(self.master, text='Kalimantan Utara', command=self.__prov20, width = 25)
self.bprov20.grid(row = 3, column = 7, columnspan = 4)
self.bprov21 = Button(self.master, text='Kalimantan Barat', command=self.__prov21, width = 25)
self.bprov21.grid(row = 4, column = 7, columnspan = 4)
self.bprov22 = Button(self.master, text='Kalimantan Tengah', command=self.__prov22, width = 25)
self.bprov22.grid(row = 5, column = 7, columnspan = 4)
self.bprov23 = Button(self.master, text='Kalimantan Selatan', command=self.__prov23, width = 25)
self.bprov23.grid(row = 6, column = 7, columnspan = 4)
self.bprov24 = Button(self.master, text='Kalimantan Timur', command=self.__prov24, width = 25)
self.bprov24.grid(row = 7, column = 7, columnspan = 4)
self.bprov25 = Button(self.master, text='Gorontalo', command=self.__prov25, width = 25)
self.bprov25.grid(row = 8, column = 7, columnspan = 4)
self.bprov26 = Button(self.master, text='Sulawesi Utara', command=self.__prov26, width = 25)
self.bprov26.grid(row = 9, column = 7, columnspan = 4)
self.bprov27 = Button(self.master, text='Sulawesi Barat', command=self.__prov27, width = 25)
self.bprov27.grid(row = 10, column = 7, columnspan = 4)
self.bprov28 = Button(self.master, text='Sulawesi Tengah', command=self.__prov28, width = 25)
self.bprov28.grid(row = 11, column = 7, columnspan = 4)
self.bprov29 = Button(self.master, text='Sulawesi Selatan', command=self.__prov29, width = 25)
self.bprov29.grid(row = 12, column = 7, columnspan = 4)
self.bprov30 = Button(self.master, text='Sulawesi Tenggara', command=self.__prov30, width = 25)
self.bprov30.grid(row = 13, column = 7, columnspan = 4)
self.bprov31 = Button(self.master, text='Maluku Utara', command=self.__prov31, width = 25)
self.bprov31.grid(row = 14, column = 7, columnspan = 4)
self.bprov32 = Button(self.master, text='Maluku', command=self.__prov32, width = 25)
self.bprov32.grid(row = 15, column = 7, columnspan = 4)
self.bprov33 = Button(self.master, text='Papua Barat', command=self.__prov33, width = 25)
self.bprov33.grid(row = 16, column = 7, columnspan = 4)
self.bprov34 = Button(self.master, text='Papua', command=self.__prov34, width = 25)
self.bprov34.grid(row = 17, column = 7, columnspan = 4)
self.master.mainloop()
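
    # Illustrative helper (an addition, not wired into the handlers below):
    # every provN/food method repeats the same detail-window pattern, which
    # could be expressed once like this.
    def _show_food(self, province, image_file, description_index):
        win = Tk()
        win.minsize(width = 800, height = 600)
        win.maxsize(width = 800, height = 600)
        win.title("Makanan tradisional " + province)
        canvas = Canvas(win, width = 300, height = 300)
        canvas.pack()
        img = PhotoImage(master = canvas, file = image_file)
        canvas.create_image(5, 5, anchor = NW, image = img)
        text = Text(win, font = "Arial 12")
        text.insert(INSERT, lst[description_index])
        text.pack()
        win.mainloop()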
def __prov1(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Mie Aceh', command=self.aceh1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Kue Timpan', command=self.aceh2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def aceh1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Aceh")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Mie Aceh.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[0]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def aceh2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Aceh")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kue Timpan.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[1]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov2(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.sumut1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Bika Ambon', command=self.sumut2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sumut1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Dekke Na Niura.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[2]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sumut2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Bika Ambon.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[3]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov3(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Rendang', command=self.sumbar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Ampiang Dadiah', command=self.sumbar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sumbar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Rendang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[4]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sumbar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Ampiang Dadiah.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[5]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov4(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.riau1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Asidah', command=self.riau2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def riau1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Riau")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Bolu Kemojo.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[6]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def riau2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Riau")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Asidah.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[7]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov5(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.kriau1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Gong gong', command=self.kriau2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kriau1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kepulauan Riau")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Luti Gendang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[8]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kriau2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kepulauan Riau")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Gong Gong.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[9]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov6(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Tempoyak', command=self.jambi1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.jambi2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def jambi1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jambi")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Tempoyak.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[10]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def jambi2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jambi")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Padamaran.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[11]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov7(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Lepek Binti', command=self.beng1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Bagar Hiu', command=self.beng2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def beng1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bengkulu")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Lepek Binti.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[12]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def beng2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bengkulu")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Bagar Hiu.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[13]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov8(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Pempek Palembang', command=self.sumsel1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Tekwan Palembang', command=self.sumsel2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sumsel1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Pempek Palembang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[14]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sumsel2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sumatera Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Tekwan Palembang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[15]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov9(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.kbang1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Belacan Belitung', command=self.kbang2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kbang1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bangka Belitung")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Martabak Bangka.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[16]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kbang2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bangka Belitung")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Belaca Belitung.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[17]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov10(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Seruit Lampung', command=self.lamp1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Gulai Taboh', command=self.lamp2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def lamp1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Lampung")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Seruit Lampung.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[18]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def lamp2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Lampung")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Gulai Taboh.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[19]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov11(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Soto Betawi', command=self.dki1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.dki2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def dki1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional DKI Jakarta")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Soto Betawi.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[20]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def dki2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional DKI Jakarta")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="<NAME>.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[21]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov12(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Nasi Sumsum', command=self.ban1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.ban2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def ban1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional BAnten")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Nasi Sumsum.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[22]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def ban2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Banten")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sate Bandeng.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[23]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov13(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.jabar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Dorokdok', command=self.jabar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def jabar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="<NAME>.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[24]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def jabar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Dorokdok.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[25]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov14(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Tiwul', command=self.diy1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Jadah Tempe', command=self.diy2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def diy1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional DI Yogyakarta")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Tiwul.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[26]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def diy2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional DI Yogyakarta")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Jadah Tempe.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[27]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov15(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Soto Kudus', command=self.jateng1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
        master0.bprov2 = Button(master0, text='Nasi Grombyang', command=self.jateng2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def jateng1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa TEngah")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Soto Kudus.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[28]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def jateng2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa TEngah")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Nasi Grombyang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[29]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov16(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Rujak Cingur', command=self.jatim1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Rawon', command=self.jatim2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def jatim1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa Timur")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Rujak Cingur.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[30]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def jatim2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Jawa Timur")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Rawon.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[31]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov17(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.bali1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>u', command=self.bali2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def bali1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bali")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sate Lilit.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[32]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def bali2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Bali")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Ayam Betutu.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[33]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov18(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.ntb1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Sate Bulayak', command=self.ntb2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def ntb1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional NTB")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Ayam Taliwang.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[34]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def ntb2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional NTB")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sate Bulayak.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[35]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov19(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text="Se'i", command=self.ntt1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Tapa Kolo', command=self.ntt2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def ntt1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional NTT")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sei.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[36]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def ntt2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional NTT")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Tapa Kolo.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[37]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov20(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Lawa', command=self.kalut1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.kalut2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kalut1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Lawa.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[38]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kalut2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kepiting Soka.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[39]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov21(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Bubur Paddas Sambas', command=self.kalbar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.kalbar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kalbar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Bubur Paddas Sambas.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[40]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kalbar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="<NAME>oyak.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[41]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov22(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Kalumpe', command=self.kalteng1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.kalteng2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kalteng1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Tengha")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kalumpe.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[42]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kalteng2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Tengah")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="W<NAME>in.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[43]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov23(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Manday', command=self.kalsel1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Gangan Asam Banjar', command=self.kalsel2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kalsel1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Manday.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[44]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kalsel2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Gangan Asam Banjar.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[45]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov24(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.kaltim1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
        master0.bprov2 = Button(master0, text='Nasi Bekepor', command=self.kaltim2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def kaltim1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimnantan Timur")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Ayam Cincane.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[46]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def kaltim2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Kalimantan Timur")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Nasi Bekepor.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[47]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov25(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Binte Biluhuta', command=self.goron1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Bilenthango', command=self.goron2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def goron1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Gorontalo")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Binte Biluhuta.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[48]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def goron2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Gorontalo")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Bilenthango.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[49]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov26(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
        master0.bprov1 = Button(master0, text='Klapertaart', command=self.sulut1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Tinutuan', command=self.sulut2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sulut1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Klapertaart.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[50]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sulut2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Tinutuan.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[51]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov27(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Apang Bugis', command=self.sulbar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Jepa', command=self.sulbar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sulbar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Barat ")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Apang Bugis.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[52]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sulbar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Jepa.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[53]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov28(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Uta Kelo', command=self.sulteng1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Kaledo', command=self.sulteng2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sulteng1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Tengah")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Uta Kelo.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[54]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sulteng2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Tengah")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kaledo.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[55]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov29(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Sop Konro', command=self.sulsel1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Coto Makassar', command=self.sulsel2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sulsel1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sop Konro.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[56]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sulsel2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Selatan")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Coto Makassar.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[57]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov30(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Sinonggi', command=self.sulgar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Kasoami', command=self.sulgar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def sulgar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Tenggara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sinonggi.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[58]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def sulgar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Sulawesi Tenggara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kasoami.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[59]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov31(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.malut1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='<NAME>', command=self.malut2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def malut1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Maluku Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Gatang Kenari.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[60]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def malut2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Maluku Utara")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Nasi Lapola.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[61]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov32(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Sambal Colo-colo', command=self.malu1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Kohu-kohu', command=self.malu2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def malu1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Maluku")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sambal Colo Colo.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[62]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def malu2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Maluku")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Kohu Kohu.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[63]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov33(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='Ikan Bakar Manokwari', command=self.pabar1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Sate Ulat Sagu', command=self.pabar2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def pabar1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Papua Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Ikan Bakar Manokwari.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[64]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def pabar2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Papua Barat")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Sate Ulat Sagu.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[65]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def __prov34(self):
master0 = Tk()
master0.minsize(width = 450, height = 100)
master0.maxsize(width = 450, height = 100)
master0.title("Direktori Makanan Tradisional Nusantara")
master0.judul = Label(master0, text = "Pilih makanan :)", font = "Arial 16 bold")
master0.judul.grid(row = 0, column = 2, columnspan = 4)
master0.bprov1 = Button(master0, text='<NAME>', command=self.papua1, width = 25, height=3)
master0.bprov1.grid(row = 1, column = 2, columnspan = 4)
master0.bprov2 = Button(master0, text='Papeda', command=self.papua2, width = 25, height=3)
master0.bprov2.grid(row = 1, column = 6, columnspan = 4)
master0.mainloop()
def papua1(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Papua")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Udang Selingkuh.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[66]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
def papua2(self):
master2 = Tk()
master2.minsize(width = 800, height = 600)
master2.maxsize(width = 800, height = 600)
master2.title("Makanan tradisional Papua")
canvas = Canvas(master2, width = 300, height = 300)
canvas.pack()
img = PhotoImage(master = canvas,file="Papeda.png")
canvas.create_image(5,5, anchor=NW, image=img)
text1 = lst[67]
text2 = Text(master2, font = "Arial 12")
text2.insert(INSERT, text1)
text2.pack()
master2.mainloop()
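
# --- Editor's note: hedged refactoring sketch, not part of the original app.
# Every food window above repeats one pattern; a shared helper like this one
# could replace the per-food methods. Tk, Canvas, PhotoImage, Text, NW, INSERT
# and lst are assumed to come from the surrounding module (tkinter star import
# and the readf() loader); the helper name itself is an illustrative assumption.
def show_food_window(title, image_file, text_index):
    win = Tk()
    win.minsize(width=800, height=600)
    win.maxsize(width=800, height=600)
    win.title(title)
    canvas = Canvas(win, width=300, height=300)
    canvas.pack()
    img = PhotoImage(master=canvas, file=image_file)
    canvas.create_image(5, 5, anchor=NW, image=img)
    body = Text(win, font="Arial 12")
    body.insert(INSERT, lst[text_index])
    body.pack()
    win.mainloop()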
if __name__ == "__main__":
readf()
DirektoriMakanan()
| 3.390625 | 3 |
tests/test_problem_solving_algorithms_warmup.py | mxdzi/hackerrank | 0 | 12791456 | from problem_solving.algorithms.warmup import *
def test_solve_me_first():
assert 5 == q1_solve_me_first.solveMeFirst(2, 3)
assert 1100 == q1_solve_me_first.solveMeFirst(100, 1000)
def test_simple_array_sum():
assert 31 == q2_simple_array_sum.simpleArraySum([1, 2, 3, 4, 10, 11])
def test_compare_the_triplets():
assert [1, 1] == q3_compare_the_triplets.compareTriplets((5, 6, 7), (3, 6, 10))
assert [2, 1] == q3_compare_the_triplets.compareTriplets((17, 28, 30), (99, 16, 8))
def test_a_very_big_sum():
assert 5000000015 == q4_a_very_big_sum.aVeryBigSum([1000000001, 1000000002, 1000000003, 1000000004, 1000000005])
def test_diagonal_difference():
assert 15 == q5_diagonal_difference.diagonalDifference([[11, 2, 4], [4, 5, 6], [10, 8, -12]])
def test_plus_minus(capsys):
q6_plus_minus.plusMinus([-4, 3, -9, 0, 4, 1])
captured = capsys.readouterr()
output = "0.500000\n0.333333\n0.166667\n"
assert captured.out == output
def test_staircase(capsys):
q7_staircase.staircase(6)
captured = capsys.readouterr()
output = " #\n ##\n ###\n ####\n #####\n######\n"
assert captured.out == output
def test_mini_max_sum(capsys):
q8_mini_max_sum.miniMaxSum([1, 2, 3, 4, 5])
captured = capsys.readouterr()
output = "10 14\n"
assert captured.out == output
q8_mini_max_sum.miniMaxSum([7, 69, 2, 221, 8974])
captured = capsys.readouterr()
output = "299 9271\n"
assert captured.out == output
def test_birthday_cake_candles():
assert 2 == q9_birthday_cake_candles.birthdayCakeCandles([3, 2, 1, 3])
def test_time_conversion():
assert "19:05:45" == q10_time_conversion.timeConversion("07:05:45PM")
| 2.828125 | 3 |
izaber_flask_wamp/wamp.py | zaberaki/izaber-flask-wamp | 0 | 12791457 | <filename>izaber_flask_wamp/wamp.py<gh_stars>0
from .app import *
class IZaberFlaskLocalWAMP(object):
""" Allows the creation and sending of calls and functions
"""
def __init__(self,sockets,app):
self.sockets = sockets
self.app = app
self.on_connect = []
self.on_disconnect = []
self.on_authenticated = []
def register(self,uri,options=None):
""" A method to use a decorator to register a callback
"""
def actual_register_decorator(f):
self.app.register_local(uri, f, options)
return f
return actual_register_decorator
def subscribe(self,uri,options=None):
""" A method to use a decorator to subscribe a callback
"""
def actual_subscribe_decorator(f):
self.app.subscribe_local(uri, f, options)
return f
return actual_subscribe_decorator
def publish(self,topic,options=None,args=None,kwargs=None):
self.app.publish(PUBLISH(
options=options or {},
topic=topic,
args=args or [],
kwargs=kwargs or {}
))
def wamp_connect(self):
""" A decorator to attach to when someone connects
"""
return lambda f: self.on_connect.append(f)
def wamp_authenticated(self):
""" A decorator to attach to when someone authenticates
"""
return lambda f: self.on_authenticated.append(f)
def wamp_disconnect(self):
""" A decorator to attach to when someone disconnects
"""
return lambda f: self.on_disconnect.append(f)
def do_wamp_connect(self,client):
""" A decorator to attach to when someone connects
"""
for f in self.on_connect:
f(client)
def do_wamp_authenticated(self,client):
""" A decorator to attach to when someone authenticateds
"""
for f in self.on_authenticated:
f(client)
def do_wamp_disconnect(self,client):
""" A decorator to attach to when someone disconnects
"""
for f in self.on_disconnect:
f(client)
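
# --- Editor's note: hedged usage sketch, not part of the original module.
# The `sockets` and `app` arguments stand in for whatever this package
# normally wires up; the URIs and the demo function itself are illustrative
# assumptions.
def _demo_usage(sockets, app):
    wamp = IZaberFlaskLocalWAMP(sockets, app)

    @wamp.register('com.example.add')
    def add(a, b):
        return a + b

    @wamp.subscribe('com.example.event')
    def on_event(*args, **kwargs):
        print('event received:', args, kwargs)

    # publish() delivers to subscribers of the topic
    wamp.publish('com.example.event', args=[42])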
| 2.59375 | 3 |
connectFourLab/game/agents/strategies/monteCarlo.py | yuriharrison/connect-four-lab | 0 | 12791458 | <reponame>yuriharrison/connect-four-lab<gh_stars>0
"""Monte Carlo strategies"""
import math
from copy import copy, deepcopy
from . import Strategy, RandomStrategy, ZobristHashingStrategy
from ... import helpers
from ...exceptions import BadImplementation
class SimulationStrategy(RandomStrategy):
"""Simulation Stragegy provide the method necessary
to simulate matches."""
def simulate(self, board, color=-1):
"""Simulate a match to the end from a given
board state. All turns are played randomly till
the board hits a terminal state then the value is
returned.
# Arguments
board: matrix, required, board state to be simulated
color: int, required, id of the owner of the board state
# Return
Id of the winner of the simulation or zero in case
the simulation ends in a draw.
# Example
AgentSimulation: [documentation](./agents#agentsimulation)
"""
winner = helpers.check_winner(board)
if winner:
return winner
sim_board = copy(board)
end = False
while not end:
column = self.random_choice(sim_board)
            if column is None:  # no options left: a tie
break
next_pos = helpers.next_position(sim_board, column)
sim_board[column, next_pos] = color
color = 1 if color == -1 else -1
winner = helpers.check_winner(sim_board)
if winner:
return winner
return 0
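
# --- Editor's note: hedged usage sketch, not part of the original API.
# Assumes SimulationStrategy() needs no constructor arguments and that the
# board is an indexable matrix as used above.
def _demo_estimate_move(board, column, color=1, n_sims=50):
    """Estimate a move's win rate for `color` with pure random playouts."""
    sim = SimulationStrategy()
    row = helpers.next_position(board, column)
    board[column, row] = color   # play the candidate move
    wins = sum(sim.simulate(board, -color) == color for _ in range(n_sims))
    board[column, row] = 0       # undo the trial move
    return wins / n_sims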
class DepthMeasure:
"""Use this class to help when measuring the depth of a
tree search. Useful when debugging and measuring performance
optimization.
# Example
```
class AgentNew(AgentBase, TimerStrategy):
def action(self, board):
DepthMeasure.start()
self.start_timer(...)
while not self.time_out:
# place the line "DepthMeasure.add()" inside the
# search method, when it creates a new depth
self.run_search()
DepthMeasure.reset()
DepthMeasure.print()
return ...
```
"""
current_depth = 0
deepiest = 0
@staticmethod
def start():
"""Reset all the variables to begin a new measurement."""
DepthMeasure.current_depth = 0
DepthMeasure.deepiest = 0
@staticmethod
def add():
"""Add more 1 depth."""
DepthMeasure.current_depth += 1
@staticmethod
def reset():
"""Reset the depth before start a new search episode. Save the
current depth if it's the deepiest till know.
"""
if DepthMeasure.current_depth > DepthMeasure.deepiest:
DepthMeasure.deepiest = DepthMeasure.current_depth
DepthMeasure.current_depth = 0
@staticmethod
def print():
"""Print the deepiest depth reached."""
print('Last play depth:', DepthMeasure.deepiest)
class Node(SimulationStrategy, ZobristHashingStrategy):
"""Node class is a base for a complex node for a Monte Carlo
Tree Searches.
This class has specific methods to perform the Monte Carlo Tree
Search.
    This class __does not work on its own__, because it has no
    default board evaluation algorithm. Subclasses are required
    to implement the `rollout_score` method, which evaluates
    a given state.
This class uses a __zobrist hashing table__ to optimize the search.
# Arguments
board: matrix, required, board state
memory: empty dictionary, required(*), default None
- this dictionary will store all searches
with zobrist hashing.
parent: `Node` object, required (**), default None
- Node above in the tree hierarchy.
position: int, required (**), default None
- Index of the column which generated the board
current `board`.
color: int, required (**), default 1
(*) required when creating a new root `Node` object.
(**) required when creating a new "children"
`Node` object (`new_node` method).
# Properties
UCB1: float, UCB1 value (*) of the node.
visits: int, total number of Node visits
        value: float, total score divided by the
            number of visits
            - the total score is the sum of all
            scores produced by the Node and its children.

    (*) UCB1 is the formula that balances search effort
    between exploration and exploitation in
    the Monte Carlo Tree Search strategy.
# Example
AgentMonteCarlo: [documentation](./agents#agentsimulation)
AgentMCTSNN: [documentation](./agents#agentmctsnn)
"""
def __init__(self, board, memory=None, parent=None, position=None, color=1):
self.board = board
self.color = color
self.position = position
self.parent = parent
self.memory = memory if memory is not None else parent.memory
self.__score = 0
self._visits = 0
self._children = None
super().__init__()
if parent:
self._hash = self._gen_hash()
self._save()
def __init_subclass__(cls):
if 'rollout_score' not in cls.__dict__:
raise BadImplementation(cls.__name__, 'rollout_score')
@property
def UCB1(self):
if self._visits == 0:
return math.inf
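        # UCB1 = mean value + 2*sqrt(ln(parent visits)/visits);
        # log1p(N) = ln(1 + N), which avoids log(0) while the parent
        # has no recorded visits.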
lnN = math.log1p(self.parent.visits)
return self.__score/self._visits + 2*math.sqrt(lnN/self._visits)
@property
def visits(self):
return self._visits
@property
def value(self):
if self._visits == 0:
return 0
return self.__score/self._visits
def _save(self):
self.memory[self._hash] = self
def rollout_score(self):
"""This method must return a score (float), evaluation,
for the Node board state (`self.board`), from the
perspective of the player id 1.
This method is called every rollout. The rollout occur
in the first visit of the Node, the value is stored in
the zobrist table and it is __not__ recalculated during the match.
# Return
float, score of the Node `board`
"""
pass
def rollout(self):
"""Node rollout.
        It will execute a rollout if this is the first
        visit of the Node, otherwise it will return `False`.

        Each rollout adds a visit to the counter and adds the score
        of the `board` to the Node and to all the parents above
        it in the tree.

        # Return
            `True` when the rollout occurs, `False` when it does not.
"""
if self.parent and self._visits == 0:
score = self.rollout_score()
self.add_score(score)
self.add_visit()
return True
else:
return False
def add_score(self, value):
self.__score += value
if self.parent:
self.parent.add_score(value)
def add_visit(self):
self._visits += 1
if self.parent:
self.parent.add_visit()
def children(self):
"""Get all the childen Nodes.
Generate or get from the memory, all the childen
Nodes. Each node is generated from the available
possitions in the `board` of the current Node.
"""
# DepthMeasure.add()
if not self._children:
childs = []
board = self.board
for column, row in helpers.available_positions(board):
board[column,row] = self.color
new_board = deepcopy(board)
board[column,row] = 0
node = self._get_memory(new_board, -self.color)
if not node:
node = self.new_node(new_board, column)
childs.append(node)
self._children = childs
return self._children if len(self._children) > 0 else None
def new_node(self, board, column):
"""This method is called by `children` method to
generate a new Node.
# Arguments:
board: matrix, new board state
column: int, index of the last column
# Return
A new instance of `Node` object.
"""
node_type = type(self)
node = node_type(board=board, parent=self, position=column, color=-self.color)
return node
    def _get_memory(self, board, color):
        # Return the cached Node for this (board, color) position, if any;
        # `memory` maps zobrist hashes to Nodes (see _save above).
        h = self.hash(board, color)
        if h in self.memory:
            return self.memory[h]
        return None
    def _gen_hash(self):
        return self.hash(self.board, self.color)
| 2.734375 | 3
textgrid-to-audacity.py | jimregan/wolnelektury-speech-corpus | 2 | 12791459 | #!/usr/bin/env python
import textgrid
import sys
if len(sys.argv) != 2:
print("textgrid-to-audacity.py [filename]")
    sys.exit(1)
tg = textgrid.TextGrid.fromFile(sys.argv[1])
started = False
start=0.0
end=0.0
text=list()
for i in tg[0]:
    if i.mark != '':
        if not started:
            start = i.minTime
            started = True
        end = i.maxTime
        text.append(i.mark)
    else:
        if started:
            print('{}\t{}\t{}'.format(start, end, ' '.join(text)))
            start = 0.0
            end = 0.0
            text.clear()
            started = False

# Flush the final segment if the grid does not end on an empty interval
if started:
    print('{}\t{}\t{}'.format(start, end, ' '.join(text)))
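
# Editor's note: each printed line is an Audacity label-track row,
# "start<TAB>end<TAB>label", e.g. (illustrative values):
#   0.00    1.25    ala ma kota
# Such a file can be imported in Audacity via File > Import > Labels.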
| 2.859375 | 3 |
readonly/readonly/test_runner.py | WimpyAnalytics/django-readonly-schema | 0 | 12791460 | import os
import logging
from django.test.runner import DiscoverRunner
from django.conf import settings
from django.db import connections
logger = logging.getLogger(__name__)
class LegacyDiscoverRunner(DiscoverRunner):
"""
See https://docs.djangoproject.com/en/1.7/topics/testing/advanced/#defining-a-test-runner
"""
def setup_databases(self, **kwargs):
"""Though our schema is readonly in shared environments we assume DB control in testing"""
# Super will create an empty test_<db name> automatically
config = super(LegacyDiscoverRunner, self).setup_databases(**kwargs)
# Invoke any custom ddl to create the schema after that.
script_path = os.path.join(settings.MANAGE_ROOT, 'legacy-schema.sql')
logger.info("Initializing DB with script. [Path: {}]".format(script_path))
with open(script_path, 'r') as sql_file:
ddl = sql_file.read()
cursor = connections['legacy'].cursor()
cursor.executescript(ddl)
return config | 2.375 | 2 |
zukuzuku.py | frodotest/test | 0 | 12791461 | # coding: utf8
import datetime
import logging
import random
import re
import ssl
import subprocess
import threading
import time
from multiprocessing import Process as Thread  # NOTE: Process is deliberately aliased as Thread; the code below spawns processes, not threads
import telebot
from aiohttp import web
from telebot import types
import api
import cherrypy
import config
import secret_config
import text
import ujson
import utils
WEBHOOK_HOST = utils.get_my_ip()
WEBHOOK_PORT = 8443  # 443, 80, 88 or 8443 (the port must be open!)
# On some servers you will have to specify the same IP as above
WEBHOOK_LISTEN = '0.0.0.0'
WEBHOOK_SSL_CERT = './webhook_cert.pem'  # Path to the certificate
WEBHOOK_SSL_PRIV = './webhook_pkey.pem'  # Path to the private key
WEBHOOK_URL_BASE = "https://%s:%s" % (WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/%s/" % (secret_config.token)
start_time = int(time.time())
bot = telebot.TeleBot(token = secret_config.token)
my_info = bot.get_me()
telebot_logger = logging.getLogger('telebot')
sqlite_info = logging.getLogger('sqlite')
main_info = logging.getLogger('main_info')
report_info = logging.getLogger('reports')
if __name__ == '__main__':
log_name = 'logs.txt'
f = open(log_name,'w')
f.close()
print('Файл логов создан')
telebot_logger = logging.getLogger('telebot')
mysql_info = logging.getLogger('mysql')
main_info = logging.getLogger('main_info')
report_info = logging.getLogger('reports')
print('Список логгеров создан')
logging.basicConfig(
format='%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s ',
datefmt='%m/%d/%Y %I:%M:%S %p',
level = logging.INFO
)
app = web.Application()
t = Thread(target = utils.check_deleting_queue)
t.start()
async def handle(request):
if request.match_info.get('token') == bot.token:
request_body_dict = await request.json()
update = telebot.types.Update.de_json(request_body_dict)
bot.process_new_updates([update])
return web.Response()
else:
return web.Response(status=403)
app.router.add_post('/{token}/', handle)
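
# Editor's note (hedged): webhook registration and server start-up are not in
# this part of the file; with the constants above they would typically look
# roughly like this:
#   bot.remove_webhook()
#   bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH,
#                   certificate=open(WEBHOOK_SSL_CERT, 'r'))
#   context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#   context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
#   web.run_app(app, host=WEBHOOK_LISTEN, port=WEBHOOK_PORT, ssl_context=context)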
def create_user_language_keyboard():
lang_keyboard = types.InlineKeyboardMarkup()
for i in config.languages:
lang_keyboard.add(types.InlineKeyboardButton(text = i['title'], callback_data = 'lang::{lang_code}'.format(lang_code = i['code'])))
return lang_keyboard
def group_setting(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Принимать рассылки{}'.format(config.settings_statuses[curr_settings['get_notifications']]), callback_data = 'get_notifications::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Удалять ссылки{}'.format(config.settings_statuses[curr_settings['deletions']['url']]), callback_data = 'del_url::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Удалять системные сообщения{}'.format(config.settings_statuses[curr_settings['deletions']['system']]), callback_data = 'del_system::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Исключать ботов{}'.format(config.settings_statuses[curr_settings['kick_bots']]), callback_data='kick_bots::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Фильтры', callback_data='deletions_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Ограничения новых пользователей', callback_data = 'new_users_restrictions::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Настройка предупреждений', callback_data = 'warns_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Настройка приветствий', callback_data = 'welcome_settings::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Получить дамп настроек', callback_data = 'get_settings_json::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Получить топ инвайтеров', callback_data = 'get_chat_refs::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
keyboard.add(types.InlineKeyboardButton(text = 'К списку групп', callback_data = 'to_groups_list'))
return keyboard
def welcome_settings_kb(chat_id):
kb = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Отправлять приветствие в чат: {}'.format(config.settings_statuses[curr_settings['greeting']['is_enabled']]), callback_data = 'welcome_state::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn = types.InlineKeyboardButton(text = 'Задержка перед удалением приветствия: {} сек.'.format(curr_settings['greeting']['delete_timer']), callback_data = 'welcome_get::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖10', callback_data = 'welcome_timer_-10::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖5', callback_data = 'welcome_timer_-5::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕5', callback_data = 'welcome_timer_+5::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕10', callback_data = 'welcome_timer_+10::{chat_id}'.format(chat_id = chat_id))
kb.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Показать приветствие', callback_data = 'welcome_get::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
kb.add(btn)
return kb
def new_users_restrictions_kb(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Автоматический read-only на {} час - {}'.format(curr_settings['restrictions']['for_time'], config.settings_statuses[curr_settings['restrictions']['read_only']]), callback_data = 'read_only::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖2', callback_data = 'time_ro_-2::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖1', callback_data = 'time_ro_-1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕1', callback_data = 'time_ro_+1::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕2', callback_data = 'time_ro_+2::{chat_id}'.format(chat_id = chat_id))
btn5 = types.InlineKeyboardButton(text = 'Навсегда', callback_data = 'time_ro_+10000::{chat_id}'.format(chat_id = chat_id))
btn6 = types.InlineKeyboardButton(text = 'Сброс', callback_data = 'time_ro_-10000000000::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
keyboard.add(btn5, btn6)
btn = types.InlineKeyboardButton(text = 'Снятие ограничений разрешено для: {}'.format(config.new_users[curr_settings['restrictions']['admins_only']]), callback_data = 'new_restrictions_admins_only_{state}::{chat_id}'.format(state = config.settings_states[curr_settings['restrictions']['admins_only']], chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def warns_settings_kb(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width = 4)
curr_settings = api.get_group_params(chat_id)
btn = types.InlineKeyboardButton(text = 'Максимальное количество исключений: {}'.format(curr_settings['warns']['count']), callback_data = 'empty_callback::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = '➖2', callback_data = 'warns_count_-2::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = '➖1', callback_data = 'warns_count_-1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = '➕1', callback_data = 'warns_count_+1::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = '➕2', callback_data = 'warns_count_+2::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Действие при максимальном кол-ве варнов: {}'.format(config.warns_states[curr_settings['warns']['action']]), callback_data='empty_callback::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn1 = types.InlineKeyboardButton(text = 'Ничего', callback_data = 'warns_action_0::{chat_id}'.format(chat_id = chat_id))
btn2 = types.InlineKeyboardButton(text = 'Кик', callback_data = 'warns_action_1::{chat_id}'.format(chat_id = chat_id))
btn3 = types.InlineKeyboardButton(text = 'Бан', callback_data = 'warns_action_2::{chat_id}'.format(chat_id = chat_id))
btn4 = types.InlineKeyboardButton(text = 'Read-only на сутки', callback_data = 'warns_action_3::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn1, btn2, btn3, btn4)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def remove_warns_kb(user_id):
kb = types.InlineKeyboardMarkup(row_width=1)
btn = types.InlineKeyboardButton(text = 'Удалить предупреждения', callback_data = 'delete_warns::{user_id}'.format(user_id = user_id))
kb.add(btn)
return kb
def unban_new_user_kb(msg):
kb = types.InlineKeyboardMarkup(row_width=1)
btn = types.InlineKeyboardButton(text = 'Разблокировать', callback_data = 'unban_new_user::{chat_id}::{user_id}'.format(user_id = msg.new_chat_member.id, chat_id = msg.chat.id))
kb.add(btn)
return kb
def user_settings_main_menu(msg):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_user_param(msg.chat.id, 'settings')
    # NOTE: this passes the literal key to settings_statuses; elsewhere the
    # current value is looked up first (cf. group_setting above).
    btn = types.InlineKeyboardButton(text = 'Принимать рассылки{}'.format(config.settings_statuses['get_notifications']), callback_data='get_notifications')
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Выбор языка'.format(config.settings_statuses['get_notifications']), callback_data='open_lang_menu')
keyboard.add(btn)
return keyboard
def delete_settings(chat_id):
keyboard = types.InlineKeyboardMarkup(row_width=1)
curr_settings = api.get_group_params(chat_id)
for cont_type in config.available_attachments:
btn = types.InlineKeyboardButton(text=config.available_attachments_str[cont_type].format(config.settings_statuses[curr_settings['deletions']['files'][cont_type]]), callback_data='delete::{content_type}::{chat_id}'.format(content_type = cont_type, chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Переключить все', callback_data = 'change_all::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Назад', callback_data='to_group_settings_menu::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def generate_leave_kb(msg):
chat_id = msg.chat.id
keyboard = types.InlineKeyboardMarkup(row_width=1)
    # NOTE: the callback_data values look inverted relative to the button labels;
    # kept as-is because the matching handlers are outside this excerpt.
    btn = types.InlineKeyboardButton(text = 'Да, выйди из чата', callback_data='leave_cancel::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
btn = types.InlineKeyboardButton(text = 'Нет, останься', callback_data='leave_confirm::{chat_id}'.format(chat_id = chat_id))
keyboard.add(btn)
return keyboard
def generate_user_menu_kb(user_id):
kb = types.InlineKeyboardMarkup(row_width = 1)
btn1 = types.InlineKeyboardButton(text = 'Мои чаты', callback_data = 'my_chats')
btn2 = types.InlineKeyboardButton(text = 'Изменить язык', callback_data = 'change_lang')
kb.add(btn1, btn2)
if utils.check_super_user(user_id):
kb.add(types.InlineKeyboardButton(text = 'Админка бота', callback_data = 'admin_menu'))
return kb
def generate_admin_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 2)
btn1 = types.InlineKeyboardButton(text = 'Рассылка', callback_data = 'broadcast_menu')
btn2 = types.InlineKeyboardButton(text = 'Статистика', callback_data = 'stats_menu')
kb.add(btn1, btn2)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
def generate_broadcast_vars_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 1)
btn1 = types.InlineKeyboardButton(text = 'Рассылка-проверка', callback_data = 'check_broadcast')
btn2 = types.InlineKeyboardButton(text = 'Рассылка сообщения', callback_data = 'broadcast_settings')
kb.add(btn1, btn2)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
def generate_broadcast_settings_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 2)
btn1 = types.InlineKeyboardButton(text = 'Ввести сообщение', callback_data = 'broadcast_message::input')
btn2 = types.InlineKeyboardButton(text = 'Просмотреть сообщение', callback_data = 'broadcast_message::show')
btn3 = types.InlineKeyboardButton(text = 'Начать рассылку', callback_data = 'broadcast_message::start')
kb.add(btn1, btn2, btn3)
return kb
def generate_broadcast_check_menu_kb():
kb = types.InlineKeyboardMarkup(row_width = 3)
curr_settings = ujson.loads(api.get_bot_settings(secret_config.token))
s = {
'users': 'пользователи',
'chats': 'диалоги',
'all': 'все'
}
btn1 = types.InlineKeyboardButton(text = 'Только диалоги', callback_data = 'broadcast_check::users')
btn2 = types.InlineKeyboardButton(text = 'Только чаты', callback_data = 'broadcast_check::chats')
btn3 = types.InlineKeyboardButton(text = 'Все', callback_data = 'broadcast_check::all')
btn4 = types.InlineKeyboardButton(text = 'Сейчас: {}'.format(s[curr_settings['broadcast']['check']['receivers']]), callback_data = 'empty_callback')
btn5 = types.InlineKeyboardButton(text = 'Начать рассылку', callback_data = 'broadcast_check::start')
kb.add(btn1, btn2, btn3)
kb.add(btn4, btn5)
return kb
def generate_user_groups(user_id):
kb = types.InlineKeyboardMarkup(row_width=2)
user_settings = ujson.loads(api.get_user_param(user_id, 'settings'))
btns = []
for i in user_settings['admined_groups']:
btn = types.InlineKeyboardButton(text = i['title'], callback_data = 'settings::{chat_id}'.format(chat_id = i['chat_id']))
btns.append(btn)
kb.add(*btns)
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
return kb
@bot.channel_post_handler(content_types=['text'], func = lambda msg: msg.chat.id == secret_config.channel_ID)
def bot_broadcast(msg):
r = bot.forward_message(secret_config.official_chat, msg.chat.id, msg.message_id)
bot.pin_chat_message(
r.chat.id,
r.message_id
)
@bot.message_handler(commands =['setlog'], func = lambda msg:
msg.chat.type in ['group', 'supergroup'] and
msg.forward_from_chat is not None and
utils.check_status(msg.from_user.id, msg.chat.id) and
not utils.check_log(msg.chat.id)
)
def bot_set_log(msg):
user_id = msg.from_user.id
try:
admins = bot.get_chat_administrators(msg.forward_from_chat.id)
status1 = False
status2 = False
for i in admins:
if i.user.id == user_id:
if i.status == 'creator':
status1 = True
if i.user.id == my_info.id:
status2 = True
if status1 is True and status2 is True:
utils.set_log_channel(msg.chat.id, msg.forward_from_chat.id)
elif status1 is not True:
bot.send_message(
msg.chat.id,
                text = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['confirmation']['errors']['user_is_not_creator']
)
elif status2 is not True:
bot.send_message(
msg.chat.id,
                text = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['confirmation']['errors']['bot_is_not_admin']
)
except Exception as e:
print(e)
@bot.message_handler(commands = ['dellog'], func = lambda msg:
msg.chat.type in ['group', 'supergroup'] and
msg.forward_from_chat is not None and
utils.check_status(msg.from_user.id, msg.chat.id) and
msg.forward_from_chat.id == utils.get_log_id(msg.chat.id) and
utils.check_log(msg.chat.id)
)
def bot_del_log(msg):
user_id = msg.from_user.id
try:
admins = bot.get_chat_administrators(msg.forward_from_chat.id)
status1 = False
status2 = False
for i in admins:
if i.user.id == user_id:
if i.status == 'creator':
status1 = True
if i.user.id == my_info.id:
status2 = True
if status1 is True and status2 is True:
utils.remove_log_channel(msg.chat.id)
elif status1 is not True:
bot.send_message(
msg.chat.id,
                text = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['confirmation']['errors']['user_is_not_creator']
)
elif status2 is not True:
bot.send_message(
msg.chat.id,
                text = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['confirmation']['errors']['bot_is_not_admin']
)
except Exception as e:
print(e)
@bot.message_handler(commands = ['infolog'], func = lambda msg: msg.chat.type in ['group', 'supergroup'])
def bot_info_log(msg):
if utils.check_log(msg.chat.id):
m = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['info']['is_on'].format(
chat_id = utils.get_log_id(msg.chat.id),
chat_name = bot.get_chat(utils.get_log_id(msg.chat.id)).title
)
else:
m = text.group_commands[utils.get_group_lang(msg.chat.id)]['log_channel']['info']['is_off']
bot.send_message(
msg.chat.id,
m,
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['leave'], func = lambda msg: msg.chat.type != 'private' and utils.check_status(msg.from_user.id, msg.chat.id))
def bot_leave(msg):
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['leave']['question'],
reply_markup = generate_leave_kb(msg),
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['rmkb'], func = lambda msg: msg.chat.type in ['group', 'supergroup'])
def bot_remove_kb(msg):
kb = types.ReplyKeyboardMarkup(one_time_keyboard=True)
kb.add(types.KeyboardButton(text='/rmkb'))
r = bot.send_message(
msg.chat.id,
text = text.group_commands[utils.get_group_lang(msg.chat.id)]['remove_keyboard'],
reply_markup = kb
)
bot.delete_message(
msg.chat.id,
r.message_id
)
bot.delete_message(
msg.chat.id,
msg.message_id
)
@bot.message_handler(commands = ['settings'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_answ(msg):
start_time = time.time()
message = msg
kb = types.InlineKeyboardMarkup()
r = bot.reply_to(
msg,
'Настройки отправлены вам в личные сообщения',
)
kb.add(types.InlineKeyboardButton(text = 'Удалить', callback_data = 'settings_delete {} {}'.format(msg.message_id, r.message_id)))
bot.edit_message_reply_markup(
chat_id = msg.chat.id,
message_id = r.message_id,
reply_markup = kb
)
bot.send_message(
msg.from_user.id,
'<b>Настройки группы {}</b>'.format(msg.chat.title),
reply_markup=group_setting(msg.chat.id),
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['start'], func=lambda msg: msg.chat.type == 'private')
def bot_user_start(msg):
message = msg
start_time = time.time()
if utils.is_user_new(msg):
if utils.have_args(msg):
referrer = utils.parse_arg(msg)[1]
bot.send_message(
msg.chat.id,
text.user_messages['start'],
reply_markup=generate_user_menu_kb(msg.from_user.id)
)
api.register_new_user(msg.from_user, 'ru')
else:
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['start'],
reply_markup=generate_user_menu_kb(msg.from_user.id)
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['start'], func=lambda msg: msg.chat.type != 'private')
def bot_group_start(msg):
start_time = time.time()
api.register_new_chat(msg.chat)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['get_logs'], func = lambda msg: msg.chat.id == -1001236256304 and utils.check_super_user(msg.from_user.id))
def bot_logs(msg):
bot.send_document(msg.chat.id, open('logs.txt', 'rb'))
@bot.message_handler(commands = ['menu'])
def bot_user_menu(msg):
bot.send_message(
msg.from_user.id,
'Ваше меню',
reply_markup = generate_user_menu_kb(msg.from_user.id)
)
@bot.message_handler(commands=['set_text'], func = lambda msg: msg.chat.type != 'private')
def bot_set_text(msg):
start_time = time.time()
message = msg
    if len(msg.text) not in [9, 21]:  # bare command with no argument ("/set_text" or "/set_text@<bot>")
        # Everything after the command entity and the following space is the new greeting
        new_greeting = msg.text[msg.entities[0].length + 1:]
if utils.check_text(new_greeting):
utils.set_greeting(msg, new_greeting)
bot.send_message(
msg.chat.id,
'Приветствие изменено'
)
else:
bot.send_message(
msg.chat.id,
text = 'Данное приветствие не работает'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['kick'], func=lambda msg: msg.chat.type != 'private')
def bot_kick(msg):
start_time = time.time()
utils.kick_user(msg)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['ban', 'ban_me_please'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_ban_me_please(msg):
start_time = time.time()
if msg.text == '/ban_me_please':
t = random.randint(1, 10)
ban_time = 60*t
try:
if not utils.check_status(msg.from_user.id, msg.chat.id):
bot.restrict_chat_member(
msg.chat.id,
msg.from_user.id,
until_date=str(time.time() + ban_time))
bot.reply_to(
msg,
text.group_commands[utils.get_group_lang(msg.chat.id)]['ban_me_please'].format(
t = t
),
parse_mode = 'HTML'
)
else:
bot.reply_to(
msg,
text.group_commands[utils.get_group_lang(msg.chat.id)]['errors']['prefix'].format(
reason = text.group_commands[utils.get_group_lang(msg.chat.id)]['errors']['reasons']['user_is_admin']
),
parse_mode='HTML'
)
except Exception as e:
logging.error(e)
else:
utils.ban_user(msg)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['ping'])
def bot_ping(msg):
start_timee = time.time()
uptime = datetime.timedelta(seconds = int(time.time()-start_time))
working_time = datetime.timedelta(seconds = int(time.time()-msg.date))
    # Normalize "day"/"days" so the translation lookup always sees one form
    uptime_str = str(uptime).replace('day', 'days').replace('dayss', 'days')
    working_time_str = str(working_time).replace('day', 'days').replace('dayss', 'days')
    if uptime.days != 0:
        uptime_str = uptime_str.replace(uptime_str.split(',')[0], utils.get_text_translation(uptime_str.split(',')[0], 'ru'))
    if working_time.days != 0:
        working_time_str = working_time_str.replace(working_time_str.split(',')[0], utils.get_text_translation(working_time_str.split(',')[0], 'ru'))
bot.send_message(
msg.chat.id,
text.user_messages['ru']['commands']['ping'].format(
unix_time = datetime.datetime.fromtimestamp(int(time.time())),
working_time = working_time_str,
uptime_sec = uptime
),
reply_to_message_id=msg.message_id,
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_timee)
@bot.message_handler(content_types=['new_chat_members'])
def bot_users_new(msg):
start_time = time.time()
api.register_new_chat(msg.chat)
chat_id = msg.chat.id
utils.new_member_logs(msg)
if api.get_group_params(msg.chat.id)['deletions']['system']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
if msg.chat.type == 'channel':
bot.send_message(
msg.chat.id,
text.promotion_message,
parse_mode='HTML'
)
bot.leave_chat(
msg.chat.id
)
    if msg.new_chat_member.id == 495038140:  # presumably this bot's own account (cf. my_info.id)
api.change_group_params(msg.chat.id, ujson.dumps(config.default_group_settings))
else:
if api.get_group_params(msg.chat.id)['restrictions']['read_only']:
bot.restrict_chat_member(
msg.chat.id,
msg.new_chat_member.id,
until_date = int(time.time()+api.get_group_params(msg.chat.id)['restrictions']['for_time']*3600)
)
r = bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['new_user']['read_only'].format(
user_id = msg.new_chat_member.id,
user_name = api.replacer(msg.new_chat_member.first_name),
ban_time = api.get_group_params(msg.chat.id)['restrictions']['for_time']
),
reply_markup = unban_new_user_kb(msg),
parse_mode = 'HTML'
)
utils.add_to_delete_queue(msg.chat.id, r.message_id, api.get_group_params(msg.chat.id)['restrictions']['for_time']*3600)
if msg.new_chat_member.is_bot and api.get_group_params(msg.chat.id)['kick_bots']:
bot.kick_chat_member(
msg.chat.id,
msg.new_chat_member.id
)
bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['bot'],
parse_mode = 'HTML',
reply_markup = types.ReplyKeyboardRemove()
)
elif utils.check_global_ban(msg):
bot.kick_chat_member(
msg.chat.id,
msg.new_chat_member.id
)
bot.send_message(
msg.chat.id,
text.group_commands['ru']['restricted']['global_ban'].format(
user_id = msg.new_chat_member.id,
user_name = msg.new_chat_member.first_name
),
parse_mode = 'HTML'
)
else:
utils.new_user_in_chat(msg)
if utils.need_greeting(msg):
r = bot.send_message(
msg.chat.id,
utils.generate_welcome_text(msg),
parse_mode='HTML'
)
utils.add_to_delete_queue(msg.chat.id, r.message_id, api.get_group_params(msg.chat.id)['greeting']['delete_timer'])
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types=[
'new_chat_members',
'left_chat_member',
'new_chat_title',
'new_chat_photo',
'delete_chat_photo',
'group_chat_created',
'supergroup_chat_created',
'channel_chat_created',
'migrate_to_chat_id',
'migrate_from_chat_id',
'pinned_message'
])
def bot_check_system(msg):
start_time = time.time()
if api.get_group_params(msg.chat.id)['deletions']['system']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['report'])
def bot_report(msg):
start_time = time.time()
admins = bot.get_chat_administrators(msg.chat.id)
chat = bot.get_chat(msg.chat.id)
msg_id = ''
if chat.username:
if msg.reply_to_message:
msg_id = msg.reply_to_message.message_id
txt = text.reports_messages['report']['to_admin']['have_username']['reply']
else:
msg_id = msg.message_id
txt = text.reports_messages['report']['to_admin']['have_username']['no_reply']
else:
txt = text.reports_messages['report']['to_admin']['no_username']
for i in admins:
try:
bot.send_message(
i.user.id,
txt.format(
group_name = api.replacer(msg.chat.title),
group_username = chat.username,
message_id = msg_id,
user_id = msg.from_user.id,
user_name = api.replacer(msg.from_user.first_name),
),
parse_mode='HTML'
)
except Exception as e:
print(e)
bot.reply_to(
msg,
text.reports_messages['report']['to_user'],
parse_mode = 'HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['unban'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_user_unban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and utils.have_args(msg):
words = utils.parse_arg(msg)[1]
user_id = int(words)
utils.unban_user(msg, user_id)
elif utils.check_status(msg.from_user.id, msg.chat.id) and msg.reply_to_message is not None:
user_id = msg.reply_to_message.from_user.id
utils.unban_user(msg, user_id)
elif utils.check_status(msg.from_user.id, msg.chat.id) and not utils.have_args(msg):
utils.send_err_report(msg, 'no_args_provided')
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['reregister'], func = lambda msg: msg.chat.type == 'supergroup')
def bot_reregister(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
api.register_new_chat(msg.chat)
api.change_group_params(msg.chat.id, ujson.dumps(config.default_group_settings))
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['registration'],
parse_mode = 'HTML'
)
@bot.message_handler(commands=['ro'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_users_ro(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
utils.read_only(msg)
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['stickerpack_ban'],func=lambda msg: msg.chat.type == 'supergroup')
def bot_stickerpack_ban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id):
utils.ban_stickerpack(msg)
else:
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['stickerpack_unban'], func=lambda msg: msg.chat.type != 'private')
def bot_stickerpack_unban(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and utils.have_args(msg):
stickerpack_name = utils.parse_arg(msg)[1]
utils.unban_stickerpack(msg, stickerpack_name)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['sticker_ban'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_sticker_ban(msg):
start_time = time.time()
    if utils.check_status(msg.from_user.id, msg.chat.id) and msg.reply_to_message is not None:
sticker_id = msg.reply_to_message.sticker.file_id
utils.ban_sticker(msg, sticker_id)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['sticker_unban'], func=lambda msg: msg.chat.type == 'supergroup')
def bot_sticker_unban(msg):
start_time = time.time()
if utils.have_args(msg) and utils.check_status(msg.from_user.id, msg.chat.id):
sticker_id = utils.parse_arg(msg)[1]
utils.unban_sticker(msg, sticker_id)
    elif utils.check_status(msg.from_user.id, msg.chat.id) and not utils.have_args(msg):
        utils.send_err_report(msg, 'no_args_provided')
    elif utils.have_args(msg) and not utils.check_status(msg.from_user.id, msg.chat.id):
        utils.send_err_report(msg, 'not_enought_rights')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['help'])
def bot_help(msg):
start_time = time.time()
bot.send_message(
msg.from_user.id,
text.user_messages[utils.get_user_lang(msg)]['help'],
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['about'], func=lambda msg: msg.chat.type == 'private')
def bot_about(msg):
start_time = time.time()
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['about'],
parse_mode='HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['warn'], func=lambda msg: msg.chat.type != 'private')
def bot_new_warn(msg):
start_time = time.time()
if utils.check_status(msg.from_user.id, msg.chat.id) and msg.reply_to_message is not None and not utils.check_status(msg.reply_to_message.from_user.id, msg.chat.id):
utils.new_warn(msg)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'not_enought_rights')
    elif msg.reply_to_message is not None and utils.check_status(msg.reply_to_message.from_user.id, msg.chat.id):
utils.send_err_report(msg, 'user_is_admin')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands=['donate'])
def bot_donate(msg):
start_time = time.time()
bot.send_message(
msg.chat.id,
text.group_commands['ru']['donate'],
parse_mode = 'HTML'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['get_id'])
def bot_get_id(msg):
bot.send_message(
msg.chat.id,
msg.chat.id
)
# @bot.message_handler(commands = ['voteban'])
# def bot_voteban(msg):
# utils.new_voteban(msg)
# bot.send_message(
# msg.chat.id,
# text.
# )
@bot.message_handler(commands = ['version'])
def bot_version(msg):
bot.send_message(
msg.chat.id,
text.user_messages[utils.get_user_lang(msg)]['commands']['version'].format(version = text.VERSION),
parse_mode = 'HTML'
)
@bot.message_handler(commands = ['set_rules'], func = lambda msg: utils.check_status(msg.from_user.id, msg.chat.id))
def bot_set_rules(msg):
start_time = time.time()
message = msg
    if len(msg.text) not in [10, 22]:  # bare command with no argument; the original [9, 21] looked copied from /set_text
        # Everything after the command entity and the following space is the new rules text
        new_rules = msg.text[msg.entities[0].length + 1:]
if utils.check_text(new_rules):
utils.set_rules(msg, new_rules)
bot.send_message(
msg.chat.id,
'Правила изменены'
)
else:
bot.send_message(
msg.chat.id,
text = 'Правила составлены неверно'
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['rules'], func = lambda msg: msg.chat.type != 'private')
def bot_get_rules(msg):
start_time = time.time()
try:
bot.send_message(
msg.from_user.id,
utils.generate_rules_text(msg),
parse_mode = 'HTML'
)
    except Exception:
        # Placeholder left by the author: the Bot API rejects empty message
        # text, so this fallback (e.g. when the bot cannot PM the user)
        # currently cannot send anything.
        bot.reply_to(
            msg,
            text = ''
        )
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(commands = ['reset_settings'], func = lambda msg: msg.chat.type != 'private')
def bot_reset_settings(msg):
start_time = time.time()
kb = types.InlineKeyboardMarkup()
kb.add(types.InlineKeyboardButton(text = 'Да, выполнить сброс', callback_data = 'reset_settings_confirmation::{chat_id}'.format(chat_id = msg.chat.id)))
kb.add(types.InlineKeyboardButton(text = 'Нет, не стоит', callback_data = 'reset_settings_abort::{chat_id}'.format(chat_id = msg.chat.id)))
if utils.check_status(msg.from_user.id, msg.chat.id):
bot.send_message(
msg.chat.id,
'Вы действительно хотите сбросить настройки?',
reply_markup = kb
)
@bot.message_handler(commands = ['update_time'], func = lambda msg: utils.check_super_user(msg.from_user.id))
def bot_update_time(msg):
bot_ping(msg)
subprocess.run("timedatectl set-time '{time}'".format(time = datetime.datetime.fromtimestamp(msg.date+1).strftime("%Y-%m-%d %H:%M:%S")), shell=True)
bot_ping(msg)
@bot.message_handler(content_types=['text'], func = lambda msg: msg.chat.type != 'private')
def bot_check_text(msg):
start_time = time.time()
msg_text = msg.text
msg_text_low = msg_text.lower()
if utils.is_restricted(msg) and not utils.check_status(msg.from_user.id, msg.chat.id):
bot.delete_message(
msg.chat.id,
msg.message_id
)
if msg_text_low.startswith('разбан'):
if utils.check_super_user(msg.from_user.id):
utils.global_unban(msg)
    elif msg_text_low in ['глобал бан']:
if utils.check_super_user(msg.from_user.id):
utils.global_ban(msg)
elif not utils.check_status(msg.from_user.id, msg.chat.id):
# if utils.is_new_in_chat(msg) and api.get_group_params(msg.chat.id)['restrict_new'] == '1':
if utils.check_for_urls(msg) and api.get_group_params(msg.chat.id)['deletions']['url']:
bot.delete_message(
msg.chat.id,
msg.message_id
)
bot.send_message(
msg.chat.id,
text.group_commands[utils.get_group_lang(msg.chat.id)]['restricted']['url'].format(
user_id = msg.from_user.id,
user_name = api.replacer(msg.from_user.first_name)
),
parse_mode='HTML'
)
# elif utils.check_for_forward(msg) and api.get_group_params(msg.chat.id)['deletions']['forward']:
# bot.delete_message(
# msg.chat.id,
# msg.message_id
# )
# bot.send_message(
# msg.chat.id,
# text.group_commands[utils.get_group_lang(msg.chat.id)]['restricted']['url'].format(
# user_id = msg.from_user.id,
# user_name = api.replacer(msg.from_user.first_name)
# ),
# parse_mode='HTML'
# )
utils.new_update(msg, time.time()-start_time)
# Developer-only helper: echoes a photo's file_id (303986717 is presumably the author's private chat)
@bot.message_handler(content_types=['photo'], func = lambda msg: msg.chat.id == 303986717)
def bot_text(msg):
start_time = time.time()
bot.reply_to(msg, "<code>'{}': '{}',</code>".format(msg.photo[0].file_id, msg.caption), parse_mode ='HTML')
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types = ['sticker'], func = lambda msg: not utils.check_status(msg.from_user.id, msg.chat.id))
def bot_check_sticker(msg):
start_time = time.time()
if utils.is_restricted(msg) or utils.is_sticker_restricted(msg):
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
@bot.message_handler(content_types = ['audio', 'document', 'photo', 'sticker', 'video', 'video_note', 'voice', 'location', 'contact'], func = lambda msg: not utils.check_status(msg.from_user.id, msg.chat.id))
def bot_check_attachments(msg):
start_time = time.time()
if utils.is_restricted(msg):
bot.delete_message(
msg.chat.id,
msg.message_id
)
utils.new_update(msg, time.time()-start_time)
# Callback buttons
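# Editor's note (assumption): callback_data strings follow the
# '<action>::<chat_id>[::<extra>]' convention and are split on '::'
# (see utils.parse_chat_id), e.g. 'settings::-1001234567890'.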
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_chat_refs::'))
def bot_get_chat_refs(c):
chat_id = utils.parse_chat_id(c)
user_id = c.from_user.id
inviters = utils.get_top_inviters(chat_id)
m = text.group_commands[utils.get_group_lang(chat_id)]['refs_stats']['header']
counter = 0
for i in inviters:
inviter_info = bot.get_chat_member(chat_id, i['inviter'])
counter += 1
m += text.group_commands[utils.get_group_lang(chat_id)]['refs_stats']['body'].format(
inviter_pos = counter,
inviter_id = inviter_info.user.id,
inviter_firstname = inviter_info.user.first_name,
invited_count = int(i['COUNT(`inviter`)'])
)
bot.send_message(
user_id,
m,
parse_mode = 'HTML'
)
bot.answer_callback_query(
c.id,
text = 'Список отправлен',
show_alert = True
)
@bot.callback_query_handler(func = lambda c: c.data in ['my_chats', 'to_groups_list'])
def my_chats_list(c):
user_id = c.from_user.id
user_settings = api.get_user_param(user_id, 'settings')
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Список ваших групп'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_user_groups(user_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_settings_json'))
def bot_get_settings_json(c):
chat_id = utils.parse_chat_id(c)
bot.send_message(
chat_id = c.from_user.id,
text = 'Эти настройки можно получить в любое время и отправить @f0rden для восстановления их, в случае сбоя:\n'+ujson.dumps(api.get_group_params(chat_id))
)
bot.answer_callback_query(
c.id,
text = 'Настройки отправлены',
show_alert = True
)
@bot.callback_query_handler(func = lambda c: c.data == 'stats_menu')
def bot_stats_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.service_messages['stats'].format(
all_users = api.get_users_count(),
all_chats = api.get_chats_count(),
unblocked_users = api.get_unblocked_users_count(),
unblocked_chats = api.get_unblocked_chats_count()
)
)
@bot.callback_query_handler(func = lambda c: c.data == 'change_lang')
def bot_change_lang(c):
user_id = c.from_user.id
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.user_messages['start'],
parse_mode = 'HTML'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = create_user_language_keyboard()
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('settings::'))
def chat_settings(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = '<b>Настройки группы {}</b>'.format(bot.get_chat(chat_id).title),
parse_mode = 'HTML'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = group_setting(chat_id),
)
@bot.callback_query_handler(func = lambda c: c.data == 'to_main_menu')
def bot_to_main_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Ваше меню'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_user_menu_kb(c.from_user.id)
)
@bot.callback_query_handler(func = lambda c: c.data == 'broadcast_menu')
def bot_broadcast_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Выберите тип рассылки'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_vars_menu_kb()
)
@bot.callback_query_handler(func = lambda c: c.data == 'check_broadcast')
def bot_broadcast_check_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Рассылка начата'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_check_menu_kb()
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('broadcast_check'))
def bot_broadcast_check(c):
arg = c.data.split('::')[1]
curr_bot_settings = ujson.loads(api.get_bot_settings(secret_config.token))
if arg in ['users', 'chats', 'all']:
curr_bot_settings['broadcast']['check']['recievers'] = arg
api.change_bot_settings(secret_config.token, ujson.dumps(curr_bot_settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_broadcast_check_menu_kb()
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
t = Thread(target = utils.make_broadcast, kwargs = {
'is_test': True,
'receivers': curr_bot_settings['broadcast']['check']['recievers'],
'cont_type': 'text',
'msg_text': '',
'file_id': '',
'user_id': c.from_user.id,
'message_id': c.message.message_id
}
)
kb = types.InlineKeyboardMarkup()
kb.add(types.InlineKeyboardButton(text = 'В главное меню', callback_data = 'to_main_menu'))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = kb
)
t.start()
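        # NOTE: join() blocks this handler until the test broadcast finishes,
        # so the thread only isolates the work; it does not run concurrently
        # with the handler itself.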
t.join()
@bot.callback_query_handler(func = lambda c: c.data == 'admin_menu')
def bot_admin_menu(c):
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = 'Админка'
)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = generate_admin_menu_kb()
)
@bot.callback_query_handler(func=lambda c: c.data.startswith('lang::'))
def change_language(c):
words = re.split('::', c.data)
lang = words[1]
bot.edit_message_text(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
text = text.user_messages[lang]['chosen_language'])
api.register_new_user(c.from_user, lang)
@bot.callback_query_handler(func = lambda c: c.data.startswith('get_notifications'))
def notify_change(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_main(chat_id, 'get_notifications')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('del_url'))
def del_url(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_main(chat_id, 'url')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['url']])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['url']])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('del_system'))
def del_system(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_main(chat_id, 'system')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['system']])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'Вы не являетесь администратором. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['system']])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('kick_bots'))
def kick_bots(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_main(chat_id, 'kick_bots')
bot.edit_message_reply_markup(
chat_id=c.message.chat.id,
message_id=c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)[c.data.split('::')[0]]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('deletions_settings'))
def to_deletions(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Переход выполнен.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('delete::'))
def group_settings_deletions(c):
chat_id = utils.parse_chat_id(c)
cont_type = re.split('::', c.data)[1]
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.change_state_deletions_files(chat_id, cont_type)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены. Статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['files'][cont_type]])
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия. Текущий статус настройки: {}'.format(config.settings_statuses[api.get_group_params(chat_id)['deletions']['files'][cont_type]])
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('change_all'))
def group_settings_deletions_all(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
for i in config.available_attachments:
utils.change_state_deletions_files(chat_id, i)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = delete_settings(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('to_group_settings_menu'))
def back_to_group_settings(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup=group_setting(utils.parse_chat_id(c))
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_del'))
def del_warns(c):
user_id = utils.parse_user_id(c)
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
api.zeroing_warns(user_id, chat_id)
bot.edit_message_text(
text = 'Предупреждения обнулены.',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('new_users_restrictions'))
def new_users_restrictions(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('read_only'))
def new_users_ro(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['read_only'] = config.settings_states[settings['restrictions']['read_only']]
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('time_ro_'))
def ro_time_change(c):
change_time = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['for_time'] = settings['restrictions']['for_time'] + change_time
if settings['restrictions']['for_time'] < 1:
settings['restrictions']['for_time'] = 1
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_count_'))
def warns_count_change(c):
change_count = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['warns']['count'] = settings['warns']['count'] + change_count
if settings['warns']['count'] < 1:
settings['warns']['count'] = 1
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_settings'))
def warns_settings_menu(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('warns_action_'))
def warns_action_change(c):
new_mod = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['warns']['action'] = new_mod
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = warns_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('unban_new_user'))
def unban_new_user(c):
chat_id = utils.parse_chat_id(c)
user_id = utils.parse_user_id(c)
if api.get_group_params(chat_id)['restrictions']['admins_only']:
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
utils.unban_user_button(c)
user = bot.get_chat_member(
chat_id,
user_id
)
bot.edit_message_text(
text = text.group_commands[utils.get_group_lang(c.message.chat.id)]['restricted']['new_user']['button_pressed'].format(
user_id = user.user.id,
user_name = api.replacer(user.user.first_name)
),
parse_mode = 'HTML',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
            utils.add_to_delete_queue(chat_id, c.message.message_id, api.get_group_params(chat_id)['greeting']['delete_timer'])
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
else:
if c.from_user.id == user_id or utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
user = bot.get_chat_member(
chat_id,
user_id
)
if user.status in ['restricted']:
bot.restrict_chat_member(
chat_id,
user_id,
can_send_media_messages=True,
can_add_web_page_previews=True,
can_send_messages=True,
can_send_other_messages=True
)
bot.edit_message_text(
text = text.group_commands[utils.get_group_lang(c.message.chat.id)]['restricted']['new_user']['button_pressed'].format(
user_id = user.user.id,
user_name = api.replacer(user.user.first_name)
),
parse_mode = 'HTML',
chat_id = c.message.chat.id,
message_id = c.message.message_id
)
utils.add_to_delete_queue(chat_id, c.message.message_id, api.get_group_params(chat_id)['greeting']['delete_timer'])
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('new_restrictions_admins_only_'))
def admins_only_change(c):
chat_id = utils.parse_chat_id(c)
state = c.data.split('_')[4].split('::')[0]
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['restrictions']['admins_only'] = utils.to_bool(state)
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = new_users_restrictions_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_settings'))
def welcome_settings(c):
chat_id = utils.parse_chat_id(c)
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_state'))
def welcome_settings_state(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
curr_state = settings['greeting']['is_enabled']
new_state = config.settings_states[curr_state]
settings['greeting']['is_enabled'] = new_state
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_timer'))
def welcome_timer_change(c):
change_count = int(c.data.split('_')[2].split('::')[0])
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
settings = api.get_group_params(chat_id)
settings['greeting']['delete_timer'] = settings['greeting']['delete_timer'] + change_count
if settings['greeting']['delete_timer'] < 0:
settings['greeting']['delete_timer'] = 0
api.change_group_params(chat_id, ujson.dumps(settings))
bot.edit_message_reply_markup(
chat_id = c.message.chat.id,
message_id = c.message.message_id,
reply_markup = welcome_settings_kb(chat_id)
)
bot.answer_callback_query(
callback_query_id = c.id,
text = 'Изменения подтверждены.'
)
else:
bot.answer_callback_query(
callback_query_id = c.id,
show_alert = True,
text = 'У вас недостаточно прав для выполнения этого действия.'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('settings_delete'))
def del_settings(c):
words = c.data.split()
bot.delete_message(
c.message.chat.id,
words[2]
)
bot.delete_message(
c.message.chat.id,
words[1]
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('welcome_get'))
def get_welcome_text(c):
chat_id = utils.parse_chat_id(c)
bot.send_message(
c.message.chat.id,
utils.get_greeting(chat_id),
parse_mode = 'HTML'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('reset_settings'))
def reset_settings_button(c):
chat_id = utils.parse_chat_id(c)
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
if c.data.startswith('reset_settings_confirmation'):
api.register_new_chat(c.message.chat)
api.change_group_params(chat_id, ujson.dumps(config.default_group_settings))
bot.send_message(
c.message.chat.id,
'Настройки сброшены.'
)
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
else:
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
bot.send_message(
c.message.chat.id,
'Сброс отменен'
)
@bot.callback_query_handler(func = lambda c: c.data.startswith('leave_'))
def bot_leave_cb(c):
if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
if c.data.endswith('confirm'):
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
bot.send_message(
c.message.chat.id,
text.group_commands[utils.get_group_lang(c.message.chat.id)]['leave']['accepted']
)
bot.leave_chat(
c.message.chat.id
)
else:
bot.send_message(
c.message.chat.id,
text.group_commands[utils.get_group_lang(c.message.chat.id)]['leave']['cancelled']
)
bot.delete_message(
c.message.chat.id,
c.message.message_id
)
# @bot.callback_query_handler(func = lambda c: c.data.startswith('settings_captcha'))
# def change_captcha_settings(c):
# chat_id = utils.parse_chat_id(c)
# if utils.check_status(c.from_user.id, utils.parse_chat_id(c)):
# settings = api.get_group_params(chat_id)
# settings['']
# api.change_group_params(chat_id, )
# Webhook
bot.remove_webhook()
bot.set_webhook(
url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH,
certificate=open(WEBHOOK_SSL_CERT, 'r'))
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
# Start aiohttp server
web.run_app(
app,
host=WEBHOOK_LISTEN,
port=WEBHOOK_PORT,
ssl_context=context,
)
# bot.remove_webhook()
# bot.polling()
| 1.953125 | 2 |
shared/secrets.example.py | Mo-Talha/Nomad | 0 | 12791462 | MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DATABASE = 'nomad'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0 | 1.132813 | 1 |
espresso.py | isikdos/Espresso | 0 | 12791463 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 22:08:31 2019
@author: iaricanli
"""
import copy
T = True
F = False
D = "_"
"""
Generate the inputs for the algorithm: a list of dictionaries.
Each element of the list represents a different boolean expression input --
say if we are trying to reduce something for a 7 segment display over a period
of multiple timesteps.
Each dictionary represents a boolean function. The key is the boolean input
represented in integer form (aka A^!B^!C^D -> 1001 -> 9) and the value is
either True, False, or Don't Care.
Reading from a file not yet supported.
Do whatever you want in here.
Arguments:
LEN : integer
- The dimensionality of the desired output truth table. AKA,
the number of boolean variables.
Return:
tt (truth table): list (dict (int, Booleanish))
        - The only question here really is: what is a Booleanish?
        In addition to True and False there is a third concept, Don't Care,
        which is represented here as "_". It fails "is True" but is still
        truthy, so plain `if` checks pass. This is abused heavily.
"""
def Get_Truth_Table(LEN):
tt = list()
e = dict()
e = {
0: T,
1: T,
2: D,
3: T,
4: F,
5: F,
6: T,
7: T,
}
tt.append(e)
return tt
"""
This is a helper function; it exists in case I want to
expand the code and allow different iteration over the inputs.
If this returned randomly, the code would no longer necessarily
output predictable results.
"""
def _Provide_Index(dim):
return range(0, dim)
"""
Performs the expansion of a cube through the N-space,
attempting to expand through every dimension, one at a time.
While it does this, it maps the boolean expressions to minterms
and it maps the minterms to the boolean expressions. Thus providing
the program a quick view of the rows and columns regarding the results found.
Arguments:
boolean array: dict (int, truth)
- The key maps to the integer representation of the inputs
- The value points to whether that is mapped to True, False, or DC
idx: int
- The space in the boolean array we are beginning at, where the
expansion begins from.
dim: int
- The total number of dimensions we are operating in.
    # MUTATED IN PLACE (passed by reference)
minterms2bln: dict (int, boolean_expression)
- Maps the minterms (tracked by integer -- same as in the idx we see
above), and keeps track of which minterms are related to what
boolean expressions.
Return:
covered_minterms: set
- The defined set of minterms covered by the boolean expression
bln_expr: boolean_expression
- The boolean expression (defined by a point and then a mask)
that covered the aforementioned minterms.
"""
def Expand_Cube(boolean_array, idx, dim, minterms2bln):
bln_expr = boolean_expression(idx, 0)
# Define the space of the cube
space = [idx]
covered_minterms = {idx}
if idx in minterms2bln:
minterms2bln[idx].add(bln_expr)
else:
minterms2bln[idx] = {bln_expr}
# Iterate over the indices however we decide
for i in _Provide_Index(dim):
# Control variable to exit a loop
_continue = False
# Convert the index into the representitive integer
dim2int = 2**i
# The space being explored
new_space = list()
for index in space:
# MAGIC LINE
# We need to turn 1s into 0s and 0s into 1s, depending on the index
new_index = index ^ dim2int
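            # e.g. index 0b101 with dim2int 0b001 gives new_index 0b100: the
            # neighbouring vertex of the cube along that dimension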
# We're expanding the cube, verify that we're expanding it into
# valid space. If it is valid, add the expanding indices into list
if new_index in boolean_array and boolean_array[new_index]:
new_space.append(new_index)
else:
# If the new space doesn't pan out _perfectly_, keep going to
                # the next index
_continue = True
break
# We don't want to extend into the space of the selected index
# if it didn't pan out. So skip this one and move on to the next
# dimension.
if not _continue:
# We like the new dimension, and are going to cover all the new
# elements into it.
space.extend(new_space)
for ns in new_space:
# If the value at the boolean array is specifically
# True and not just Don't Care, add it to the Covered Minterms
if boolean_array[ns] is T:
covered_minterms.add(ns)
if ns in minterms2bln:
minterms2bln[ns].add(bln_expr)
else:
minterms2bln[ns] = {bln_expr}
# Allow the Mask to contain the information regarding the dimension
# that was just covered.
bln_expr.mask += dim2int
return covered_minterms, bln_expr
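# A cube is encoded as boolean_expression(idx, mask): idx is one vertex of the
# cube and mask has a 1 bit for every dimension the cube spans, i.e. every
# don't-care bit of the resulting implicant.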
class boolean_expression(object):
def __init__(self, idx, mask):
self.idx = idx
self.mask = mask
def __eq__(self, b):
return self.idx == b.idx and self.mask == b.mask
def __hash__(self):
return hash((self.idx, self.mask))
def __str__(self):
return "boolean_expression({0}, {1})".format(self.idx, self.mask)
def __repr__(self):
return self.__str__()
def Expand(truth_table, dim):
#
# Iterate over every boolean output
#
expr_per_output = list()
for boolean_array in truth_table:
bln2minterms= dict()
minterms2bln = dict()
for idx, bln in boolean_array.items():
if bln is T:
covered_minterms, bln_expr = Expand_Cube(boolean_array,
idx,
dim,
minterms2bln)
bln2minterms[bln_expr] = covered_minterms
# bln2minterms and minterms2bln
# are two dictionaries that are dually referent
# in order to keep computations fast.
expr_per_output.append((bln2minterms, minterms2bln))
return expr_per_output
def Intersect( list_of_maps ):
#
# Finds intersections between boolean statements and
# the minterms they cover
#
lom = list()
# Iterate over every solution-set per output
itr_list_of_maps = copy.deepcopy(list_of_maps)
for bln2minterms, minterms2bln in itr_list_of_maps:
# First we're going to look for any case where a minterm
# maps to only one boolean expression.
required_blns = set()
todelete = list()
itr_minterms2bln = copy.deepcopy(minterms2bln)
for minterm, set_of_blns in itr_minterms2bln.items():
if len(set_of_blns) == 1:
# WE found one!
# Take it
required_bln = set_of_blns.pop()
# Now find all the minterms related to the boolean
minterms_correlated_to_bln = bln2minterms[required_bln]
# Iterate over them
for correlated_minterm in minterms_correlated_to_bln:
# and remove the boolean from their knowledge
minterms2bln[correlated_minterm].remove(required_bln)
# Then delete the entire boolean from the booly-books
del bln2minterms[required_bln]
todelete.append(minterm)
# And remember what we've done on this day, this evil day.
required_blns.add( required_bln )
for i in todelete:
del minterms2bln[i]
        # Now we get rid of booleans as we determine each "best candidate"
while len(minterms2bln):
# We are looking at only a SINGLE minterm.
# Scanning a subspace to decrease overall computation time
# and keep everything in linear time.
minterm = Select_Minterm(minterms2bln)
most = 0
best_candidate = None
# We determine the "Best candidate" as the boolean expression
# with the greatest number of related minterms
for bln in minterms2bln[minterm]:
if len(bln2minterms[bln]) > most:
best_candidate = bln
most = len(bln2minterms[bln])
required_blns.add( best_candidate )
# Now find all the minterms related to the boolean
minterms_correlated_to_bln = bln2minterms[best_candidate]
# Iterate over them
todelete = list()
for correlated_minterm in minterms_correlated_to_bln:
# Delete all minterms correlated to the highest-scoring boolean
for related_bln in minterms2bln[correlated_minterm]:
todelete.append((related_bln, correlated_minterm))
                # Actually delete them
del minterms2bln[correlated_minterm]
for related_bln, correlated_minterm in todelete:
bln2minterms[related_bln].remove(correlated_minterm)
            # Then delete the aforementioned best candidate
del bln2minterms[best_candidate]
lom.append(required_blns)
return lom
"""
This is a helper function; it exists in case I want to
expand the code and allow different iteration over the inputs.
If this returned randomly, the code would no longer necessarily
output predictable results.
"""
def Select_Minterm(minterms2bln):
return list(minterms2bln.keys())[0]
def main(dim):
#
# Define truth table
#
truth_table = Get_Truth_Table(dim)
# Perform the Expand operation on every output set
list_of_maps = Expand(truth_table, dim)
list_of_covering_blns = Intersect(list_of_maps)
return list_of_covering_blns
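# A minimal usage sketch (not part of the original module): minimize the
# built-in 3-variable truth table and print the covering cube expressions.
if __name__ == '__main__':
    for covering_set in main(3):
        for expr in covering_set:
            print(expr)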
| 3.9375 | 4 |
bot/config/config.py | 0x0bloodyknight/jdan734-bot | 0 | 12791464 | import yaml
from pathlib import Path
from os import environ
class Config:
def __init__(self, file_path="config.yml"):
try:
with open(file_path, encoding="UTF-8") as file:
self.config = yaml.full_load(file.read())
except Exception:
self.config = {}
self.environ = environ
def get(self, param, default=None):
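        # NOTE: the resolved value is written into this module's globals(),
        # so each config.get("x", ...) call below defines a module-level
        # constant X; environment variables take precedence over the YAML file.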
globals()[param.upper()] = (
self.environ.get(param.upper()) or
self.config.get(param, default))
config = Config()
config.get("db_path", default="jdanbot.db")
config.get("delay", default=30)
config.get("rss_feeds", default=[])
config.get("rss", default=False)
config.get("image_path", default="bot/cache/{image}.jpg")
config.get("token")
config.get("status", default="unknown")
config.get("vk", default=False)
config.get("vk_channels", default=())
config.get("access_token", default="")
config.get("katz_bots", default=False)
config.get("youtube", default=False)
config.get("youtube_channels", default=())
config.get("youtube_key", default=None)
config.get("langs_list", default=[
"ru", "en", "sv", "de", "ce",
"tt", "ba", "pl", "uk", "be",
"es", "he", "xh", "ab"])
config.get("unique_commands", default={
"ru": ["wikiru2", "w", "wiki"],
"en": ["van", "wen", "v"],
"uk": ["wikiua", "wua", "pawuk"],
"be-tarask": ["wikibe-tarask", "wikibet", "wbet", "xbet"]
})
config.get("admin_notes", default=[
"__rules__",
"__enable_bot__",
"__ban__",
"__welcome__",
"__enable_response__",
"__enable_welcome__",
"__enable_greatings__",
"__warns_to_ban__"
])
config.get("eggs", default=[
{"commands": ["java1"], "audio": "java.ogg"},
{"commands": ["cool_music"], "audio": "music.ogg"},
{"commands": ["cum"], "audio": "cum.ogg"},
{"commands": ["longcum"], "audio": "longcum.ogg"},
{"commands": ["frog"], "audio": "lyagushka.ogg"}])
config.get("stickers", {
"pizda": "<KEY>",
"net_pizdy": "<KEY>",
"pizda_tebe": "<KEY>",
"xui": "<KEY>",
"net_xua": "<KEY>"
})
BASE_DIR = Path(__file__).parent.parent
LOCALES_DIR = BASE_DIR / "i18n"
| 2.765625 | 3 |
mediator/event/aggregate.py | dlski/python-mediator | 12 | 12791465 | from typing import Any, Dict, List, Optional, Tuple
from mediator.event.base import EventPublisher
class EventAggregateError(Exception):
"""
Event aggregate base error
"""
class ConfigEventAggregateError(AssertionError, EventAggregateError):
"""
Config event aggregate error.
Raised when event aggregate object is not properly configured.
"""
class EventAggregate:
"""
Event aggregate object.
Used for staging events and publishing them in one transaction.
"""
_publisher: Optional[EventPublisher]
_staged: List[Tuple[Any, Dict[str, Any]]]
def __init__(self):
"""
Initializes empty event aggregate.
"""
self._publisher = None
self._staged = []
def use(self, publisher: EventPublisher):
"""
Sets event publisher used for event sending.
:param publisher: event publisher to use
:return: self
"""
self._publisher = publisher
return self
async def commit(self):
"""
Commits staged events by underlying publisher.
"""
publisher = self._publisher
if publisher is None:
raise ConfigEventAggregateError(f"Publisher is not set in {self!r}")
async with publisher.transaction() as context:
for obj, kwargs in self._staged:
await context.publish(obj, **kwargs)
self._staged.clear()
def cleanup(self):
"""
Clears all staged events.
"""
self._staged.clear()
def enqueue(self, obj: Any, **kwargs):
"""
Stages given event object with optional extra arguments.
:param obj: event object
:param kwargs: optional extra arguments
"""
self._staged.append((obj, kwargs))
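# A minimal usage sketch (publisher and OrderPlaced are illustrative names,
# not part of this module):
#
#     aggregate = EventAggregate().use(publisher)
#     aggregate.enqueue(OrderPlaced(order_id=1), topic="orders")
#     await aggregate.commit()  # publishes all staged events in one transaction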
| 2.390625 | 2 |
samples/wsgi_session.py | UKTradeInvestment/pyslet | 2 | 12791466 | #! /usr/bin/env python
import pyslet.xml.structures as xml
from pyslet.wsgi import SessionApp, session_decorator
class MyApp(SessionApp):
settings_file = 'samples/wsgi_session/settings.json'
def init_dispatcher(self):
super(MyApp, self).init_dispatcher()
self.set_method("/", self.home)
self.set_method("/setname", self.setname)
@session_decorator
def home(self, context):
page = """<html><head><title>Session Page</title></head><body>
<h1>Session Page</h1>
%s
</body></html>"""
if context.session.entity['UserName']:
noform = """<p>Welcome: %s</p>"""
page = page % (
noform % xml.EscapeCharData(
context.session.entity['UserName'].value))
else:
form = """<form method="POST" action="setname">
<p>Please enter your name: <input type="text" name="name"/>
<input type="hidden" name=%s value=%s />
<input type="submit" value="Set"/></p>
</form>"""
page = page % (
form % (xml.EscapeCharData(self.csrf_token, True),
xml.EscapeCharData(context.session.sid(),
True)))
context.set_status(200)
return self.html_response(context, page)
@session_decorator
def setname(self, context):
user_name = context.get_form_string('name')
if user_name:
context.session.entity['UserName'].set_from_value(user_name)
context.session.touch()
return self.redirect_page(context, context.get_app_root())
if __name__ == "__main__":
MyApp.main()
| 2.59375 | 3 |
finetune.py | kbehouse/vgg-face-keras | 3 | 12791467 | from keras.engine import Model
from keras.layers import Flatten, Dense, Input
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from vggface import VGGFace
from sklearn.metrics import log_loss
from one_face_predict import prdict_one_face
from load_face_data import load_face_data
from facetool import FaceTool
def train_face_model(finetune = True):
#===============custom parameters =============== #
hidden_dim = 512
img_width, img_height = 224, 224
nb_class = 16
One_Class_Train_MAX = 30
One_Class_Valid_MAX = 10
nb_train_samples = nb_class * One_Class_Train_MAX
nb_validation_samples = nb_class * One_Class_Valid_MAX
nb_epoch = 10
batch_size = 16
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
save_model_path = './faceDB/face-model.json'
save_model_h5 = './faceDB/face-model.h5'
save_face_index = './faceDB/face-index.json'
# =============== NN =============== #
vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
# print('----------------After Add finetune layers----------------')
# for l in vgg_model.layers:
# print('Name ', l.name, 'trainable' ,l.trainable)
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
x = Dense(hidden_dim, activation='relu', name='fc6')(x)
x = Dense(hidden_dim, activation='relu', name='fc7')(x)
out = Dense(nb_class, activation='softmax', name='fc8')(x)
custom_vgg_model = Model(vgg_model.input, out)
if finetune:
# print('----------------After Disable Trainable----------------')
all_layers = custom_vgg_model.layers
pool5_index = custom_vgg_model.layers.index(custom_vgg_model.get_layer('pool5'))
for ind, l in enumerate(all_layers):
if ind <= pool5_index:
l.trainable = False
# all_layers[:pool5_index].trainable = False
# for ind, l in enumerate(all_layers):
# print('Name ', l.name, 'trainable' ,l.trainable,'index',ind)
# Train your model as usual.
# You can Try different optimizers
# opt = optimizers.SGD(lr=1e-5, decay=1e-6) #OK
# adagrad = optimizers.Adagrad( decay=1e-6)
# opt = optimizers.Adadelta( )
opt = optimizers.Adam(lr=1e-5, decay=1e-6)
custom_vgg_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
custom_vgg_model.summary()
X_train, Y_train, X_valid, Y_valid, Face_Label_Dic = load_face_data('data/')
ftool = FaceTool()
ftool.write_json(save_face_index,Face_Label_Dic)
# Start Fine-tuning
custom_vgg_model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
)
# Make predictions
predictions_valid = custom_vgg_model.predict(X_valid, batch_size=batch_size, verbose=1)
# Cross-entropy loss score
score = log_loss(Y_valid, predictions_valid)
# ===============Save Model===============
print("Saved model to disk")
model_json = custom_vgg_model.to_json()
with open(save_model_path, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
custom_vgg_model.save_weights(save_model_h5)
# ===============Test===============
    face_index = prdict_one_face(custom_vgg_model, 'data/test/1.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/2.jpg')
    print(Face_Label_Dic[face_index])
    face_index = prdict_one_face(custom_vgg_model, 'data/test/3.jpg')
    print(Face_Label_Dic[face_index])
if __name__ == '__main__':
train_face_model(False) | 2.578125 | 3 |
assets_new_new/data/2021-03-05/json_for_classification/get_classification_file_with_padim_segment.py | ggzhang0071/PaDiM-Anomaly-Detection-Localization-master | 0 | 12791468 |
from get_classification_file_with_original_annotation import get_image_label_dict_from_original_annotation, get_classification_file_based_on_label
original_annotation_path="/git/PaDiM-master/assets_new_new/data/2021-03-05/json_for_classification"
image_data_root="/git/PaDiM-master/kangqiang_result/segment_image_result_wide_resnet50_2/image/**/*.jpg"
save_image_path="/git/PaDiM-master/kangqiang_result/croped_images_with_padim_segment_for_classification"
original_image_label_dict= get_image_label_dict_from_original_annotation(original_annotation_path)
get_classification_file_based_on_label(original_image_label_dict,image_data_root,save_image_path)
| 2.25 | 2 |
service_layer/customer_service_interface.py | yeonghwanchoi/Project_bank | 0 | 12791469 | from abc import ABC, abstractmethod
class CustomerServiceInterface(ABC):
@abstractmethod
def get_all_accounts_for_user(self, id: int) -> list:
pass
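# A minimal illustrative implementation; the in-memory backing dict is a
# hypothetical example, not part of the project:
class InMemoryCustomerService(CustomerServiceInterface):
    def __init__(self, accounts_by_user: dict):
        self._accounts_by_user = accounts_by_user
    def get_all_accounts_for_user(self, id: int) -> list:
        return self._accounts_by_user.get(id, [])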
| 2.828125 | 3 |
scripts/benchmark/contract_data/__init__.py | zixuanzh/py-evm | 1 | 12791470 | import pathlib
from typing import (
Iterable
)
CONTRACTS_ROOT = "./scripts/benchmark/contract_data/"
CONTRACTS = [
"erc20.sol"
]
def get_contracts() -> Iterable[pathlib.Path]:
for val in CONTRACTS:
yield pathlib.Path(CONTRACTS_ROOT) / pathlib.Path(val)
| 2.28125 | 2 |
pgAdmin/tools/sqleditor/utils/tests/test_is_query_resultset_updatable.py | WeilerWebServices/PostgreSQL | 0 | 12791471 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import random
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from pgadmin.tools.sqleditor.tests.execute_query_test_utils \
import execute_query
from datetime import date
class TestQueryUpdatableResultset(BaseTestGenerator):
""" This class will test the detection of whether the query
result-set is updatable. """
scenarios = [
('When selecting all columns of the table', dict(
sql='SELECT * FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, True, True]
)),
('When selecting all primary keys of the table', dict(
sql='SELECT pk_col1, pk_col2 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True]
)),
('When selecting some of the primary keys of the table', dict(
sql='SELECT pk_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False]
)),
('When selecting none of the primary keys of the table', dict(
sql='SELECT normal_col1 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False]
)),
('When renaming a primary key', dict(
sql='SELECT pk_col1 as some_col, pk_col2 FROM "{0}";',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False, False]
)),
('When renaming a normal column', dict(
sql='SELECT pk_col1, pk_col2, normal_col1 as some_col FROM "{0}";',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, False]
)),
('When renaming a normal column to a primary key name', dict(
sql='SELECT normal_col1 as pk_col1, pk_col1, pk_col2 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[False, True, True]
)),
('When selecting a normal column twice', dict(
sql='SELECT pk_col1, pk_col2, normal_col1, normal_col1 FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, True, False]
)),
('When selecting a non-table column', dict(
sql='SELECT pk_col1, pk_col2, normal_col1 || normal_col2 FROM {0};',
expected_primary_keys={'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=False,
table_has_oids=False,
expected_cols_is_editable=[True, True, False]
)),
('When selecting primary keys and oids (table with oids)', dict(
sql='SELECT *, oid FROM {0};',
expected_primary_keys={
'pk_col1': 'int4',
'pk_col2': 'int4'
},
expected_has_oids=True,
table_has_oids=True,
expected_cols_is_editable=[True, True, True, True, False]
)),
('When selecting oids without primary keys (table with oids)', dict(
sql='SELECT oid, normal_col1, normal_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=True,
table_has_oids=True,
expected_cols_is_editable=[False, True, True]
)),
('When selecting none of the primary keys or oids (table with oids)',
dict(
sql='SELECT normal_col1, normal_col2 FROM {0};',
expected_primary_keys=None,
expected_has_oids=False,
table_has_oids=True,
expected_cols_is_editable=[False, False]
))
]
def setUp(self):
self.test_table_name = "test_for_updatable_resultset" + \
str(random.randint(1000, 9999))
self._initialize_database_connection()
self._initialize_query_tool()
self._initialize_urls()
def runTest(self):
self._create_test_table(table_has_oids=self.table_has_oids)
response_data = self._execute_select_sql()
self._check_primary_keys(response_data)
self._check_oids(response_data)
self._check_editable_columns(response_data)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
def _execute_select_sql(self):
sql = self.sql.format(self.test_table_name)
is_success, response_data = \
execute_query(tester=self.tester,
query=sql,
poll_url=self.poll_url,
start_query_tool_url=self.start_query_tool_url)
self.assertEquals(is_success, True)
return response_data
def _check_primary_keys(self, response_data):
primary_keys = response_data['data']['primary_keys']
self.assertEquals(primary_keys, self.expected_primary_keys)
def _check_oids(self, response_data):
has_oids = response_data['data']['has_oids']
self.assertEquals(has_oids, self.expected_has_oids)
def _check_editable_columns(self, response_data):
columns_info = response_data['data']['colinfo']
for col, expected_is_editable in \
zip(columns_info, self.expected_cols_is_editable):
self.assertEquals(col['is_editable'], expected_is_editable)
def _initialize_database_connection(self):
database_info = parent_node_dict["database"][-1]
self.db_name = database_info["db_name"]
self.server_id = database_info["server_id"]
self.server_version = parent_node_dict["schema"][-1]["server_version"]
if self.server_version >= 120000 and self.table_has_oids:
self.skipTest('Tables with OIDs are not supported starting '
'PostgreSQL 12')
driver_version = utils.get_driver_version()
driver_version = float('.'.join(driver_version.split('.')[:2]))
if driver_version < 2.8:
self.skipTest('Updatable resultsets require pyscopg 2.8 or later')
self.db_id = database_info["db_id"]
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to the database.")
def _initialize_query_tool(self):
self.trans_id = str(random.randint(1, 9999999))
url = '/datagrid/initialize/query_tool/{0}/{1}/{2}/{3}'.format(
self.trans_id, utils.SERVER_GROUP, self.server_id, self.db_id)
response = self.tester.post(url)
self.assertEquals(response.status_code, 200)
def _initialize_urls(self):
self.start_query_tool_url = \
'/sqleditor/query_tool/start/{0}'.format(self.trans_id)
self.poll_url = '/sqleditor/poll/{0}'.format(self.trans_id)
def _create_test_table(self, table_has_oids=False):
create_sql = """
DROP TABLE IF EXISTS {0};
CREATE TABLE {0}(
pk_col1 SERIAL,
pk_col2 SERIAL,
normal_col1 VARCHAR,
normal_col2 VARCHAR,
PRIMARY KEY(pk_col1, pk_col2)
)
""".format(self.test_table_name)
if table_has_oids:
create_sql += ' WITH OIDS;'
else:
create_sql += ';'
utils.create_table_with_query(self.server, self.db_name, create_sql)
class TestTemporaryTable(TestQueryUpdatableResultset):
""" This class will test the query result-set for temporary tables """
scenarios = [
('When selecting all columns of the Temporary table, on commit drop',
dict(sql='''
DROP TABLE IF EXISTS {0};
CREATE TEMPORARY TABLE {0} ON COMMIT DROP AS
SELECT
CURRENT_DATE AS today;
SELECT * FROM {0};''',
expected_primary_keys=None,
expected_results_column_data=[[date.today().strftime(
"%Y-%m-%d")]],
expected_has_oids=False,
expected_results_column_is_editable=False,
table_has_oids=False,
expected_cols_is_editable=[False]
))
]
def runTest(self):
response_data = self._execute_select_sql()
self._check_primary_keys(response_data)
self._check_oids(response_data)
# Verifying Temporary table result data on Commit Drop
self._check_results_column_data(response_data)
self._check_editable_columns(response_data)
def _check_results_column_data(self, response_data):
results_column_data = response_data['data']['result']
for result_data, expected_is_editable in \
zip(results_column_data, self.expected_results_column_data):
self.assertEquals(result_data, expected_is_editable)
| 2.1875 | 2 |
Data_generation.py | CS671/Assignment-4 | 0 | 12791472 | import numpy as np
import pickle as pkl
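# Generates Collatz (3n+1) sequences: starting from init_num, even values are
# halved and odd values are mapped to 3n+1 until the sequence reaches 1.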
def function_generator(init_num):
seq = np.array([], dtype='int')
n = init_num
seq = np.append(seq, n)
while True:
if ((n%2)==0):
next_number = n/2
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
if next_number==1:
break
else:
next_number = (3*n)+1
next_number = np.asarray(next_number, dtype='int')
seq = np.append(seq, next_number)
n = next_number
return seq
output_seq_data = []
output_seq_length = []
x_train = []
y_train = []
num = 0
for n in range(0,10000):
sequence = function_generator(n+1)
seq_len = len(sequence)
x_training = sequence[:(seq_len-1)]
x_training = np.array(x_training, dtype='int')
y_training = sequence[1:seq_len]
y_training = np.array(y_training, dtype='int')
output_seq_data.append(sequence)
output_seq_length.append(seq_len)
x_train.append(x_training)
y_train.append(y_training)
output_seq_data = np.asarray(output_seq_data)
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
print(y_train[26])
output_seq_length = np.asarray(output_seq_length)
max_length = output_seq_length.max()
# print(max_length)
# print(x_train[26])
# np.save('generated_data.npy', gen_data)
# np.save('x_train.npy', x_train)
# np.save('y_train.npy', y_train)
| 3.171875 | 3 |
PythonCode/MathModule.py | janw23/Ballance | 2 | 12791473 |
# USEFUL MATH FUNCTIONS
import math
import heapq
# returns the sign of a number
def sign(num):
if num > 0: return 1.0
if num < 0: return -1.0
return 0.0
def softsign(num):
if num < 0: return num / (1 - num)
return num / (1 + num)
# returns the square of a number, keeping its sign
def signedSqr(num):
if num >= 0: return num * num
return -num * num
# returns the signed base-one square; f(x)=(x+1)^2-1
def signedBaseOneSqr(num):
if num >= 0: return (num+1)**2 - 1
return 1 - (1-num)**2
# linear interpolation
def lerp(a, b, c):
return c*b + (1-c) * a
# returns the squared magnitude of vector [x, y]
def sqrMagnitude(x, y=None):
if y is not None: return x*x + y*y
return x[0] * x[0] + x[1] * x[1]
# returns the magnitude of vector [x, y]
def magnitude(x, y=None):
if y is not None: return math.sqrt(x*x + y*y)
return math.sqrt(x[0]*x[0] + x[1]*x[1])
# returns the distance between points A and B
def distance(A, B):
x = A[0] - B[0]
y = A[1] - B[1]
return math.sqrt(x*x + y*y)
# returns the squared distance between points A and B
def sqrDistance(A, B):
x = A[0] - B[0]
y = A[1] - B[1]
return x*x + y*y
# returns the normalized vector [x, y]
def normalized(x, y=None):
if y is not None:
if x == 0 and y == 0: return (0, 0)
mag = magnitude(x, y)
return (x/mag, y/mag)
else:
if x[0] == 0 and x[1] == 0: return (0, 0)
mag = magnitude(x)
return (x[0]/mag, x[1]/mag)
# returns the squared error between target and value
def errorsquare(target, value):
size = len(target)
sum = 0.0
for i in range(size):
a = int(target[i]) - value[i]
sum += a * a
return sum
# returns the value of 'num' clamped to the range [_min, _max]
def clamp(num, _min, _max):
if num > _max: return _max
elif num < _min: return _min
return num
# returns the dot product of vectors A and B
def dot(A, B):
return A[0] * B[0] + A[1] * B[1]
# returns the cross product (z component) of 2D vectors A and B
def cross(A, B):
return A[0] * B[1] - A[1] * B[0]
# returns the median of the numbers in 'data'
def Median(data):
order = sorted(data)
size = len(order)
if size % 2 == 0:
size = size // 2
return (order[size-1] + order[size]) / 2
return order[size//2]
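# e.g. Median([3, 1, 4, 1, 5]) == 3 and Median([1, 2, 3, 4]) == 2.5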
# priority queue
class PriorityQueue:
def __init__(self):
self.elements = []
    # pushes an item onto the queue
def push(self, item, priority):
heapq.heappush(self.elements, (priority, item))
    # pops and returns the item at the front of the queue
def pop(self):
return heapq.heappop(self.elements)[1]
    # is the queue empty?
def empty(self):
return len(self.elements) == 0
class MedianFilter:
    # size is the filter kernel size
def __init__(self, size):
self.data = [0.0] * size
self.size = size
self.index = 0
    # pushes an element into the filter's data buffer
def push(self, num):
self.data[self.index] = num
self.index += 1
if self.index == self.size: self.index = 0
    # returns the filtered value
def getValue(self):
return Median(self.data)
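# Usage sketch: f = MedianFilter(3); f.push(10); f.push(200); f.push(12)
# then f.getValue() returns 12, rejecting the 200 spike.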
# allows simulating a delay in measurements
class SignalDelay:
    # 'delay' is the number of samples between the current value and the delayed one
def __init__(self, delay, dtype):
self.delay = delay
self.data = [dtype] * delay
self.index = 0
    # adds an element to the data structure
def push(self, element):
self.data[self.index] = element
    # returns the element delayed by 'delay'
def get(self):
return self.data[self.index]
    # advances time by one unit; must be called between 'push' and 'get'
def tick(self):
self.index += 1
if self.index == self.delay:
self.index = 0 | 3.0625 | 3 |
lib/utils/utils.py | TotalVariation/Flattenet | 3 | 12791474 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class FullModel(nn.Module):
"""
Distribute the loss on multi-gpu to reduce
the memory cost in the main gpu.
You can check the following discussion.
https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
"""
def __init__(self, model, loss):
super(FullModel, self).__init__()
self.model = model
self.loss = loss
def forward(self, inputs, labels):
outputs = self.model(inputs)
loss = self.loss(outputs, labels)
return loss, outputs
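# Usage sketch (net, criterion, inputs and labels are assumed to exist):
#   model = torch.nn.DataParallel(FullModel(net, criterion)).cuda()
#   loss, outputs = model(inputs, labels)
#   loss.mean().backward()  # each replica returns its own loss value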
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
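# Usage sketch: m = AverageMeter(); m.update(0.5); m.update(1.0, weight=3)
# then m.average() == (0.5 + 3.0) / 4 == 0.875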
def create_logger(cfg, cfg_name, phase='train'):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print('=> creating {}'.format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split('.')[0]
final_output_dir = root_output_dir / dataset / cfg_name
print('=> creating {}'.format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
(cfg_name + '_' + time_str)
print('=> creating {}'.format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
    Calculate the confusion matrix for the given label and pred
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
seg_gt = np.asarray(
label.cpu().numpy()[:, :size[-2], :size[-1]], dtype=np.int)
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
index = (seg_gt * num_class + seg_pred).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label,
i_pred] = label_count[cur_index]
return confusion_matrix
def get_optimizer(config, model):
_nwd_keys = ('bias', 'bn', 'norm', 'prelu', 'nwd')
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = config.TRAIN.LR
weight_decay = config.TRAIN.WD
if 'head' in key:
lr *= 10
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
elif 'base' in key:
if any(key.find(sub) != -1 for sub in _nwd_keys):
weight_decay = 0
logger.info(f'Params: {key}, LR: {lr}, Weight_Decay: {weight_decay}')
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if config.TRAIN.OPTIMIZER == 'sgd':
optimizer = torch.optim.SGD(params,
lr=config.TRAIN.LR,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD,
nesterov=config.TRAIN.NESTEROV,
)
elif config.TRAIN.OPTIMIZER == 'adam':
optimizer = torch.optim.Adam(params,
lr=config.TRAIN.LR,
amsgrad=config.TRAIN.AMSGRAD
)
else:
raise NotImplementedError
return optimizer
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
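# e.g. get_group_gn(256, -1, 32) -> 32 groups; get_group_gn(256, 16, -1) -> 16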
| 2.84375 | 3 |
src/ralph/supports/migrations/0006_auto_20160615_0805.py | DoNnMyTh/ralph | 1,668 | 12791475 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import ralph.lib.mixins.fields
class Migration(migrations.Migration):
dependencies = [
('supports', '0005_auto_20160105_1222'),
]
operations = [
migrations.AlterModelOptions(
name='baseobjectssupport',
options={},
),
migrations.AlterModelTable(
name='baseobjectssupport',
table=None,
),
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.AddField(
model_name='baseobjectssupport',
name='baseobject',
field=ralph.lib.mixins.fields.BaseObjectForeignKey(default=0, verbose_name='Asset', to='assets.BaseObject', related_name='supports'),
preserve_default=False,
),
migrations.AddField(
model_name='baseobjectssupport',
name='support',
field=models.ForeignKey(default=0, to='supports.Support'),
preserve_default=False,
),
],
database_operations=[]
),
]
| 1.59375 | 2 |
tools/gen_doc_files.py | joshddunn/crsfml | 248 | 12791476 | <filename>tools/gen_doc_files.py
import os
import textwrap
import mkdocs_gen_files
root = mkdocs_gen_files.config["plugins"]["mkdocstrings"].get_handler("crystal").collector.root
nav = mkdocs_gen_files.open(f"api/index.md", "w")
for module in ["System", "Window", "Graphics", "Audio", "Network", ""]:
if module:
print(f"* [{module} module]({module.lower()}.md)", file=nav)
with mkdocs_gen_files.open(f"api/{module.lower()}.md", "w") as f:
f.write(textwrap.dedent(f"""
# ::: SF
selection:
file_filters:
- '/{module.lower()}/'
"""))
for typ in root.lookup("SF").walk_types():
[cur_module] = {os.path.dirname(os.path.relpath(loc.filename, "src")) for loc in typ.locations}
if module.lower() == cur_module:
name = typ.name
full_name = typ.abs_id
path = full_name.replace("::", "/")
indent = bool(module) + full_name.count("::") - 1
print(" " * indent + f"* [{name}]({path}.md)", file=nav)
filename = f"api/{path}.md"
with mkdocs_gen_files.open(filename, "w") as f:
f.write(textwrap.dedent(f"""\
# ::: {full_name}
"""))
if typ.locations:
mkdocs_gen_files.set_edit_path(filename, typ.locations[0].url)
| 2.453125 | 2 |
doc/workflow/examples/example7.py | PyUtilib/PyUtilib | 24 | 12791477 | <gh_stars>10-100
import pyutilib.workflow
import os.path
import os
currdir = os.path.dirname(os.path.abspath(__file__))+os.sep
import sys
if sys.platform.startswith('win'):
INPUT = open('example7.txt','r')
for line in INPUT:
sys.stdout.write(line)
INPUT.close()
else:
# @ex:
class TaskH(pyutilib.workflow.Task):
def __init__(self, *args, **kwds):
"""Constructor."""
pyutilib.workflow.Task.__init__(self, *args, **kwds)
self.inputs.declare('dir')
self.outputs.declare('list')
self.add_resource(pyutilib.workflow.ExecutableResource('ls'))
def execute(self):
self.resource('ls').run(self.dir, logfile=currdir+'logfile')
self.list = []
INPUT = open(currdir+'logfile','r')
for line in INPUT:
self.list.append( line.strip() )
INPUT.close()
self.list.sort()
H = TaskH()
w = pyutilib.workflow.Workflow()
w.add(H)
print(w(dir=currdir+'dummy'))
# @:ex
if os.path.exists(currdir+'logfile'):
os.remove(currdir+'logfile')
| 2.5 | 2 |
startup_scripts/240_virtualization_interfaces.py | systempal/netbox-docker | 691 | 12791478 | import sys
from startup_script_utils import load_yaml, pop_custom_fields, set_custom_fields_values
from virtualization.models import VirtualMachine, VMInterface
interfaces = load_yaml("/opt/netbox/initializers/virtualization_interfaces.yml")
if interfaces is None:
sys.exit()
required_assocs = {"virtual_machine": (VirtualMachine, "name")}
for params in interfaces:
custom_field_data = pop_custom_fields(params)
for assoc, details in required_assocs.items():
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
interface, created = VMInterface.objects.get_or_create(**params)
if created:
set_custom_fields_values(interface, custom_field_data)
print("🧷 Created interface", interface.name, interface.virtual_machine.name)
| 2.59375 | 3 |
src/atcoder/abc007/c/sol_0.py | kagemeka/competitive-programming | 1 | 12791479 | from __future__ import (
annotations,
)
from typing import (
Generator,
NoReturn
)
class StdReader:
def __init__(
self,
) -> NoReturn:
import sys
self.buf = sys.stdin.buffer
self.lines = (
self.async_readlines()
)
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
except:
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
from abc import (
ABC,
abstractmethod,
)
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
from typing import (
List,
)
from dataclasses import (
dataclass,
)
@dataclass
class Node:
id_: int = None
@dataclass
class Edge:
id_: int = None
from_ : int = ...
to: int = ...
weight: int = 1
capacity: int = 0
@dataclass
class Graph:
nodes: List[Node]
edges: List[List[Edge]]
def __init__(
self,
n: int,
):
nodes = [
Node(i)
for i in range(n)
]
edges = [
[] for _ in range(n)
]
self.nodes = nodes
self.edges = edges
def add_edge(
self,
e: Edge,
):
i = e.from_
self.edges[i].append(e)
def add_edges(
self,
edges: List[Edge],
):
for e in edges:
self.add_edge(e)
@property
def size(self):
return len(self.nodes)
from collections import (
deque,
)
class GraphBFS:
level: List[int]
def __init__(
self,
graph: Graph,
):
self.g = graph
self.inf = float('inf')
def search(
self,
src: int,
):
self.init_level()
self.level[src] = 0
self.set_queue()
que = self.queue
que.append(src)
while que:
x = que.popleft()
self.explore(x)
def explore(
self,
u: int,
):
g = self.g
lv = self.level
que = self.queue
for e in g.edges[u]:
v = e.to
if lv[v] is not None:
continue
lv[v] = lv[u] + 1
que.append(v)
def set_queue(self):
que = deque()
self.queue = que
def init_level(self):
lv = [None] * self.g.size
self.level = lv
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
r = reader.int()
c = reader.int()
sy = reader.int() - 1
sx = reader.int() - 1
gy = reader.int() - 1
gx = reader.int() - 1
maze = [None] * r
for i in range(r):
maze[i] = reader.str()
maze = ''.join(maze)
self.r = r
self.c = c
self.sy = sy
self.sx = sx
self.gy = gy
self.gx = gx
self.maze = maze
def solve(self):
c = self.c
self.moves = (-c, -1, 1, c)
self.make_graph()
print(self.calc_dist())
def calc_dist(self) -> int:
g = self.g
c = self.c
src = self.sy * c + self.sx
dst = self.gy * c + self.gx
bfs = GraphBFS(graph=g)
bfs.search(src)
dist = bfs.level[dst]
return dist
def make_graph(
self,
):
r, c = self.r, self.c
n = r * c
g = Graph(n)
for i in range(n):
edges = self.gen_edges(i)
g.add_edges(edges)
self.g = g
def gen_edges(
self,
i: int,
):
edges = []
maze = self.maze
if maze[i] == '#':
return edges
for d in self.moves:
j = i + d
if maze[j] == '#':
continue
e = Edge(
from_ = i,
to = j,
)
edges.append(e)
return edges
def main():
t = 1
# t = StdReader().int()
for _ in range(t):
Problem()()
if __name__ == '__main__':
main() | 2.921875 | 3 |
afnumpy/lib/stride_tricks.py | FilipeMaia/afnumpy | 31 | 12791480 | <reponame>FilipeMaia/afnumpy
import afnumpy
import numpy
def broadcast_arrays(*args, **kwargs):
subok = kwargs.pop('subok', False)
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
                        'argument {!r}'.format(kwargs.popitem()[0]))  # dict.pop() requires a key; popitem() does not
args = [afnumpy.array(_m, copy=False, subok=subok) for _m in args]
shapes = [x.shape for x in args]
if len(set(shapes)) == 1:
# Common case where nothing needs to be broadcasted.
return args
shapes = [list(s) for s in shapes]
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
# Go through each array and prepend dimensions of length 1 to each of
# the shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
shapes[i] = [1] * diff + shapes[i]
strides[i] = [0] * diff + strides[i]
    # Check each dimension for compatibility. A dimension length of 1 is
# accepted as compatible with any other length.
common_shape = []
for axis in range(biggest):
lengths = [s[axis] for s in shapes]
unique = set(lengths + [1])
if len(unique) > 2:
# There must be at least two non-1 lengths for this axis.
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
# There is exactly one non-1 length. The common shape will take
# this value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
# For each array, if this axis is being broadcasted from a
# length of 1, then set its stride to 0 so that it repeats its
# data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
# Every array has a length of 1 on this axis. Strides can be
# left alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = []
for (x, sh) in zip(args, shapes):
x_sh = x.shape + (1,)*(len(sh)-x.ndim)
reps = numpy.array(sh)//numpy.array(x_sh)
if(numpy.prod(reps) > 1):
broadcasted.append(afnumpy.tile(x, reps))
else:
if(x.shape != tuple(sh)):
x = x.reshape(sh)
broadcasted.append(x)
return broadcasted
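# Added usage sketch (ours, not part of the original source): mirrors
# numpy.broadcast_arrays semantics; assumes afnumpy exposes a numpy-like
# arange(), while reshape() is already used by this module above.
def _example_broadcast():
    a = afnumpy.arange(3).reshape((3, 1))
    b = afnumpy.arange(4).reshape((1, 4))
    x, y = broadcast_arrays(a, b)
    assert x.shape == y.shape == (3, 4)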
| 2.578125 | 3 |
tests/util_test.py | nickgaya/bravado-core | 122 | 12791481 | <reponame>nickgaya/bravado-core
# -*- coding: utf-8 -*-
from inspect import getcallargs
import mock
import pytest
from bravado_core.util import AliasKeyDict
from bravado_core.util import cached_property
from bravado_core.util import determine_object_type
from bravado_core.util import lazy_class_attribute
from bravado_core.util import memoize_by_id
from bravado_core.util import ObjectType
from bravado_core.util import RecursiveCallException
from bravado_core.util import sanitize_name
from bravado_core.util import strip_xscope
def test_cached_property():
class Class(object):
def __init__(self):
self.calls = 0
@cached_property
def property_1(self):
self.calls += 1
return self.calls
assert isinstance(Class.property_1, cached_property)
class_instance = Class()
assert class_instance.calls == 0
assert class_instance.property_1 == 1
assert class_instance.calls == 1
# If property is called twice no calls are received from the method
assert class_instance.property_1 == 1
assert class_instance.calls == 1
# If property is deleted then the method is called again
del class_instance.property_1
assert class_instance.property_1 == 2
assert class_instance.calls == 2
def test_class_cached_property():
class Class(object):
calls = 0
@lazy_class_attribute
def prop(cls):
cls.calls += 1
return cls.calls
class_instance_1 = Class()
assert class_instance_1.calls == 0
assert class_instance_1.prop == 1
assert class_instance_1.calls == 1
class_instance_2 = Class()
assert class_instance_2.calls == 1
assert class_instance_2.prop == 1
assert class_instance_2.calls == 1
def test_memoize_by_id_decorator_recursive_call():
calls = []
@memoize_by_id
def function(a):
calls.append(a)
return function(a)
with pytest.raises(RecursiveCallException):
function(mock.sentinel.A)
assert calls == [mock.sentinel.A]
def test_memoize_by_id_decorator():
calls = []
def function(a, b=None):
calls.append([a, b])
return id(a) + id(b)
decorated_function = memoize_by_id(function)
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
assert calls == [[1, None]]
assert decorated_function(2, 3) == id(2) + id(3)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
(('a', id(2)), ('b', id(3))): id(2) + id(3),
}
assert calls == [[1, None], [2, 3]]
# Calling the decorated method with known arguments will not call the inner method
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
(('a', id(2)), ('b', id(3))): id(2) + id(3),
}
assert calls == [[1, None], [2, 3]]
decorated_function.cache.clear()
assert decorated_function(1) == id(1) + id(None)
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
assert calls == [[1, None], [2, 3], [1, None]]
@mock.patch('bravado_core.util.inspect.getcallargs', wraps=getcallargs)
def test_memoize_by_id_do_not_use_inspect_if_only_kwargs_are_provided(mock_getcallargs):
calls = []
def function(a, b=None):
calls.append([a, b])
return id(a) + id(b)
decorated_function = memoize_by_id(function)
assert decorated_function(1) == id(1) + id(None)
mock_getcallargs.assert_called_once_with(function, 1)
assert calls == [[1, None]]
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
mock_getcallargs.reset_mock()
assert decorated_function(a=1) == id(1) + id(None)
assert not mock_getcallargs.called
assert decorated_function.cache == {
(('a', id(1)), ('b', id(None))): id(1) + id(None),
}
@pytest.mark.parametrize(
('input', 'expected'), [
('pet.getBy Id', 'pet_getBy_Id'), # simple case
('_getPetById_', 'getPetById'), # leading/trailing underscore
('get__Pet_By__Id', 'get_Pet_By_Id'), # double underscores
('^&#@!$foo%+++:;"<>?/', 'foo'), # bunch of illegal chars
('__foo__', 'foo'), # make sure we strip multiple underscores
('100percent', 'percent'), # make sure we remove all digits
('100.0', '_100_0'), # a name consisting mostly of digits should keep them
],
)
def test_sanitize_name(input, expected):
assert sanitize_name(input) == expected
def test_AliasKeyDict():
alias_dict = AliasKeyDict({'a': 'b', 'c': 'd'})
alias_dict.add_alias('alias_a', 'a')
assert len(alias_dict) == 2
assert set(alias_dict.items()) == set([('a', 'b'), ('c', 'd')])
assert 'alias_a' in alias_dict
assert alias_dict['alias_a'] is alias_dict['a']
assert alias_dict.get('alias_a') is alias_dict.get('a')
assert alias_dict.get('f', 'not there') == 'not there'
assert alias_dict.pop('alias_a') == 'b'
assert len(alias_dict) == 1
assert 'a' not in alias_dict
assert 'alias_a' not in alias_dict
def test_AliasKeyDict_copy():
alias_dict = AliasKeyDict([('foo', 'bar')])
alias_dict.add_alias('baz', 'foo')
dict_copy = alias_dict.copy()
assert set(dict_copy.items()) == set(alias_dict.items())
assert dict_copy.alias_to_key == alias_dict.alias_to_key
def test_AliasKeyDict_del():
alias_dict = AliasKeyDict([('foo', 'bar')])
alias_dict.add_alias('baz', 'foo')
del alias_dict['baz']
assert len(alias_dict) == 0
assert 'baz' not in alias_dict
assert 'foo' not in alias_dict
@pytest.mark.parametrize(
'default_type_to_object, object_dict, expected_object_type',
(
[True, 'anything that is not a dictionary', ObjectType.UNKNOWN],
[True, {'in': 'body', 'name': 'body', 'required': True, 'schema': {'type': 'object'}}, ObjectType.PARAMETER],
[True, {'get': {'responses': {'200': {'description': 'response description'}}}}, ObjectType.PATH_ITEM],
[True, {'description': 'response description', 'schema': {'type': 'object'}}, ObjectType.RESPONSE],
[True, {'description': 'response description', 'parameters': {'param': {'type': 'object'}}}, ObjectType.SCHEMA],
[False, {'description': 'response description', 'parameters': {'param': {'type': 'object'}}}, ObjectType.UNKNOWN], # noqa
),
)
def test_determine_object_type(default_type_to_object, object_dict, expected_object_type):
assert determine_object_type(object_dict, default_type_to_object) == expected_object_type
def test_empty():
assert {} == strip_xscope({})
def test_contained_in_dict():
fragment = {
'MON': {
'$ref': '#/definitions/DayHours',
'x-scope': [
'file:///happyhour/api_docs/swagger.json',
'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours',
],
},
}
expected = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
assert expected == strip_xscope(fragment)
assert 'x-scope' in fragment['MON']
def test_contained_in_list():
fragment = [
{
'$ref': '#/definitions/DayHours',
'x-scope': [
'file:///happyhour/api_docs/swagger.json',
'file:///happyhour/api_docs/swagger.json#/definitions/WeekHours',
],
},
]
expected = [
{
'$ref': '#/definitions/DayHours',
},
]
assert expected == strip_xscope(fragment)
assert 'x-scope' in fragment[0]
def test_no_op():
fragment = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
expected = {
'MON': {
'$ref': '#/definitions/DayHours',
},
}
assert expected == strip_xscope(fragment)
def test_petstore_spec(petstore_spec):
assert petstore_spec.client_spec_dict == strip_xscope(petstore_spec.spec_dict)
| 2.109375 | 2 |
discord/ext/ui/item.py | Lapis256/discord-ext-ui | 0 | 12791482 | from typing import Any, Callable
import discord
class Item:
def to_discord(self) -> Any:
pass
def check(self, func: Callable[[discord.Interaction], bool]) -> 'Item':
pass
| 2.328125 | 2 |
bgg4py/valueobject/search.py | hiroaqii/bgg4py | 1 | 12791483 | from collections import OrderedDict
from typing import List, Optional, Union
from .bgg import Bgg
class Item(Bgg):
id: int
type: str
name: Optional[str]
yearpublished: Optional[int]
@classmethod
    def create(cls, item: OrderedDict):
        _item = Item(
            id=Bgg.parse_int(item.get("@id")),
            type=item.get("@type"),
            name=Bgg.get_primary_name(item.get("name")),
            yearpublished=Bgg.parse_int(item.get("yearpublished", {}).get("@value")),
)
return _item
class Search(Bgg):
items: List[Item]
@classmethod
def create(cls, items: Union[OrderedDict, List[OrderedDict]]):
if items is None:
return None
if type(items) == OrderedDict:
items = [items]
_items = [Item.create(x) for x in items]
return Search(items=_items)
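# Added usage sketch (ours, not part of the original module): Search.create accepts
# a single OrderedDict or a list of them, as produced by an XML parser such as
# xmltodict. The exact payload shape (especially what Bgg.get_primary_name expects
# under "name") is an assumption here; values are illustrative.
def _example_search():
    item = OrderedDict([
        ("@id", "174430"),
        ("@type", "boardgame"),
        ("name", OrderedDict([("@type", "primary"), ("@value", "Gloomhaven")])),
        ("yearpublished", OrderedDict([("@value", "2017")])),
    ])
    return Search.create(item)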
| 2.859375 | 3 |
Networks.py | yanhuchen/Quantum-Graph-Convolutional-Network | 3 | 12791484 | <gh_stars>1-10
import random
from QuantumGate import QuantumGate
import numpy as np
import scipy.sparse as sp
from numpy import pi, sin, cos, sqrt, exp
from copy import deepcopy
class Networks:
def __init__(self, n_qubit, n_class, nums, label):
self.nums = nums
self.label = label
self.n_class = n_class
self.n_qubit = n_qubit
self.map = {0:[0,1], 1:[2,3], 2:[4,5], 3:[6,7], 4:[8,9],
5:[10,11], 6:[12,13]}
def uniformstate(self,sigma,nums,init):
psi = np.zeros((nums,2**self.n_qubit))
for j in range(nums):
for i in range(2**self.n_qubit):
psi[j,i] = (1+np.random.normal(0,sigma,1)) * init[i]
return psi
    # Random sampling (default: 10)
def randomSample(self,psi,label):
ind = random.sample(list(range(0,len(psi))),self.nums)
return psi[ind],label[ind]
    # Normalization
def normalize(self,psi):
        h, l = psi.shape  # h is the number of states, l is the dimension of each state
for j in range(h):
s = sqrt(sum(psi[j]**2))
for i in range(l):
psi[j,i] = psi[j,i] / s
return psi
    def train1(self, theta, init, label, epochs, lr):  # input: the set of randomly sampled quantum states
expect = np.zeros((epochs, self.nums, self.n_class))
acc = np.zeros(epochs)
test_acc = np.zeros(epochs)
        psi = init[0:self.nums]  # training-set data
# y = label
y = np.zeros((self.nums, self.n_class))
for k in range(self.nums):
y[k, label[k]] = 1
loss = np.zeros(epochs)
for epoch in range(epochs):
            delta = np.zeros_like(theta)  # the gradient is updated once per epoch
for i in range(self.nums):
                # first compute the expected probabilities with the current parameters
expect[epoch, i] = self.getExpectation1(theta, psi[i].reshape(len(psi[i]), 1))
for t in range(len(theta)):
grad_e = self.getGradient1(theta, t, psi[i].reshape(len(psi[i]), 1))
soft_e = self.Softmax(deepcopy(expect[epoch, i]))
delta[t] += lr * (soft_e - y[i]).reshape((1, self.n_class)) @ grad_e.reshape((self.n_class, 1))
            theta -= delta / self.nums  # update the parameters
tmp = 0
for i in range(self.nums):
                tmp -= np.log(expect[epoch, i, label[i]])  # accumulate the loss
loss[epoch] = tmp
acc[epoch] = self.get_accuracy(expect[epoch], label)
test_acc[epoch] = self.test(theta=theta, init=init)
            print('Epoch', epoch, '-', 'loss:', loss[epoch], 'train_acc:', acc[epoch],
                  'test_acc', test_acc[epoch])
return theta, loss, acc, test_acc
    def getGradient1(self, theta, num_para, init):  # returns the parameter-shift rule (PSR) result
left = deepcopy(theta)
right = deepcopy(theta)
left[num_para] = left[num_para] - pi / 4
right[num_para] = right[num_para] + pi / 4
        # left term (theta - pi/4)
out_l = self.getBlock1(theta=left) @ init
expect_l = np.zeros(self.n_class)
for i in range(self.n_class):
expect_l[i] = out_l[self.map[i][0], 0] ** 2 + out_l[self.map[i][1], 0] ** 2
        # right term (theta + pi/4)
out_r = self.getBlock1(theta=right) @ init
expect_r = np.zeros(self.n_class)
for i in range(self.n_class):
expect_r[i] = out_r[self.map[i][0], 0] ** 2 + out_r[self.map[i][1], 0] ** 2
return expect_r - expect_l
def getExpectation1(self, theta, init):
res = self.getBlock1(theta=theta) @ init
expect = np.zeros(self.n_class)
for i in range(self.n_class):
expect[i] = res[self.map[i][0], 0] ** 2 + res[self.map[i][1], 0] ** 2
return expect
    def getBlock1(self, theta):  # a common parameterized quantum circuit (PQC) block
QG = QuantumGate()
layer = len(theta) // 8
U = np.eye(2 ** self.n_qubit)
for lay in range(layer):
U1 = np.kron(
np.kron(
np.kron(self.Ry(theta[lay * 8 + 0]), self.Ry(theta[lay * 8 + 1])),
self.Ry(theta[lay * 8 + 2])),
self.Ry(theta[lay * 8 + 3]))
U2 = np.kron(QG.C1nU(n=0, U=self.Ry(theta=theta[lay * 8 + 4])), np.eye(4))
U3 = np.kron(np.kron(QG.I, QG.C1nU(n=0, U=self.Ry(theta=theta[lay * 8 + 5]))), QG.I)
U4 = np.kron(np.eye(4), QG.C1nU(n=0, U=self.Ry(theta=theta[lay * 8 + 6])))
U5 = QG.UnC1(n=2, U=self.Ry(theta=theta[lay * 8 + 7]))
U = U @ U5 @ U4 @ U3 @ U2 @ U1
return U
def Softmax(self, x):
A = sum(np.exp(x))
for k in range(len(x)):
x[k] = np.exp(x[k]) / A
return x
def partial_NLL(self, x, y):
return sum(x - y)
def Ry(self, theta):
return np.array([[cos(theta), -sin(theta)],
[sin(theta), cos(theta)]])
def test(self, theta, init):
test_expect = np.zeros((len(self.label) - self.nums, self.n_class))
for n in range(self.nums, len(self.label)):
test_expect[n - self.nums] = self.getExpectation1(theta=theta, init=init[n].reshape(len(init[n]), 1))
test_acc = self.get_accuracy(test_expect, self.label[self.nums:])
return test_acc
def get_accuracy(self, expect, label):
# expect的shape为:[num, n_class]
acc = 0
for j in range(expect.shape[0]):
arg = np.argmax(expect[j])
if arg == label[j]:
acc += 1
return acc
def get_residuals(self, prop):
        # adj has 13264 edges in total; we assume the residual matrix has
        # 13264*prop nonzero elements, while keeping its norm at 2708*prop
a = np.random.randint(low=0, high=2708, size=int(13264 * prop / 2))
b = np.random.randint(low=0, high=2708, size=int(13264 * prop / 2))
row = np.hstack((a, b))
col = np.hstack((b, a))
data = (np.ones_like(row) -
2 * np.random.randint(low=0, high=2, size=len(row))) * prop
residuals = sp.coo_matrix((data, (row, col)), shape=(2708, 2708))
return residuals
| 2.3125 | 2 |
Python tripartite framework/Django/code/mysite/app_model/models.py | Ljazz/studyspace | 0 | 12791485 | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.timezone import now
from django.template.defaultfilters import slugify
import uuid
import os
class Article(models.Model):
STATUS_CHOICES = (
('d', '草稿'),
('p', '发表'),
)
title = models.CharField('标题', max_length=200, unique=True)
slug = models.SlugField('slug', max_length=60)
body = models.TextField('正文')
pub_date = models.DateTimeField('发布时间', default=now, null=True)
create_date = models.DateTimeField('创建时间', auto_now_add=True)
mod_date = models.DateTimeField('修改时间', auto_now=True)
status = models.CharField('文章状态', max_length=1, choices=STATUS_CHOICES)
views = models.PositiveIntegerField('浏览量', default=0)
author = models.ForeignKey(User, verbose_name="作者", on_delete=models.CASCADE)
tags = models.ManyToManyField('Tag', verbose_name="标签集合", blank=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:article_detail', args=[str(self.id)])
def viewed(self):
self.views += 1
self.save(update_fields=['views'])
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if not self.slug or not self.id:
self.slug = slugify(self.title)
super(Article, self).save()
# do something
class Meta:
ordering = ['-pub_date']
verbose_name = 'article'
def user_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4().hex[:10], ext)
# return the whole path to the file
    return os.path.join(str(instance.user.id), 'avatar', filename)  # user.id is an int; os.path.join needs strings
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
avatar = models.ImageField(upload_to=user_directory_path, verbose_name='头像')
class AuthorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(role='A')
class EditorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(role='E')
class Person(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
role = models.CharField(max_length=1, choices=(('A', 'Author'), ('E', 'Editor')))
objects = models.Manager()
authors = AuthorManager()
editors = EditorManager()
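# Added usage sketch (ours, not part of the original models): each manager exposes
# a pre-filtered queryset, so role filtering lives in one place instead of being
# repeated across views.
def _example_role_querysets():
    everyone = Person.objects.all()  # default manager, no filtering
    authors = Person.authors.all()   # only role='A'
    editors = Person.editors.all()   # only role='E'
    return everyone, authors, editors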
| 2.15625 | 2 |
uwsgi_sloth/commands/echo_conf.py | 365moods/uwsgi | 0 | 12791486 | <reponame>365moods/uwsgi
# -*- coding: utf-8 -*-
import pkg_resources
def echo_conf(args):
    print(pkg_resources.resource_string('uwsgi_sloth', "sample.conf"))
def load_subcommand(subparsers):
"""Load this subcommand"""
parser_analyze = subparsers.add_parser('echo_conf', help='Echo sample configuration file')
parser_analyze.set_defaults(func=echo_conf)
| 2.40625 | 2 |
bims/views/under_development.py | Christiaanvdm/django-bims | 0 | 12791487 | <reponame>Christiaanvdm/django-bims<gh_stars>0
# coding=utf-8
from django.views.generic import TemplateView
class UnderDevelopmentView(TemplateView):
template_name = 'under_development.html'
| 1.210938 | 1 |
utility.py | DavideFrr/ibmqx_experiments | 6 | 12791488 | # Copyright 2017 Quantum Information Science, University of Parma, Italy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Quantum Information Science, University of Parma, Italy"
__license__ = "Apache"
__version__ = "2.0"
__email__ = "<EMAIL>"
import os
from time import sleep
from devices import *
import logging
import myLogger
import operator
import sys
sys.path.append( # solve the relative dependencies if you clone QISKit from the Git repo and use like a global.
"../qiskit-sdk-py")
from qiskit import QuantumProgram
import Qconfig
logger = logging.getLogger('utility')
logger.addHandler(myLogger.MyHandler())
logger.setLevel(logging.CRITICAL)
logger.propagate = False
class Utility(object):
def __init__(self, coupling_map):
self.__coupling_map = dict()
self.__inverse_coupling_map = dict()
self.__plain_map = dict()
self.__path = dict()
self.__n_qubits = 0
self.__ranks = dict()
self.__connected = dict()
self.__most_connected = []
if coupling_map:
self.__coupling_map = coupling_map.copy()
logger.log(logging.DEBUG, 'init() - coupling_map:\n%s', str(self.__coupling_map))
self.invert_graph(coupling_map, self.__inverse_coupling_map)
logger.log(logging.DEBUG, 'init() - inverse coupling map:\n%s', str(self.__inverse_coupling_map))
for i in coupling_map:
self.__plain_map.update({i: self.__inverse_coupling_map[i] + coupling_map[i]})
logger.debug('init() - plain map:\n%s', str(self.__plain_map))
self.start_explore(self.__coupling_map, self.__ranks)
self.__most_connected = self.find_max(self.__ranks)
self.create_path(self.__most_connected[0], plain_map=self.__plain_map)
else:
logger.critical('init() - Null argument: coupling_map')
exit(1)
def close(self):
self.__ranks.clear()
self.__inverse_coupling_map.clear()
self.__coupling_map.clear()
self.__path.clear()
self.__most_connected.clear()
def explore(self, source, visiting, visited, ranks):
for next in self.__coupling_map[visiting]:
if next not in visited[source]:
visited[source].append(next)
if next not in ranks:
ranks.update({next: 0})
ranks[next] = ranks[next] + 1
self.explore(source, next, visited, ranks)
# TODO Try using some sort of centrality algorithm
def start_explore(self, graph, ranks):
visited = dict()
for source in graph:
visited.update({source: []})
self.explore(source, source, visited, ranks)
# create an inverted coupling-map for further use
@staticmethod
def invert_graph(graph, inverse_graph=None):
if inverse_graph is None:
inverse_graph = {}
for end in graph:
for start in graph[end]:
if start not in inverse_graph:
inverse_graph.update({start: [end]})
else:
inverse_graph[start].append(end)
for node in graph:
if node not in inverse_graph:
inverse_graph.update({node: []})
# find the most connected qubit
@staticmethod
def find_max(ranks):
logger.debug('ranks:\n%s', str(ranks))
most_connected = max(ranks.items(), key=operator.itemgetter(1))[0]
found = [most_connected, ranks[most_connected]]
logger.debug('max: %s', str(found))
return found
# create a valid path that connect qubits used in the circuit
def create_path(self, start, plain_map):
self.__path.update({start: -1})
to_connect = [start]
max = len(self.__coupling_map)
logger.debug('create_path() - max:\n%s', str(max))
count = max - 1
changed = True
visiting = 0
while count > 0:
logger.debug('create_path() - visiting:\n%s - %s', str(visiting), str(to_connect[visiting]))
# for visiting in to_connect:
if count <= 0:
break
for node in plain_map[to_connect[visiting]]:
if count <= 0:
break
if node not in self.__path:
self.__path.update({node: to_connect[visiting]})
count -= 1
logger.debug('create_path() - path:\n%s', str(self.__path))
if node not in to_connect:
to_connect.append(node)
visiting += 1
logger.debug('create_path() - path:\n%s', str(self.__path))
def cx(self, circuit, control_qubit, target_qubit, control, target):
if target in self.__coupling_map[control]:
logger.log(logging.VERBOSE, 'cx() - cnot: (%s, %s)', str(control), str(target))
circuit.cx(control_qubit, target_qubit)
elif control in self.__coupling_map[target]:
logger.log(logging.VERBOSE, 'cx() - inverse-cnot: (%s, %s)', str(control), str(target))
circuit.h(control_qubit)
circuit.h(target_qubit)
circuit.cx(target_qubit, control_qubit)
circuit.h(control_qubit)
circuit.h(target_qubit)
else:
logger.critical('cx() - Cannot connect qubit %s to qubit %s', str(control), str(target))
exit(3)
# place cnot gates based on the path created in create_path method
def place_cx(self, circuit, quantum_r, oracle='11'):
if not oracle == '00':
logger.log(logging.VERBOSE, 'place_cx() - oracle != 00')
stop = self.__n_qubits // 2
for qubit in self.__connected:
if self.__connected[qubit] != -1:
if oracle == '11':
logger.log(logging.VERBOSE, 'place_cx() - oracle = 11')
self.cx(circuit, quantum_r[qubit], quantum_r[self.__connected[qubit]], qubit,
self.__connected[qubit])
elif oracle == '10':
logger.log(logging.VERBOSE, 'place_cx() - oracle = 10')
if stop > 0:
self.cx(circuit, quantum_r[qubit], quantum_r[self.__connected[qubit]], qubit,
self.__connected[qubit])
stop -= 1
# place Hadamard gates
def place_h(self, circuit, start, quantum_r, initial=True, x=True):
for qubit in self.__connected:
if qubit != start:
circuit.h(quantum_r[qubit])
else:
if initial is True:
if x is True:
circuit.x(quantum_r[qubit])
else:
circuit.h(quantum_r[qubit])
# place Pauli-X gates
def place_x(self, circuit, quantum_r):
sorted_c = sorted(self.__connected.items(), key=operator.itemgetter(0))
logger.log(logging.VERBOSE, 'place_x() - sorted_c:\n%s', str(sorted_c))
s_0 = self.__n_qubits // 2
i = 0
count = self.__n_qubits - 1
for qubit in sorted_c:
if count <= 0:
break
if i >= s_0:
circuit.x(quantum_r[qubit[0]])
else:
circuit.iden(quantum_r[qubit[0]])
i += 1
i = 0
for qubit in sorted_c:
if i >= s_0:
circuit.iden(quantum_r[qubit[0]])
else:
circuit.x(quantum_r[qubit[0]])
i += 1
# final measure
def measure(self, circuit, quantum_r, classical_r):
for qubit in self.__connected:
circuit.measure(quantum_r[qubit], classical_r[qubit])
# create the circuit
def create(self, circuit, quantum_r, classical_r, n_qubits, x=True, oracle='11'):
self.__n_qubits = n_qubits
max_qubits = len(self.__path)
logger.debug('create() - N qubits: %s', str(self.__n_qubits))
logger.debug('create() - Max qubits: %s', str(max_qubits))
if max_qubits < self.__n_qubits:
logger.critical('create() - Can use only up to %s qubits', str(max_qubits))
exit(2)
count = self.__n_qubits
for qubit in self.__path:
if count <= 0:
break
self.__connected.update({qubit: self.__path[qubit]})
count -= 1
logger.debug('create() - connected:\n%s', str(self.__connected))
self.place_h(circuit, self.__most_connected[0], quantum_r, x=x)
self.place_cx(circuit, quantum_r, oracle=oracle)
self.place_h(circuit, self.__most_connected[0], quantum_r, initial=False)
if x is True:
self.place_x(circuit, quantum_r)
self.measure(circuit, quantum_r, classical_r)
def envariance(self, circuit, quantum_r, classical_r, n_qubits):
self.create(circuit, quantum_r, classical_r, n_qubits)
sorted_c = sorted(self.__connected.items(), key=operator.itemgetter(0))
connected = list(zip(*sorted_c))[0]
logger.debug('envariance() - connected:\n%s', str(connected))
self.__n_qubits = 0
self.__connected.clear()
return connected
def parity(self, circuit, quantum_r, classical_r, n_qubits, oracle='11'):
self.create(circuit, quantum_r, classical_r, n_qubits, x=False, oracle=oracle)
connected = list(self.__connected.keys())
logger.debug('parity() - connected:\n%s', str(connected))
self.__n_qubits = 0
self.__connected.clear()
return connected
# launch envariance experiment on the given device
def envariance_exec(execution, device, utility, n_qubits, num_shots=1024, directory='Data_Envariance/'):
os.makedirs(os.path.dirname(directory), exist_ok=True)
size = 0
results = dict()
if device == qx2 or device == qx4:
if n_qubits <= 5:
size = 5
# device = 'ibmqx_qasm_simulator'
else:
            logger.critical('launch_exp() - Too many qubits for %s!', device)
exit(1)
elif device == qx3 or device == qx5:
if n_qubits <= 16:
size = 16
# device = 'ibmqx_qasm_simulator'
else:
            logger.critical('launch_exp() - Too many qubits for %s!', device)
exit(2)
elif device == online_sim:
if n_qubits <= 5:
size = 5
elif n_qubits <= 16:
size = 16
else:
logger.critical('launch_exp() - Unknown device.')
exit(3)
Q_program = QuantumProgram()
try:
Q_program.set_api(Qconfig.APItoken, Qconfig.config["url"]) # set the APIToken and API url
except ConnectionError:
sleep(900)
logger.critical('API Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
quantum_r = Q_program.create_quantum_register("qr", size)
classical_r = Q_program.create_classical_register("cr", size)
circuit = Q_program.create_circuit("envariance", [quantum_r], [classical_r])
connected = utility.envariance(circuit=circuit, quantum_r=quantum_r, classical_r=classical_r, n_qubits=n_qubits)
QASM_source = Q_program.get_qasm("envariance")
logger.debug('launch_exp() - QASM:\n%s', str(QASM_source))
while True:
try:
backend_status = Q_program.get_backend_status(device)
if ('available' in backend_status and backend_status['available'] is False) \
or ('busy' in backend_status and backend_status['busy'] is True):
logger.critical('%s currently offline, waiting...', device)
while Q_program.get_backend_status(device)['available'] is False:
sleep(1800)
logger.critical('%s is back online, resuming execution', device)
except ConnectionError:
logger.critical('Error getting backend status, retrying...')
sleep(900)
continue
except ValueError:
logger.critical('Backend is not available, waiting...')
sleep(900)
continue
break
if Q_program.get_api().get_my_credits()['remaining'] < 3:
logger.critical('Qubits %d - Execution %d - Shots %d ---- Waiting for credits to replenish...',
n_qubits, execution, num_shots)
while Q_program.get_api().get_my_credits()['remaining'] < 3:
sleep(900)
logger.critical('Credits replenished, resuming execution')
try:
result = Q_program.execute(["envariance"], backend=device, wait=2, timeout=1000, shots=num_shots, max_credits=5)
except Exception:
sleep(900)
logger.critical('Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
try:
counts = result.get_counts("envariance")
except Exception:
logger.critical('Exception occurred, retrying\nQubits %d - Execution %d - Shots %d', n_qubits, execution,
num_shots)
envariance_exec(execution, device, utility, n_qubits=n_qubits, num_shots=num_shots, directory=directory)
return
logger.debug('launch_exp() - counts:\n%s', str(counts))
sorted_c = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
filename = directory + device + '/' + 'execution' + str(
execution) + '/' + device + '_' + str(num_shots) + '_' + str(
n_qubits) + '_qubits_envariance.txt'
os.makedirs(os.path.dirname(filename), exist_ok=True)
out_f = open(filename, 'w')
# store counts in txt file and xlsx file
out_f.write('VALUES\t\tCOUNTS\n\n')
stop = n_qubits // 2
for i in sorted_c:
reverse = i[0][::-1]
sorted_v = []
for n in range(n_qubits - stop):
sorted_v.append(reverse[connected[n + stop]])
for n in range(stop):
sorted_v.append(reverse[connected[n]])
value = ''.join(str(v) for v in sorted_v)
results.update({value: i[1]})
out_f.write(value + '\t' + str(i[1]) + '\n')
out_f.close()
# launch parity experiment on the given device
def parity_exec(execution, device, utility, n_qubits, oracle='11', num_shots=1024, directory='Data_Parity/'):
os.makedirs(os.path.dirname(directory), exist_ok=True)
size = 0
results = dict()
if device == qx2 or device == qx4:
if n_qubits <= 5:
size = 5
# device = 'ibmqx_qasm_simulator'
else:
            logger.critical('launch_exp() - Too many qubits for %s!', device)
exit(1)
elif device == qx3 or device == qx5:
if n_qubits <= 16:
size = 16
# device = 'ibmqx_qasm_simulator'
else:
            logger.critical('launch_exp() - Too many qubits for %s!', device)
exit(2)
elif device == online_sim:
if n_qubits <= 5:
size = 5
elif n_qubits <= 16:
size = 16
else:
logger.critical('launch_exp() - Unknown device.')
exit(3)
Q_program = QuantumProgram()
try:
Q_program.set_api(Qconfig.APItoken, Qconfig.config["url"]) # set the APIToken and API url
except ConnectionError:
sleep(900)
logger.critical('API Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits,
oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
quantum_r = Q_program.create_quantum_register("qr", size)
classical_r = Q_program.create_classical_register("cr", size)
circuit = Q_program.create_circuit('parity', [quantum_r], [classical_r])
connected = utility.parity(circuit=circuit, quantum_r=quantum_r, classical_r=classical_r, n_qubits=n_qubits,
oracle=oracle)
QASM_source = Q_program.get_qasm('parity')
logger.debug('launch_exp() - QASM:\n%s', str(QASM_source))
while True:
try:
backend_status = Q_program.get_backend_status(device)
if ('available' in backend_status and backend_status['available'] is False) \
or ('busy' in backend_status and backend_status['busy'] is True):
logger.critical('%s currently offline, waiting...', device)
while Q_program.get_backend_status(device)['available'] is False:
sleep(1800)
logger.critical('%s is back online, resuming execution', device)
except ConnectionError:
logger.critical('Error getting backend status, retrying...')
sleep(900)
continue
except ValueError:
logger.critical('Backend is not available, waiting...')
sleep(900)
continue
break
if Q_program.get_api().get_my_credits()['remaining'] < 3:
logger.critical('Qubits %d - Oracle %s - Execution %d - Queries %d ---- Waiting for credits to replenish...',
n_qubits, oracle,
execution, num_shots)
while Q_program.get_api().get_my_credits()['remaining'] < 3:
sleep(900)
logger.critical('Credits replenished, resuming execution')
try:
result = Q_program.execute(['parity'], backend=device, wait=2, timeout=1000, shots=num_shots, max_credits=5)
except Exception:
sleep(900)
logger.critical('Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits, oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
try:
counts = result.get_counts('parity')
except Exception:
logger.critical('Exception occurred, retrying\nQubits %d - Oracle %s - Execution %d - Queries %d', n_qubits, oracle,
execution, num_shots)
parity_exec(execution, device, utility, n_qubits=n_qubits, oracle=oracle, num_shots=num_shots, directory=directory)
return
logger.debug('launch_exp() - counts:\n%s', str(counts))
sorted_c = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
filename = directory + device + '/' + oracle + '/' + 'execution' + str(
execution) + '/' + device + '_' + str(
num_shots) + 'queries_' + oracle + '_' + str(
n_qubits) + '_qubits_parity.txt'
os.makedirs(os.path.dirname(filename), exist_ok=True)
out_f = open(filename, 'w')
    # store counts in a txt file
out_f.write('VALUES\t\tCOUNTS\n\n')
    logger.debug('launch_exp() - ordered_q:\n%s', str(connected))
stop = n_qubits // 2
for i in sorted_c:
reverse = i[0][::-1]
logger.log(logging.VERBOSE, 'launch_exp() - reverse in for 1st loop: %s', str(reverse))
sorted_v = [reverse[connected[0]]]
logger.log(logging.VERBOSE, 'launch_exp() - connected[0] in 1st for loop: %s', str(connected[0]))
logger.log(logging.VERBOSE, 'launch_exp() - sorted_v in 1st for loop: %s', str(sorted_v))
for n in range(stop):
sorted_v.append(reverse[connected[n + 1]])
logger.log(logging.VERBOSE, 'launch_exp() - connected[n+1], sorted_v[n+1] in 2nd for loop: %s,%s',
str(connected[n + 1]), str(sorted_v[n + 1]))
if (n + stop + 1) != n_qubits:
sorted_v.append(reverse[connected[n + stop + 1]])
logger.log(logging.VERBOSE, 'launch_exp() - connected[n+stop+1], sorted_v[n+2] in 2nd for loop: %s%s',
str(connected[n + stop + 1]), str(sorted_v[n + 2]))
value = ''.join(str(v) for v in sorted_v)
results.update({value: i[1]})
out_f.write(value + '\t' + str(i[1]) + '\n')
out_f.close()
| 2 | 2 |
survey/migrations/0009_auto_20151120_1756.py | lundskommun/kartverktyget | 0 | 12791489 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('survey', '0008_survey_image'),
]
operations = [
migrations.AlterField(
model_name='survey',
name='description',
field=ckeditor.fields.RichTextField(help_text='This description is show to the contributors.', verbose_name='description', blank=True),
),
]
| 1.65625 | 2 |
agent/src/Basic_linux_commands/copy_move_delete.py | ekbanasolutions/aditas | 2 | 12791490 | import os
import time
import run_services
from Basic_linux_commands.chown_chmod import chown
from bigdata_logs.logger import getLoggingInstance
log = getLoggingInstance()
username = os.getenv("user")
groupname = username
def copy(src, file_list, dest, user_pass, *args):
log.info("\nCopying\n")
try:
file_not_exists = []
if src.endswith('/'):
src = src[:-1]
if dest.endswith('/'):
dest = dest[:-1]
if not os.path.exists(dest):
os.makedirs(dest)
for file in file_list:
file_to_copy = src + '/%s' % file
if not os.path.exists(file_to_copy):
file_not_exists.append(file_to_copy)
else:
if os.path.isdir(file_to_copy):
unique_id = str(time.time()).split('.')[0]
copied_file = dest + '/%s_%s' % (file, unique_id)
run_services.run_basic_services("echo %s | sudo -S cp -r %s %s" % (user_pass, file_to_copy, copied_file))
chown(copied_file, username, groupname, user_pass)
else:
unique_id = str(time.time()).split('.')[0]
copied_file = dest + '/%s_%s' % (file, unique_id)
run_services.run_basic_services("echo %s | sudo -S cp %s %s" % (user_pass, file_to_copy, copied_file))
chown(copied_file, username, groupname, user_pass)
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> copy()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
def move_rename(src, file_list, dest, user_pass):
try:
file_not_exists = []
existing_file_list = []
if src.endswith('/'):
src = src[:-1]
if dest.endswith('/'):
dest = dest[:-1]
for file in file_list:
file_to_move = src+ '/%s' % file
dest_path = dest + '/%s' % file
if os.path.exists(dest_path):
existing_file_list.append(dest_path)
continue
if not os.path.exists(file_to_move):
file_not_exists.append(file_to_move)
else:
run_services.run_basic_services("echo %s | sudo -S mv %s %s" % (user_pass, file_to_move, dest))
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
elif existing_file_list:
return '{"success": 0, "msg": ["%s", file already exists!!!]}' % existing_file_list
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> move()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
def delete(src, files, user_pass):
try:
file_not_exists = []
if src.endswith('/'):
src = src[:-1]
for file in files:
file_path = src + '/%s' % file
if not os.path.exists(file_path):
file_not_exists.append(file_path)
run_services.run_basic_services("echo %s | sudo -S rm -rf %s" % (user_pass, file_path))
if file_not_exists:
return '{"success": 0, "msg": ["%s", file does not exists!!!]}' % file_not_exists
return '{"success": 1}'
except Exception as e:
log.error("Exception in copy_move_delete ==> move()")
log.error(e)
return '{"success": 0, "msg": ["%s"]}' % e
| 2.546875 | 3 |
pytest_use_postgresql.py | admariner/django-sql-dashboard | 293 | 12791491 | import os
import pytest
from dj_database_url import parse
from django.conf import settings
from testing.postgresql import Postgresql
postgres = os.environ.get("POSTGRESQL_PATH")
initdb = os.environ.get("INITDB_PATH")
_POSTGRESQL = Postgresql(postgres=postgres, initdb=initdb)
@pytest.hookimpl(tryfirst=True)
def pytest_load_initial_conftests(early_config, parser, args):
os.environ["DJANGO_SETTINGS_MODULE"] = early_config.getini("DJANGO_SETTINGS_MODULE")
settings.DATABASES["default"] = parse(_POSTGRESQL.url())
settings.DATABASES["dashboard"] = parse(_POSTGRESQL.url())
def pytest_unconfigure(config):
_POSTGRESQL.stop()
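# Added usage note (ours, not part of the original plugin): load this module as an
# explicit pytest plugin so the early-config hook runs before Django settings are
# imported, e.g.:
#   pytest -p pytest_use_postgresql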
| 2.0625 | 2 |
ConvertText.py | danheeks/PyCAD | 17 | 12791492 | <gh_stars>10-100
from HeeksFont import ConvertHeeksFont
ConvertHeeksFont() | 1.429688 | 1 |
examples/mlp.py | AlexanderViand/EVA | 0 | 12791493 | <reponame>AlexanderViand/EVA
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from eva import EvaProgram, Input, Output, evaluate, save, load
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
from eva.metric import valuation_mse
import os
import time
import copy
import math
import numpy as np
import pandas as pd
from numpy import random
import known_type_pb2
import eva_pb2
####################
# BENCHMARKING #
####################
times = {
't_keygen': [],
't_input_encryption': [],
't_computation': [],
't_decryption': []
}
def delta_ms(t0, t1):
return round(1000 * abs(t0 - t1))
all_times = []
cur_times = []
# Generate Data
image_size = 32 * 32
layer1_units = 32
layer2_units = 16
# Fix seed so we can compare result in c++ more easily
random.seed(0)
# Input image
image = [0.5] * image_size
# Considering images padded to 32x32 for easier math
weights_1 = np.random.rand(layer1_units, image_size) - 0.5
bias_1 = np.random.rand(layer1_units) - 0.5
# Allowing 16 output classes, where the 6 extra ones are always zero/ignored
weights_2 = np.random.rand(layer2_units, layer1_units) - 0.5
bias_2 = np.random.rand(layer2_units) - 0.5
def diag(matrix, d):
m, n = matrix.shape
r = [0] * n
for k in range(n):
r[k] = matrix[k % m][(k + d) % n]
return r
def mvp(ptxt_matrix, enc_vector):
m, n = ptxt_matrix.shape
log2_n_div_m = math.ceil(math.log(n // m, 2))
t = 0
for i in range(m):
t += (enc_vector << i) * diag(ptxt_matrix, i)
    # TODO: if n/m isn't a power of two, we need masking/padding here
for i in range(log2_n_div_m):
offset = n // (2 << i)
t += t << offset
return t
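# Added reference sketch (ours, not part of the original example): a plain-numpy
# model of the diagonal-encoding matrix-vector product that diag()/mvp() express
# for CKKS, where `<<` above is a cyclic left rotation. Useful for checking the
# math before encrypting; assumes n/m is a power of two, as mvp() itself does.
def _mvp_reference(matrix, vector):
    m, n = matrix.shape
    t = np.zeros(n)
    for i in range(m):
        # np.roll(vector, -i) models rotating the ciphertext left by i slots
        t += np.roll(vector, -i) * np.array(diag(matrix, i))
    for i in range(math.ceil(math.log(n // m, 2))):
        t += np.roll(t, -(n // (2 << i)))
    return t  # the first m slots equal matrix @ vector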
def compile():
print('Compile time')
mlp = EvaProgram('NN (MLP)', vec_size=32 * 32)
with mlp:
image = Input('input_0')
d1 = mvp(weights_1, image)
d1 = d1 + bias_1.tolist()
act1 = d1 * d1
d2 = mvp(weights_2, act1)
d2 = d2 + bias_2.tolist()
act2 = d2 * d2
Output('output', act2)
Output('output', d1)
mlp.set_output_ranges(60)
mlp.set_input_scales(60)
compiler = CKKSCompiler()
mlp, params, signature = compiler.compile(mlp)
save(mlp, 'mlp.eva')
save(params, 'mlp.evaparams')
save(signature, 'mlp.evasignature')
# Print IR representation
with open('mlp.eva', 'rb') as f, open('mlp.txt', 'w') as g:
read_kt = known_type_pb2.KnownType()
read_kt.ParseFromString(f.read())
read_eva = eva_pb2.Program()
read_eva.ParseFromString(read_kt.contents.value)
g.write(str(read_eva))
def compute():
################################################
print('Numpy version')
d1 = np.dot(weights_1, image)
d1 = d1 + bias_1
act1 = d1 * d1
d2 = np.dot(weights_2, act1)
d2 = d2 + bias_2
ref_result = d2 * d2
print(ref_result)
################################################
print('EVA plaintext version')
mlp = load('mlp.eva')
eva_ptxt_version = evaluate(mlp, {'input_0': image})
print(eva_ptxt_version['output'])
#################################################
print('Key generation time')
params = load('mlp.evaparams')
t0 = time.perf_counter()
public_ctx, secret_ctx = generate_keys(params)
t1 = time.perf_counter()
cur_times['t_keygen'] = delta_ms(t0, t1)
save(public_ctx, 'mlp.sealpublic')
save(secret_ctx, 'mlp.sealsecret')
#################################################
print('Runtime on client')
signature = load('mlp.evasignature')
public_ctx = load('mlp.sealpublic')
inputs = {
'input_0': image
}
t0 = time.perf_counter()
encInputs = public_ctx.encrypt(inputs, signature)
t1 = time.perf_counter()
cur_times['t_input_encryption'] = delta_ms(t0, t1)
save(encInputs, 'mlp_inputs.sealvals')
#################################################
print('Runtime on server')
mlp = load('mlp.eva')
public_ctx = load('mlp.sealpublic')
encInputs = load('mlp_inputs.sealvals')
t0 = time.perf_counter()
encOutputs = public_ctx.execute(mlp, encInputs)
t1 = time.perf_counter()
cur_times['t_computation'] = delta_ms(t0, t1)
save(encOutputs, 'mlp_outputs.sealvals')
#################################################
print('Back on client')
secret_ctx = load('mlp.sealsecret')
encOutputs = load('mlp_outputs.sealvals')
t0 = time.perf_counter()
outputs = secret_ctx.decrypt(encOutputs, signature)
t1 = time.perf_counter()
cur_times['t_decryption'] = delta_ms(t0, t1)
reference = {'output': [ref_result]}
print('Expected', reference)
print('Got', outputs)
print('MSE', valuation_mse(outputs, reference))
def main():
compile()
num_runs = int(os.getenv("NUM_RUNS")) if os.getenv("NUM_RUNS") is not None else 10
for run in range(num_runs):
global cur_times
cur_times = copy.copy(times)
compute()
print(cur_times)
all_times.append(cur_times)
# Output the benchmarking results
df = pd.DataFrame(all_times)
output_filename = "mlp_eva.csv"
if 'OUTPUT_FILENAME' in os.environ:
output_filename = os.environ['OUTPUT_FILENAME']
df.to_csv(output_filename, index=False)
if __name__ == "__main__":
main()
| 2.09375 | 2 |
tests/test_db_url.py | Swamii/django-bananas | 0 | 12791494 | <gh_stars>0
from urllib.parse import quote
from django.test import TestCase
from bananas import url
__test__ = {
'Doctest': url
}
class DBURLTest(TestCase):
def test_sqlite_memory(self):
conf = url.database_conf_from_url('sqlite://')
self.assertDictEqual(conf, {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '',
'USER': None,
'HOST': None,
'PORT': None,
'PARAMS': {},
'SCHEMA': None,
'PASSWORD': None,
})
def test_db_url(self):
conf = url.database_conf_from_url(
'pgsql://joar:[email protected]:4242/tweets/tweetschema'
'?hello=world')
self.assertDictEqual(conf, {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '5monkeys.se',
'NAME': 'tweets',
'PARAMS': {'hello': 'world'},
'PASSWORD': 'hunter2',
'PORT': 4242,
'SCHEMA': 'tweetschema',
'USER': 'joar',
})
def test_alias(self):
self.assertEqual(repr(url.Alias(target='x')), '<Alias to "x">')
def test_register(self):
url.register_engine('abc', 'a.b.c')
conf = url.database_conf_from_url('abc://5monkeys.se')
self.maxDiff = None
self.assertDictEqual(conf, {
'ENGINE': 'a.b.c',
'HOST': '5monkeys.se',
'NAME': '',
'PARAMS': {},
'PASSWORD': None,
'PORT': None,
'SCHEMA': None,
'USER': None,
})
def test_resolve(self):
url.register_engine('abc', 'a.b.c')
self.assertRaises(KeyError, url.resolve, cursor={}, key='xyz')
def test_get_engine(self):
self.assertRaisesMessage(KeyError, 'postgres has no sub-engines',
url.get_engine, 'postgres+psycopg2+postgis')
url.register_engine('a', ['b'])
self.assertRaisesRegex(ValueError, '^django-bananas\.url',
url.get_engine, 'a')
url.register_engine('a', ['a', {'b': 'c'}])
self.assertEqual(url.get_engine('a+b'), 'c')
def test_parse(self):
self.assertRaises(ValueError, url.parse_path, None)
self.assertRaisesRegex(Exception, '^Your url is',
url.parse_database_url, 'sqlite://:memory:')
def test_db_url_with_slashes(self):
name = quote('/var/db/tweets.sqlite', safe='')
conf = url.database_conf_from_url('sqlite3:///{0}'.format(name))
self.assertDictEqual(conf, {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/var/db/tweets.sqlite',
'USER': None,
'HOST': None,
'PORT': None,
'PARAMS': {},
'SCHEMA': None,
'PASSWORD': None,
})
| 2.546875 | 3 |
python3-variant/scraper.py | yacmeno/one-piece-scraper | 0 | 12791495 | """
Script that scrapes Jaimini's box website to retrieve
when was the last One Piece chapter released to then ask you if
you want to read the chapter in your browser or download it
"""
from bs4 import BeautifulSoup
import requests
import webbrowser
from os import getcwd
url = 'https://jaiminisbox.com/reader/series/one-piece-2/' #website with chapters
r = requests.get(url).text
soup = BeautifulSoup(r, 'lxml')
#the first <div> tag with class 'element' is the latest chapter
#there are 3 anchor tags <a> with 'href' attributes within the div
#links respectively redirect to:download, reading and the uploader's profile
chapter = soup.find('div', class_='element')
chapter_title = chapter.find('div', class_='title').text
chapter_date = chapter.find('div', class_='meta_r').text
print('the latest one piece chapter is...\n')
print(chapter_title)
print(chapter_date+'(yyyy/mm/dd)\n')
links = chapter.find_all('a') #list of len 3 with links inside <a> tags
#convert links' content into strings to use split() to get the urls as strings
for i in range(len(links)):
links[i] = str(links[i])
links[i] = links[i].split('"')
#the 3 items in links are now lists where the second element for each are urls
#print(links)
#visually: [[htmlstuff, url, htmlstuff], [idem], [idem]]
#This format has been consistently used, which allows to hardcode this
#without the use of regular expressions for 'http*'
action = input('would you like to download or read the chapter [d/r]?')
if action == 'd':
d_url = links[0][1]
print('Downloading...')
r2 = requests.get(d_url)
chapter_zip = open('%s' %chapter_title, 'wb')
chapter_zip.write(r2.content)
chapter_zip.close()
print('Zip file in:', getcwd())
elif action == 'r':
r_url = links[1][1]
webbrowser.open(r_url)
else:
print('that was neither d nor r, quitting...')
| 3.6875 | 4 |
Python/Crawling/naver/jsontoxml.py | zionhan/TIL | 1 | 12791496 | import json
import xmltodict
with open('complexes.json', 'r', encoding='UTF-8') as f:
jsonString = f.read()
print('JSON input (json_to_xml.json):')
print(jsonString)
xmlString = xmltodict.unparse(json.loads(jsonString), pretty=True)
print('\nXML output(json_to_xml.xml):')
print(xmlString)
with open('json_to_xml.xml', 'w') as f:
f.write(xmlString) | 3.171875 | 3 |
optigurator/pareto.py | ovidner/staircase-optigurator | 1 | 12791497 | import numpy as np
from openmdao.api import CaseReader
from optigurator.utils import recording_filename
def get_case_reader(data_dir, problem_constants):
return CaseReader(recording_filename(data_dir, problem_constants.id))
def generate_valid_points(problem_constants, crm):
for (i, case_id) in enumerate(crm.list_cases()):
model_case = crm.get_case(case_id)
if (
model_case.outputs["usability.min_max_step_height"][1]
<= problem_constants.step_height.upper
and model_case.outputs["usability.min_max_step_depth"][0]
>= problem_constants.step_depth.lower
and model_case.outputs["usability.min_free_height"][0]
> problem_constants.free_height_lower
):
yield [
model_case.outputs["price_availability.total_price"][0],
model_case.outputs["usability.usability_penalty"][0],
model_case.outputs["price_availability.total_delivery_time"][0],
i,
]
def calculate(inputPoints, dominates):
paretoPoints = set()
candidateRowNr = 0
dominatedPoints = set()
normalizedRowNr = 0
    # create a copy of the matrix, to be normalized later
normalizedPoints = np.array(inputPoints.copy())
sum1 = 0
sum2 = 0
sum3 = 0
sum4 = 0
for i in range(0, len(normalizedPoints)):
        # sum values column-wise for the normalization denominators
sum1 = sum1 + normalizedPoints[i, 0] ** 2
sum2 = sum2 + normalizedPoints[i, 1] ** 2
sum3 = sum3 + normalizedPoints[i, 2] ** 2
    # define a vector of normalization values (the trailing 1 leaves the ID column unscaled)
myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5, 1]
    # normalize the matrix
normalizedPoints = np.array(inputPoints) / np.array(myarray_normalize)
while True:
candidateRow = inputPoints[candidateRowNr]
normalized = normalizedPoints[normalizedRowNr]
normalizedPoints = np.delete(normalizedPoints, normalizedRowNr, 0)
inputPoints.remove(candidateRow)
rowNr = 0
nonDominated = True
while len(normalizedPoints) != 0 and rowNr < len(normalizedPoints):
row = normalizedPoints[rowNr]
rowIP = inputPoints[rowNr]
if dominates(
row, normalized
            ):  # entered if candidateRow is better than the challenger
normalizedPoints = np.delete(normalizedPoints, rowNr, 0)
inputPoints.remove(rowIP)
dominatedPoints.add(tuple(rowIP))
elif dominates(
normalized, row
            ):  # entered if the challenger is better than the candidate
nonDominated = False
dominatedPoints.add(tuple(candidateRow))
rowNr += 1
else:
rowNr += 1
        if nonDominated:  # add non-dominated points to the Pareto set
ID = int(normalized[3])
paretoPoints.add(tuple(candidateRow))
        if len(normalizedPoints) == 0:  # stop once all points have been processed
break
dp = np.array(list(dominatedPoints))
pp = np.array(list(paretoPoints))
return paretoPoints, dominatedPoints, dp, pp
def dominates(row, normalized):  # computes whether the challenger is better than the candidate
return sum([row[x] >= normalized[x] for x in range(len(row) - 1)]) == len(row) - 1
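# Illustrative check of the dominance test above (minimization objectives; the
# last element is an ID and is ignored): dominates([3.0, 2.0, 5.0, 7],
# [1.0, 2.0, 4.0, 3]) returns True, i.e. the second point is at least as good
# as the first in every objective.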
def WeightPPpoints(pp, my_weights):
Pareto_points = pp
np.size(Pareto_points)
Nrofrows_pareto = np.size(Pareto_points, 0)
    # create a vector of IDs
ID_vektor = np.delete(Pareto_points, [0, 1, 2], 1).tolist()
    # create a matrix of output values without the ID column
A = np.delete(Pareto_points, 3, 1)
np.size(A)
    # define the size of the matrix that comes out as the Pareto-points output
Nrofcolumns = np.size(A, 1)
Nrofrows = np.size(A, 0)
sizeofA = (Nrofrows, Nrofcolumns)
    # list that is then filled with the IDs of the best solutions
IDpoints = []
    # create a copy of the matrix, to be normalized later
B = A.copy()
sum1 = 0
sum2 = 0
sum3 = 0
for i in range(0, Nrofrows):
        # sum values column-wise for the normalization denominators
sum1 = sum1 + A[i, 0] ** 2
sum2 = sum2 + A[i, 1] ** 2
sum3 = sum3 + A[i, 2] ** 2
    # define a vector of normalization values
myarray_normalize = [sum1 ** 0.5, sum2 ** 0.5, sum3 ** 0.5]
    # normalize the matrix
B = A / myarray_normalize
    # copy the matrix and multiply it column-wise by the weights
C = B.copy()
    # loop over the weightings -> one optimal Pareto point per weighting as output
for j in range(0, len(my_weights)):
for i in range(0, Nrofrows):
C[i, 0] = B[i, 0] * my_weights[j, 0]
C[i, 1] = B[i, 1] * my_weights[j, 1]
C[i, 2] = B[i, 2] * my_weights[j, 2]
        # define the ideal values A_positive and the anti-ideal values A_negative
A_positive = [C[:, 0].min(), C[:, 1].min(), C[:, 2].min()]
A_negative = [C[:, 0].max(), C[:, 1].max(), C[:, 2].max()]
S_positive = np.zeros((Nrofrows, 1))
S_negative = np.zeros((Nrofrows, 1))
C_value = np.zeros((Nrofrows, 1))
for i in range(0, Nrofrows):
S_positive[i] = (
(C[i, 0] - A_positive[0]) ** 2
+ (C[i, 1] - A_positive[1]) ** 2
+ (C[i, 2] - A_positive[2]) ** 2
) ** 0.5
S_negative[i] = (
(C[i, 0] - A_negative[0]) ** 2
+ (C[i, 1] - A_negative[1]) ** 2
+ (C[i, 2] - A_negative[2]) ** 2
) ** 0.5
C_value[i] = S_negative[i] / (S_negative[i] + S_positive[i])
Best_value = C_value.max()
        # find which row of the C vector holds the largest value
Row_best_option = np.argmax(C_value)
        # find the input ID of that solution
Vektor_ID_optimala = np.array(ID_vektor[Row_best_option]).tolist()
IDpoints.append(int(max(Vektor_ID_optimala)))
return IDpoints
def generate_pareto_cases(data_dir, problem_constants):
crm = get_case_reader(data_dir, problem_constants)
input_points = list(generate_valid_points(problem_constants, crm))
pareto_points, dominated_points, dp, pp = calculate(input_points, dominates)
my_weights = np.matrix(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
) # Weights used to pick points from the ParetoFront
pareto_case_ids = WeightPPpoints(pp, my_weights)
for i in pareto_case_ids:
yield crm.get_case(i)
| 2.25 | 2 |
data_pipeline/sql/alter_statement.py | iagcl/data_pipeline | 16 | 12791498 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: alter_statement
# Purpose: Represents SQL alter statements
#
# Notes:
#
###############################################################################
import data_pipeline.sql.utils as sql_utils
import data_pipeline.constants.const as const
from .ddl_statement import DdlStatement
class AlterStatement(DdlStatement):
"""Contains data necessary to produce a valid SQL ALTER statement"""
def __init__(self, table_name):
super(AlterStatement, self).__init__(table_name)
self.statement_type = const.ALTER
def add_entry(self, **kwargs):
if const.ALTER_ENTRY in kwargs:
self.entries.append(kwargs[const.ALTER_ENTRY])
else:
alter_entry = {
const.OPERATION: kwargs[const.OPERATION],
const.FIELD_NAME: kwargs[const.FIELD_NAME],
const.DATA_TYPE: kwargs[const.DATA_TYPE],
const.PARAMS: kwargs[const.PARAMS],
const.CONSTRAINTS: kwargs[const.CONSTRAINTS]
}
self.add_entry(alter_entry=alter_entry)
def tosql(self, applier):
return applier.build_alter_sql(self)
def __str__(self):
return sql_utils.build_alter_sql(self)
| 1.820313 | 2 |
helper_scripts/shaderpatch.py | w23/Right_Hemisphere | 2 | 12791499 | <filename>helper_scripts/shaderpatch.py
#!/usr/bin/env python3
import json
import re
import argparse
parser = argparse.ArgumentParser(description='Patch minified shader')
parser.add_argument('--patch', required=True, help='Minified file to patch')
parser.add_argument('input', type=argparse.FileType('r'), help='Patch data (json)')
args = parser.parse_args()
shader = open(args.patch, 'r').read()
patch = json.load(args.input)
for k, v in patch.items():
shader = shader.replace(k, v)
open(args.patch, 'w').write(shader)
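# Assumed patch-file shape: a flat JSON object mapping original identifiers to
# their minified replacements, e.g. {"u_modelViewMatrix": "u0"} (names here are
# illustrative).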
| 2.78125 | 3 |
django_group_by/__init__.py | alissonmuller/django-group-by | 25 | 12791500 | """
This module contains the package exports.
"""
from .mixin import GroupByMixin
| 1.234375 | 1 |
src/read_data.py | GavinNishizawa/ncaa-march-madness-2018 | 1 | 12791501 | """
Read in the data from csv files
"""
import pandas as pd
import os
import glob
from save_data import save_object, load_object
def load_csv(filename):
# load data from pickle file if it exists
obj = load_object(filename)
    if obj is not None:
return obj
# otherwise load from csv
else:
data = pd.read_csv(filename, encoding="latin_1")
save_object(filename, data)
return data
def load_data():
pickle_fn = "data/loaded_data"
data = load_object(pickle_fn)
# load data from pickle file if it exists
    if data is not None:
return data
# otherwise load from csv
else:
data = {}
# load all csv files in data directory
for f in glob.glob(os.path.join("data", "*.csv")):
# key based on their filename
f_key = os.path.basename(f).split('.')[0]
print("Loading:", f_key)
data[f_key] = load_csv(f)
save_object(pickle_fn, data)
return data
def test_load_csv():
print("Test loading csv")
data = load_csv("data/NCAATourneySeeds.csv")
print(data)
def main():
data = load_data()
print("Available DataSet Keys: ")
for key in data.keys():
print("\t"+key)
if __name__ == "__main__":
main()
| 3.8125 | 4 |
bench/bench.py | ToriML/DNN-bench | 16 | 12791502 | <reponame>ToriML/DNN-bench<filename>bench/bench.py
import timeit
def benchmark_speed(benchmark_func, repeat=1000, number=1, warmup=100):
    assert repeat >= 2 * warmup, "warmup must be at most half of repeat."
out = timeit.repeat(benchmark_func, repeat=repeat, number=number)
# remove warmup
out = out[warmup:]
# calculate statistics
mean = sum(out) / len(out)
std = (sum((x - mean) ** 2 for x in out) / len(out)) ** 0.5
return dict(min=min(out), max=max(out), mean=mean, std=std, data=out)
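# Example usage (illustrative): benchmark a tiny function and report the
# aggregate statistics computed above.
if __name__ == "__main__":
    stats = benchmark_speed(lambda: sum(range(1000)), repeat=1000, number=1, warmup=100)
    print("mean: %.6fs, std: %.6fs" % (stats["mean"], stats["std"]))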
| 2.6875 | 3 |
Projekteuler/projecteuler_aufgabe001.py | kilian-funk/Python-Kurs | 0 | 12791503 | <filename>Projekteuler/projecteuler_aufgabe001.py
"""
Problem 1 from http://projecteuler.net
(German translation at http://projekteuler.de)
If we list all the natural numbers below 10 that are multiples of 3
or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
Hint: Break the problem statement down into its parts. First solve
simplified tasks, e.g. find all multiples of 3 below 20. Step by step, bring
the simplified task closer to the actual question.
"""
summe = sum(n for n in range(1000) if n % 3 == 0 or n % 5 == 0)  # here we go ...
print(summe)
| 2.71875 | 3 |
pytils/classes/_static.py | d33jiang/pytils | 0 | 12791504 | from typing import NoReturn, Type
__all__ = [
'static'
]
def _raise_init():
raise NotImplementedError('Static classes cannot be instantiated')
def static(cls) -> Type:
"""
Decorator for defining static classes.
The resulting static class cannot be instantiated. If the __init__ method is defined, then it is invoked with None
as the sole argument when the static class is defined.
"""
def on_init(*_args, **_kwargs) -> NoReturn:
_raise_init()
init_function = getattr(cls, '__init__', lambda _: None)
cls.__new__ = on_init
cls.__init__ = on_init
cls.__call__ = on_init
init_function(None)
return cls
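# Minimal usage sketch; the `Config` class below is hypothetical.
if __name__ == "__main__":
    @static
    class Config:
        VALUE = 42

    print(Config.VALUE)  # 42
    try:
        Config()
    except NotImplementedError as exc:
        print(exc)  # Static classes cannot be instantiated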
| 3.25 | 3 |
URL/util.py | Hydrophobefireman/self-host-google-fonts | 0 | 12791505 | from urllib.parse import ParseResult
from os.path import realpath, dirname, join as _path_join
import requests
from json import load as json_load
script_loc = realpath(__file__)
script_dir = dirname(script_loc)
del dirname
del realpath
mime_types: dict
with open(_path_join(script_dir, "mimes.json")) as f:
mime_types = json_load(f)
UA_m = "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM2.171019.029; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/68.0.3325.109 Mobile Safari/537.36"
UA_d = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3526.73 Safari/537.36"
basic_headers = {
"Accept-Encoding": "gzip, deflate",
"User-Agent": UA_d,
"Upgrade-Insecure-Requests": "1",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
"dnt": "1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
}
def _abort_request_after(url: str, byte_len: int = 1024):
with requests.get(
url, headers=basic_headers, allow_redirects=True, stream=True
) as chunk:
        for _ in chunk.iter_content(byte_len):
            headers, url = chunk.headers, chunk.url
            break  # stop after the first chunk; only the headers and final URL are needed
chunk.close()
return (headers, url)
def _normalise_url(parsed, remove_frag: bool = True):
d: dict = parsed._asdict()
d["scheme"] = d["scheme"].lower()
d["netloc"] = d["netloc"].lower()
d["fragment"] = ""
return ParseResult(**d)
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'"):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def int_or_none(i):
    if isinstance(i, int):
        return i
    try:
        return int(i)
    except (TypeError, ValueError):
        return None
| 2.671875 | 3 |
Setups/autoboard.py | matthewvanderson/MagicBot | 1 | 12791506 | from disnake.ext import commands
from utils.clash import client, pingToChannel, getClan
import disnake
usafam = client.usafam
clans = usafam.clans
server = usafam.server
class autoB(commands.Cog, name="Board Setup"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.slash_command(name="autoboard")
async def autoboard(self, ctx):
pass
@autoboard.sub_command(name="create", description="Create server autoposting leaderboards")
async def setupboard(self, ctx: disnake.ApplicationCommandInteraction, channel: disnake.TextChannel, autoboard_type: str = commands.Param(choices=["Player Leaderboard", "Clan Leaderboard"])):
perms = ctx.author.guild_permissions.manage_guild
if not perms:
embed = disnake.Embed(description="Command requires you to have `Manage Server` permissions.",
color=disnake.Color.red())
return await ctx.send(embed=embed)
await ctx.response.defer()
msg = await ctx.original_message()
country = None
if autoboard_type == "Clan Leaderboard":
rr = []
tracked = clans.find({"server": ctx.guild.id})
limit = await clans.count_documents(filter={"server": ctx.guild.id})
for clan in await tracked.to_list(length=limit):
tag = clan.get("tag")
c = await getClan(tag)
location = str(c.location)
if location not in rr:
rr.append(str(location))
options = []
for country in rr:
options.append(disnake.SelectOption(label=f"{country}", value=f"{country}"))
select1 = disnake.ui.Select(
options=options,
placeholder="Page Navigation",
min_values=1, # the minimum number of options a user must select
max_values=1 # the maximum number of options a user can select
)
action_row = disnake.ui.ActionRow()
action_row.append_item(select1)
embed = disnake.Embed(title="**For what country would you like the leaderboard autoboard?**",
color=disnake.Color.green())
await ctx.edit_original_message(embed=embed, components=[action_row])
def check(res: disnake.MessageInteraction):
return res.message.id == msg.id
country = False
while country == False:
try:
res: disnake.MessageInteraction = await self.bot.wait_for("message_interaction", check=check,
timeout=600)
except:
await msg.edit(components=[])
break
if res.author.id != ctx.author.id:
await res.send(content="You must run the command to interact with components.", ephemeral=True)
continue
country = str(res.values[0])
tex = ""
if autoboard_type == "Player Leaderboard":
await server.update_one({"server": ctx.guild.id}, {'$set': {"topboardchannel": channel.id}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"tophour": 5}})
else:
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbboardChannel": channel.id}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"country": country}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbhour": 5}})
tex = f"\nCountry: {country}"
time = f"<t:{1643263200}:t>"
embed = disnake.Embed(title="**Autoboard Successfully Setup**",
description=f"Channel: {channel.mention}\n"
f"Time: {time}\n"
f"Type: {autoboard_type}{tex}",
color=disnake.Color.green())
await msg.edit(embed=embed)
@autoboard.sub_command(name="remove", description="Remove a server autoboard")
async def removeboard(self, ctx: disnake.ApplicationCommandInteraction, autoboard_type: str = commands.Param(choices=["Player Leaderboard", "Clan Leaderboard"])):
perms = ctx.author.guild_permissions.manage_guild
if not perms:
embed = disnake.Embed(description="Command requires you to have `Manage Server` permissions.",
color=disnake.Color.red())
return await ctx.send(embed=embed)
if autoboard_type == "Player Leaderboard":
await server.update_one({"server": ctx.guild.id}, {'$set': {"topboardchannel": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"tophour": None}})
else:
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbboardChannel": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"country": None}})
await server.update_one({"server": ctx.guild.id}, {'$set': {"lbhour": None}})
embed = disnake.Embed(description=f"{autoboard_type} autoboard has been removed.",
color=disnake.Color.green())
await ctx.send(embed=embed, components=[])
@autoboard.sub_command(name="list", description="View server autoboards")
async def boardlist(self, ctx):
tbc = None
th = None
lbc = None
lbh = None
country = None
results = await server.find_one({"server": ctx.guild.id})
real_times = []
start_time = 1643263200
for x in range(0, 24):
t = start_time + (x * 3600)
real_times.append(t)
try:
tbc = results.get("topboardchannel")
tbc = await pingToChannel(ctx, tbc)
tbc = tbc.mention
except:
pass
try:
th = results.get("tophour")
th = real_times[th - 5]
th = f"<t:1643263200:t>"
except:
pass
try:
lbc = results.get("lbboardChannel")
lbc = await pingToChannel(ctx, lbc)
lbc = lbc.mention
except:
pass
try:
lbh = results.get("lbhour")
lbh = real_times[lbh - 5]
lbh = f"<t:1643263200:t>"
except:
pass
try:
country = results.get("country")
except:
pass
embed = disnake.Embed(title="**Autoboard List**",
description=f"Player leaderboard Channel: {tbc}\n"
f"Player leaderboard Post Time: {th}\n"
f"Clan leaderboard Channel: {lbc}\n"
f"Clan leaderboard Post Time: {lbh}\n"
f"Clan leaderboard Country: {country}\n",
color=disnake.Color.green())
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(autoB(bot)) | 2.328125 | 2 |
dev-infra/bazel/extract_js_module_output.bzl | yuchenghu/angular-cn | 17 | 12791507 | <filename>dev-infra/bazel/extract_js_module_output.bzl
load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo", "JSEcmaScriptModuleInfo", "JSModuleInfo", "JSNamedModuleInfo")
"""Converts a provider name to its actually Starlark provider instance."""
def _name_to_js_module_provider(name):
if name == "JSModuleInfo":
return JSModuleInfo
elif name == "JSNamedModuleInfo":
return JSNamedModuleInfo
elif name == "JSEcmaScriptModuleInfo":
return JSEcmaScriptModuleInfo
fail("Unexpected JavaScript module provider.")
"""Implementation of the extract_js_module_output rule."""
def _extract_js_module_output_impl(ctx):
js_module_provider = _name_to_js_module_provider(ctx.attr.provider)
depsets = []
for dep in ctx.attr.deps:
# Include JavaScript sources (including transitive outputs) based on the
# configured JavaScript module provider.
if js_module_provider in dep:
depsets.append(dep[js_module_provider].sources)
# Based on whether declarations should be collected, extract direct
# and transitive declaration files using the `DeclarationInfo` provider.
if ctx.attr.include_declarations and DeclarationInfo in dep:
depsets.append(dep[DeclarationInfo].transitive_declarations)
# Based on whether default files should be collected, extract direct
# files which are exposed using the `DefaultInfo` provider. Also include
# data runfiles which are needed for the current target.
# https://docs.bazel.build/versions/main/skylark/lib/DefaultInfo.html#data_runfiles
if ctx.attr.include_default_files and DefaultInfo in dep:
depsets.append(dep[DefaultInfo].files)
depsets.append(dep[DefaultInfo].data_runfiles.files)
sources = depset(transitive = depsets)
return [DefaultInfo(files = sources)]
"""
Rule that collects declared JavaScript module output files from a list of dependencies
based on a configurable JavaScript module provider. The extracted outputs are exposed
within the `DefaultInfo` provider. Targets defined using this rule can be used as input
for rules that require JavaScript sources, or if there are multiple JavaScript output
variants defined for a target while for example only the `JSModule` outputs are of interest.
As an example: This rule is helpful in combination with `ts_library` and `ng_module` as
those rule expose multiple output flavors (which are distinguishable by the JavaScript module
providers as imported from `providers.bzl`). i.e. these rules expose flavors for named AMD
modules and ECMAScript module output. For reference:
https://github.com/bazelbuild/rules_nodejs/blob/stable/packages/typescript/internal/build_defs.bzl#L334-L337
"""
extract_js_module_output = rule(
implementation = _extract_js_module_output_impl,
attrs = {
"deps": attr.label_list(
allow_files = True,
),
"provider": attr.string(
doc = "JavaScript module info provider that is used for collecting sources from the dependencies.",
mandatory = True,
values = ["JSModuleInfo", "JSNamedModuleInfo", "JSEcmaScriptModuleInfo"],
),
"include_declarations": attr.bool(
mandatory = True,
doc = "Whether declaration files should be collected from the dependencies.",
),
"include_default_files": attr.bool(
mandatory = True,
doc = """
Whether files from the `DefaultInfo` provider should be collected. Includes
data runfiles needed for the default outputs from dependencies.
""",
),
},
)
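# Illustrative BUILD usage of the rule above (target names are hypothetical):
#
#   extract_js_module_output(
#       name = "lib_esm",
#       deps = [":lib"],
#       provider = "JSEcmaScriptModuleInfo",
#       include_declarations = True,
#       include_default_files = False,
#   )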
| 1.976563 | 2 |
dopplerr/api/add_route.py | Stibbons/sonarr-sub-downloader-docker | 9 | 12791508 | # coding: utf-8
# Third Party Libraries
from sanic_transmute import add_route
from transmute_core.compat import string_type
from transmute_core.function import TransmuteAttributes
def describe_add_route(blueprint, **kwargs):
# if we have a single method, make it a list.
if isinstance(kwargs.get("paths"), string_type):
kwargs["paths"] = [kwargs["paths"]]
if isinstance(kwargs.get("methods"), string_type):
kwargs["methods"] = [kwargs["methods"]]
attrs = TransmuteAttributes(**kwargs)
def decorator(fnc):
if hasattr(fnc, "transmute"):
fnc.transmute = fnc.transmute | attrs
else:
fnc.transmute = attrs
add_route(blueprint, fnc)
return fnc
return decorator
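# Illustrative usage on a sanic blueprint (the handler and path are made up):
#
#   @describe_add_route(blueprint, paths="/ping", methods="GET")
#   async def ping() -> str:
#       return "pong"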
| 2.234375 | 2 |
easy_sdm/data/species_data.py | math-sasso/masters_research | 1 | 12791509 | import os
from typing import Dict
from abc import ABC
from easy_sdm.data import ShapefileRegion
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from easy_sdm.configs import configs
from easy_sdm.utils import logger
from typing import Dict, Optional
from pathlib import Path
class GBIFOccurencesRequester:
"""[This class makes request to GBIF]
"""
def __init__(self, taxon_key: int, species_name: str):
self.taxon_key = taxon_key
self.species_name = species_name
self.base_url = "http://api.gbif.org/v1/occurrence/search"
def request(self, offset: int = 0):
"""[ Request GBIF information about an species]
Args:
            offset (int, optional): [Offset is the position at which to start
                the request in the GBIF database, since each request is
                limited to 300 rows]. Defaults to 0.

        Returns:
            [tuple]: [(response json, end_of_records flag, HTTP status code)]
"""
gbif_configs = configs["gbif"]
params = {
"taxonKey": str(self.taxon_key),
"limit": gbif_configs["one_request_limit"],
"hasCoordinate": True,
"year": f"{gbif_configs['low_year']},{gbif_configs['up_year']}",
"country": gbif_configs["country"],
"offset": offset,
}
r = requests.get(self.base_url, params=params)
status_code = r.status_code
if r.status_code != 200:
logger.logging.info(
f"API call failed at offset {offset} with a status code of {r.status_code}."
)
end_of_records = True
else:
r = r.json()
end_of_records = r["endOfRecords"]
return r, end_of_records, status_code
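# Illustrative paging loop over GBIFOccurencesRequester (the taxon key and
# species name are placeholders):
#   requester = GBIFOccurencesRequester(5290052, "Zea mays")
#   offset, end_of_records = 0, False
#   while not end_of_records:
#       r, end_of_records, status = requester.request(offset)
#       offset += len(r["results"])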
class Species:
def __init__(self, taxon_key: int, name: str):
self.taxon_key = taxon_key
self.name = name
def __str__(self) -> str:
return "Species {self.name} with taxon key {self.taxon_key}"
class SpeciesDFBuilder:
"""[This class organize data requested to GBIF into pandas dataframes]
"""
def __init__(self, species: Species):
self.gbif_occ_requester = GBIFOccurencesRequester(
species.taxon_key, species.name
)
self.__df_memory = None
def get_specie_df(self):
"""Get species as DataFrame"""
        if self.__df_memory is not None:
df = self.__df_memory
else:
df = self.__request_species_df()
df = self.__clean_species_df(df)
self.__df_memory = df
return df
def __request_species_df(self):
"""[Organizes GBIF information in a dataframe considering offsets ]"""
end_of_records = False
offset = 0
status = 200
df = None
while end_of_records == False and status == 200:
r, end_of_records, status = self.gbif_occ_requester.request(offset)
df = self.__build_species_df(r, df)
offset = len(df) + 1
        df = self.__clean_species_df(df)
return df
def __build_species_df(self, request, df=None):
"""[Create species dataframe with the request data]
Args:
df ([type]): [description]
request ([type]): [description]
Returns:
[df]: [description]
"""
if df is None:
df = pd.DataFrame(
columns=[
"SCIENTIFIC_NAME",
"LONGITUDE",
"LATITUDE",
"COUNTRY",
"STATE_PROVINCE",
"IDENTIFICATION_DATE",
"DAY",
"MONTH",
"YEAR",
]
)
for result in request["results"]:
result = self.__refact_dict(result)
df = df.append(
{
"SCIENTIFIC_NAME": result["scientificName"],
"LONGITUDE": result["decimalLongitude"],
"LATITUDE": result["decimalLatitude"],
"COUNTRY": result["country"],
"STATE_PROVINCE": result["stateProvince"],
"IDENTIFICATION_DATE": result["eventDate"],
"DAY": result["day"],
"MONTH": result["month"],
"YEAR": result["year"],
},
ignore_index=True,
)
return df
def __refact_dict(self, result: Dict):
"""Refact dict placing None in empty cells"""
columns = result.keys()
desired_columns = [
"scientificName",
"decimalLongitude",
"decimalLatitude",
"country",
"stateProvince",
"eventDate",
"day",
"month",
"year",
"occurrenceRemarks",
]
for d_col in desired_columns:
if d_col not in columns:
result[d_col] = None
return result
def __clean_species_df(self, df: pd.DataFrame):
"""[Cleaning Gbif Data]
Args:
df ([pd.DaraFrame]): [description]
Returns:
[pd.DaraFrame]: [description]
"""
# Double check to certify there is no empty lat/long data
df = df[pd.notnull(df["LATITUDE"])]
df = df[pd.notnull(df["LONGITUDE"])]
# Removing duplicate data
df = (
df.drop_duplicates(ignore_index=True)
if configs["gbif"]["drop_duplicates"]
else df
)
# Sorting Data by STATE_PROVINCE
df.sort_values("STATE_PROVINCE", inplace=True, ignore_index=True)
return df
class SpeciesGDFBuilder(SpeciesDFBuilder):
"""[This class organize data requested to GBIF into geopandas geodataframes]
"""
def __init__(
self, species: Species, proposed_region: Optional[ShapefileRegion] = None
):
super().__init__(species)
self.proposed_region = proposed_region
self.__gdf_memory = None
def save_species_gdf(self, output_path: Path):
if not str(output_path).endswith(".shp"):
raise TypeError("output_path must ends with shp")
output_path.parent.mkdir(parents=True, exist_ok=True)
gdf = self.get_species_gdf()
gdf.to_file(output_path)
def get_species_gdf(self):
if not (self.__gdf_memory is None):
gdf = self.__gdf_memory
else:
df = self.get_specie_df()
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.LONGITUDE, df.LATITUDE)
)
gdf = gdf.set_crs(f"EPSG:{configs['maps']['default_epsg']}")
gdf = (
self.__filter_species_in_region(gdf)
if not (self.proposed_region is None)
else gdf
)
self.__gdf_memory = gdf
return gdf
def __filter_species_in_region(self, gdf: gpd.GeoDataFrame):
return self.proposed_region.get_points_inside(gdf)
class SpeciesInfoExtractor:
"""[A Wrapper to extract relevant information from spescies geodataframes]
"""
def __init__(self, species_geodataframe: gpd.GeoDataFrame) -> None:
self.species_geodataframe = species_geodataframe
def get_coordinates(self,):
coordinates = np.array(
(
np.array(self.species_geodataframe["LATITUDE"]),
np.array(self.species_geodataframe["LONGITUDE"]),
)
).T
return coordinates
def get_longitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 1]
def get_latitudes(self,):
coordinates = self.get_coordinates()
return coordinates[:, 0]
| 2.609375 | 3 |
tests/links_tests/model_tests/fpn_tests/test_head.py | souravsingh/chainercv | 0 | 12791510 | from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import Head
from chainercv.links.model.fpn import head_loss_post
from chainercv.links.model.fpn import head_loss_pre
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
@testing.parameterize(
{'n_class': 1 + 1},
{'n_class': 5 + 1},
{'n_class': 20 + 1},
)
class TestHead(unittest.TestCase):
def setUp(self):
self.link = Head(n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))
def _check_call(self):
hs = [
chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
]
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs, confs = self.link(hs, rois, roi_indices)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (4, self.n_class, 4))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (4, self.n_class))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def _check_distribute(self):
rois = self.link.xp.array((
(0, 0, 10, 10),
(0, 1000, 0, 1000),
(0, 0, 224, 224),
(100, 100, 224, 224),
), dtype=np.float32)
roi_indices = self.link.xp.array((0, 1, 0, 0), dtype=np.int32)
rois, roi_indices = self.link.distribute(rois, roi_indices)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
for l in range(3):
self.assertIsInstance(rois[l], self.link.xp.ndarray)
self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(sum(rois[l].shape[0] for l in range(3)), 4)
def test_distribute_cpu(self):
self._check_distribute()
@attr.gpu
def test_distribute_gpu(self):
self.link.to_gpu()
self._check_distribute()
def _check_decode(self):
rois = [
self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
self.link.xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
self.link.xp.array((0,), dtype=np.int32),
self.link.xp.array((1, 0), dtype=np.int32),
self.link.xp.array((1,), dtype=np.int32),
]
locs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class, 4)))
confs = chainer.Variable(_random_array(
self.link.xp, (4, self.n_class)))
bboxes, labels, scores = self.link.decode(
rois, roi_indices,
locs, confs,
(0.4, 0.2), ((100, 100), (200, 200)),
0.5, 0.1)
self.assertEqual(len(bboxes), 2)
self.assertEqual(len(labels), 2)
self.assertEqual(len(scores), 2)
for n in range(2):
self.assertIsInstance(bboxes[n], self.link.xp.ndarray)
self.assertIsInstance(labels[n], self.link.xp.ndarray)
self.assertIsInstance(scores[n], self.link.xp.ndarray)
self.assertEqual(bboxes[n].shape[0], labels[n].shape[0])
self.assertEqual(bboxes[n].shape[0], scores[n].shape[0])
self.assertEqual(bboxes[n].shape[1:], (4,))
self.assertEqual(labels[n].shape[1:], ())
self.assertEqual(scores[n].shape[1:], ())
def test_decode_cpu(self):
self._check_decode()
@attr.gpu
def test_decode_gpu(self):
self.link.to_gpu()
self._check_decode()
class TestHeadLoss(unittest.TestCase):
def _check_head_loss_pre(self, xp):
rois = [
xp.array(((4, 1, 6, 3),), dtype=np.float32),
xp.array(
((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
xp.array(((10, 4, 12, 10),), dtype=np.float32),
]
roi_indices = [
xp.array((0,), dtype=np.int32),
xp.array((1, 0), dtype=np.int32),
xp.array((1,), dtype=np.int32),
]
bboxes = [
xp.array(((2, 4, 6, 7), (1, 12, 3, 30)), dtype=np.float32),
xp.array(((10, 2, 12, 12),), dtype=np.float32),
]
labels = [
xp.array((10, 4), dtype=np.float32),
xp.array((1,), dtype=np.float32),
]
rois, roi_indices, gt_locs, gt_labels = head_loss_pre(
rois, roi_indices, (0.1, 0.2), bboxes, labels)
self.assertEqual(len(rois), 3)
self.assertEqual(len(roi_indices), 3)
self.assertEqual(len(gt_locs), 3)
self.assertEqual(len(gt_labels), 3)
for l in range(3):
self.assertIsInstance(rois[l], xp.ndarray)
self.assertIsInstance(roi_indices[l], xp.ndarray)
self.assertIsInstance(gt_locs[l], xp.ndarray)
self.assertIsInstance(gt_labels[l], xp.ndarray)
self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_locs[l].shape[0])
self.assertEqual(rois[l].shape[0], gt_labels[l].shape[0])
self.assertEqual(rois[l].shape[1:], (4,))
self.assertEqual(roi_indices[l].shape[1:], ())
self.assertEqual(gt_locs[l].shape[1:], (4,))
self.assertEqual(gt_labels[l].shape[1:], ())
def test_head_loss_pre_cpu(self):
self._check_head_loss_pre(np)
@attr.gpu
def test_head_loss_pre_gpu(self):
import cupy
self._check_head_loss_pre(cupy)
def _check_head_loss_post(self, xp):
locs = chainer.Variable(_random_array(xp, (20, 81, 4)))
confs = chainer.Variable(_random_array(xp, (20, 81)))
roi_indices = [
xp.random.randint(0, 2, size=5).astype(np.int32),
xp.random.randint(0, 2, size=7).astype(np.int32),
xp.random.randint(0, 2, size=8).astype(np.int32),
]
gt_locs = [
_random_array(xp, (5, 4)),
_random_array(xp, (7, 4)),
_random_array(xp, (8, 4)),
]
gt_labels = [
xp.random.randint(0, 80, size=5).astype(np.int32),
xp.random.randint(0, 80, size=7).astype(np.int32),
xp.random.randint(0, 80, size=8).astype(np.int32),
]
loc_loss, conf_loss = head_loss_post(
locs, confs, roi_indices, gt_locs, gt_labels, 2)
self.assertIsInstance(loc_loss, chainer.Variable)
self.assertIsInstance(loc_loss.array, xp.ndarray)
self.assertEqual(loc_loss.shape, ())
self.assertIsInstance(conf_loss, chainer.Variable)
self.assertIsInstance(conf_loss.array, xp.ndarray)
self.assertEqual(conf_loss.shape, ())
def test_head_loss_post_cpu(self):
self._check_head_loss_post(np)
@attr.gpu
def test_head_loss_post_gpu(self):
import cupy
self._check_head_loss_post(cupy)
testing.run_module(__name__, __file__)
| 2 | 2 |
tfx/utils/channel.py | Bumbleblo/tfx | 0 | 12791511 | <gh_stars>0
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Channel definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Iterable
from typing import Optional
from typing import Text
from typing import Union
from tfx.utils import types
class Channel(object):
"""Tfx Channel.
TFX Channel is an abstract concept that connects data producers and data
consumers. It contains restriction of the artifact type that should be fed
into or read from it.
Attributes:
type_name: A string representing the artifact type the Channel takes.
"""
# TODO(b/124763842): Consider replace type_name with ArtifactType.
# TODO(b/125348988): Add support for real Channel in addition to static ones.
def __init__(self,
type_name: Text,
artifacts: Optional[Iterable[types.TfxArtifact]] = None):
"""Initialization of Channel.
Args:
type_name: Name of the type that should be fed into or read from the
Channel.
artifacts: (Optional) A collection of artifacts as the
values that can be read from the Channel. This is used to construct a
static Channel.
"""
self.type_name = type_name
self._artifacts = artifacts or []
self._validate_type()
def __str__(self):
return 'Channel<{}: {}>'.format(self.type_name,
self._artifacts)
def __repr__(self):
return self.__str__()
def _validate_type(self) -> None:
for artifact in self._artifacts:
if artifact.type_name != self.type_name:
raise ValueError(
"Artifacts provided do not match Channel's artifact type {}"
.format(self.type_name))
def get(self) -> Iterable[types.TfxArtifact]:
"""Returns all artifacts that can be get from this Channel.
Returns:
An artifact collection.
"""
# TODO(b/125037186): We should support dynamic query against a Channel
# instead of a static Artifact collection.
return self._artifacts
def type_check(self, expected_type_name: Text) -> None:
"""Checks whether a Channel has the expected type name.
Args:
expected_type_name: Expected type_name to check against.
Raises:
TypeError if the type_name of given Channel is different from expected.
"""
if self.type_name != expected_type_name:
raise TypeError('Expected {} but found {}.'.format(expected_type_name,
str(self.type_name)))
def as_channel(source: Union[Channel, Iterable[types.TfxArtifact]]) -> Channel:
"""Converts artifact collection of the same artifact type into a Channel.
Args:
source: Either a Channel or an iterable of TfxArtifact.
Returns:
A static Channel containing the source artifact collection.
Raises:
ValueError when source is not a non-empty iterable of TfxArtifact.
"""
if isinstance(source, Channel):
return source
elif isinstance(source, collections.Iterable):
try:
first_element = next(iter(source))
if isinstance(first_element, types.TfxArtifact):
return Channel(
type_name=first_element.type_name,
artifacts=source)
else:
raise ValueError('Invalid source to be a channel: {}'.format(source))
except StopIteration:
raise ValueError('Cannot convert empty artifact collection into Channel')
else:
raise ValueError('Invalid source to be a channel: {}'.format(source))
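# Illustrative conversion (the TfxArtifact constructor call is an assumption
# about tfx.utils.types; the type name is a placeholder):
#   examples = [types.TfxArtifact('ExamplesPath')]
#   channel = as_channel(examples)
#   channel.type_check('ExamplesPath')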
| 2.46875 | 2 |
charity_donation_app/users/views.py | ikolokotronis/donation_app | 0 | 12791512 | <filename>charity_donation_app/users/views.py
from django.shortcuts import render, redirect
from django.views import View
from main.models import Donation, DonationCategories, \
TokenTemporaryStorage
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.core.mail import send_mail
from django.urls import reverse
from django.utils.encoding import force_bytes, force_str, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.contrib.sites.shortcuts import get_current_site
from .utils import token_generator
from django.core.exceptions import ObjectDoesNotExist
class LoginView(View):
def get(self, request):
return render(request, 'login.html')
def post(self, request):
try:
email = request.POST.get('email')
password = request.POST.get('password')
user = authenticate(request, username=email, password=password)
if user is not None:
login(request, user)
return redirect('/')
elif not User.objects.get(email=email).check_password(password):
messages.error(request, 'Incorrect password')
return render(request, 'login.html')
except ObjectDoesNotExist:
messages.error(request, 'Given e-mail does not exist in the database')
return render(request, 'login.html')
class RegisterView(View):
def get(self, request):
return render(request, 'register.html')
def post(self, request):
name = request.POST.get('name')
surname = request.POST.get('surname')
email = request.POST.get('email')
password = request.POST.get('password')
password2 = request.POST.get('password2')
if len(password) < 8 or len(password2) < 8:
messages.error(request, 'Password too short (Min. 8 characters)')
return render(request, 'register.html')
elif any(not c.isalnum() for c in password2) is False \
or any(c.isupper() for c in password2) is False \
or any(c.islower() for c in password2) is False \
or any(c.isdigit() for c in password2) is False:
            messages.error(request, 'The password must contain uppercase letters, '
                                    'lowercase letters, digits and special characters')
return render(request, 'register.html')
elif User.objects.filter(username=email):
messages.error(request, 'A user with the given e-mail already exists')
return render(request, 'register.html')
elif password != <PASSWORD>:
messages.error(request, 'Passwords mismatch')
return render(request, 'register.html')
user = User.objects.create_user(username=email, first_name=name, last_name=surname, email=email)
user.set_password(<PASSWORD>)
user.is_active = False
user.save()
uidb64 = urlsafe_base64_encode(force_bytes(user.pk))
token = token_generator.make_token(user)
TokenTemporaryStorage.objects.create(user_id=user.id, token=token)
domain = get_current_site(request).domain
link = reverse('activate-page', kwargs={'uidb64': uidb64, 'token': token})
email_subject = 'Activate your account'
activation_url = f'http://{domain}{link}'
email_body = f'Hello {user}, your activation link: {activation_url}'
send_mail(
email_subject,
email_body,
'<EMAIL>',
[email],
fail_silently=False,
)
messages.success(request, 'Check your e-mail account for further information')
return render(request, 'register.html')
class VerificationView(View):
def get(self, request, uidb64, token):
try:
id = force_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
stored_token = TokenTemporaryStorage.objects.get(user=user).token
if token == stored_token:
TokenTemporaryStorage.objects.get(user=user).delete()
if not token_generator.check_token(user, token):
messages.error(request, 'Account is already activated')
return redirect('login-page')
if user.is_active:
return redirect('login-page')
user.is_active = True
user.save()
messages.success(request, 'Account successfully activated')
return redirect('login-page')
else:
messages.error(request, 'Incorrect link or account is already activated')
return redirect('login-page')
except ObjectDoesNotExist:
messages.error(request, 'Incorrect link or account is already activated')
return redirect('login-page')
class LogoutView(View):
def get(self, request):
logout(request)
return redirect('/')
class UserPanelView(View):
def get(self, request, user_id):
donations = Donation.objects.filter(user_id=user_id).order_by('date_added') \
.order_by('date_taken').order_by('time_taken').order_by('is_taken')
donation_categories = DonationCategories.objects.all()
return render(request, 'user_panel.html', {'donations': donations,
'donation_categories': donation_categories})
def post(self, request, user_id):
name = request.POST.get('name')
surname = request.POST.get('surname')
message = request.POST.get('message')
email_subject = f'Contact form(Sent by user {name} {surname}'
email_body = message
administrators = User.objects.filter(is_superuser=True)
if not name or not surname or not message:
messages.error(request, 'Please fill all fields correctly')
return redirect('/')
for administrator in administrators:
email = administrator.email
send_mail(
email_subject,
email_body,
'<EMAIL>',
[email],
fail_silently=False,
)
messages.success(request, 'Successfully sent')
return redirect(f'/panel/{request.user.id}/')
class UserEditView(View):
def get(self, request, user_id):
if request.user.id != user_id:
return redirect(f'/edit/{request.user.id}/')
return render(request, 'user-edit.html')
def post(self, request, user_id):
user = User.objects.get(id=user_id)
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
if not request.POST.get('password') or not request.POST.get('password2'):
messages.error(request, 'Please fill all fields correctly')
return render(request, 'user-edit.html')
password = request.POST.get('password')
password2 = request.POST.get('password2')
if password != <PASSWORD>:
messages.error(request, 'Passwords mismatch')
return render(request, 'user-edit.html')
user = authenticate(request, username=request.user.email, password=<PASSWORD>)
if user is None:
messages.error(request, 'Incorrect password')
return render(request, 'user-edit.html')
user.first_name = first_name
user.last_name = last_name
user.email = email
user.save()
messages.success(request, 'Data has been changed')
return redirect(f'/edit/{request.user.id}/')
class PasswordChangeView(View):
def get(self, request, user_id):
if request.user.id != user_id:
return redirect(f'/edit/{request.user.id}/')
return render(request, 'change-password.html')
def post(self, request, user_id):
if not request.POST.get('old_password') or not request.POST.get('new_password1') or not request.POST.get(
'new_password2'):
messages.error(request, 'Please fill all fields correctly')
return render(request, 'change-password.html')
old_password = request.POST.get('old_password')
user = authenticate(request, username=request.user.email, password=old_password)
if user is None:
messages.error(request, 'Old password incorrect')
return render(request, 'change-password.html')
new_password1 = request.POST.get('new_password1')
new_password2 = request.POST.get('new_password2')
if new_password1 != new_password2:
messages.error(request, 'Passwords mismatch')
return render(request, 'change-password.html')
user.set_password(<PASSWORD>)
user.save()
new_user = authenticate(request, username=request.user.email, password=<PASSWORD>)
if user is None:
messages.error(request, 'Something went wrong')
return render(request, 'change-password.html')
login(request, new_user)
messages.success(request, 'Data successfully changed')
return redirect(f'/edit/{request.user.id}/')
class PasswordResetView(View):
def get(self, request):
return render(request, 'password-reset.html')
def post(self, request):
email = request.POST.get('email')
try:
user = User.objects.get(email=email)
uidb64 = urlsafe_base64_encode(force_bytes(user.pk))
token = token_generator.make_token(user)
TokenTemporaryStorage.objects.create(user_id=user.id, token=token)
domain = get_current_site(request).domain
link = reverse('password-reset-verification', kwargs={'uidb64': uidb64, 'token': token})
email_subject = 'Password reset'
activation_url = f'http://{domain}{link}'
            email_body = f'Hello {user}, your password reset link: {activation_url}'
send_mail(
email_subject,
email_body,
'<EMAIL>',
[email],
fail_silently=False,
)
messages.success(request, 'Check your e-mail inbox')
return render(request, 'password-reset.html')
except ObjectDoesNotExist:
messages.error(request, 'Incorrect e-mail')
return render(request, 'password-reset.html')
class PasswordResetVerificationView(View):
def get(self, request, uidb64, token):
try:
id = force_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
stored_token = TokenTemporaryStorage.objects.get(user=user).token
if token == stored_token:
if not token_generator.check_token(user, token):
messages.error(request, 'Password has already been changed')
return redirect('login-page')
return render(request, 'new-password-form.html')
else:
messages.error(request, 'Incorrect link or password is already changed')
return redirect('login-page')
except ObjectDoesNotExist:
messages.error(request, 'Incorrect link or password is already changed')
return redirect('login-page')
def post(self, request, uidb64, token):
id = force_str(urlsafe_base64_decode(uidb64))
user = User.objects.get(id=id)
password1 = request.POST.get('<PASSWORD>')
password2 = request.POST.get('password2')
if password1 != <PASSWORD>:
messages.error(request, 'Passwords mismatch')
return render(request, 'new-password-form.html')
user.set_password(<PASSWORD>)
user.save()
TokenTemporaryStorage.objects.get(user=user).delete()
messages.success(request, 'Password changed successfully')
return redirect('login-page')
| 2.125 | 2 |
tableauserverclient/models/target.py | zuarbase/server-client-python | 470 | 12791513 | <filename>tableauserverclient/models/target.py
"""Target class meant to abstract mappings to other objects"""
class Target:
def __init__(self, id_, target_type):
self.id = id_
self.type = target_type
def __repr__(self):
return "<Target#{id}, {type}>".format(**self.__dict__)
| 2.5625 | 3 |
LibMTL/weighting/GradVac.py | median-research-group/LibMTL | 83 | 12791514 | <reponame>median-research-group/LibMTL<filename>LibMTL/weighting/GradVac.py
import torch, random
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from LibMTL.weighting.abstract_weighting import AbsWeighting
class GradVac(AbsWeighting):
r"""Gradient Vaccine (GradVac).
This method is proposed in `Gradient Vaccine: Investigating and Improving Multi-task Optimization in Massively Multilingual Models (ICLR 2021 Spotlight) <https://openreview.net/forum?id=F1vEjWK-lH_>`_ \
and implemented by us.
Args:
beta (float, default=0.5): The exponential moving average (EMA) decay parameter.
.. warning::
GradVac is not supported by representation gradients, i.e., ``rep_grad`` must be ``False``.
"""
def __init__(self):
super(GradVac, self).__init__()
def init_param(self):
self.rho_T = torch.zeros(self.task_num, self.task_num).to(self.device)
def backward(self, losses, **kwargs):
beta = kwargs['beta']
if self.rep_grad:
raise ValueError('No support method GradVac with representation gradients (rep_grad=True)')
else:
self._compute_grad_dim()
grads = self._compute_grad(losses, mode='backward') # [task_num, grad_dim]
batch_weight = np.ones(len(losses))
pc_grads = grads.clone()
for tn_i in range(self.task_num):
task_index = list(range(self.task_num))
task_index.remove(tn_i)
random.shuffle(task_index)
for tn_j in task_index:
rho_ij = torch.dot(pc_grads[tn_i], grads[tn_j]) / (pc_grads[tn_i].norm()*grads[tn_j].norm())
if rho_ij < self.rho_T[tn_i, tn_j]:
w = pc_grads[tn_i].norm()*(self.rho_T[tn_i, tn_j]*(1-rho_ij**2).sqrt()-rho_ij*(1-self.rho_T[tn_i, tn_j]**2).sqrt())/(grads[tn_j].norm()*(1-self.rho_T[tn_i, tn_j]**2).sqrt())
pc_grads[tn_i] += grads[tn_j]*w
batch_weight[tn_j] += w.item()
self.rho_T[tn_i, tn_j] = (1-beta)*self.rho_T[tn_i, tn_j] + beta*rho_ij
new_grads = pc_grads.sum(0)
self._reset_grad(new_grads)
return batch_weight
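# Illustrative use from a LibMTL-style training loop (a hedged sketch; the
# trainer is assumed to have set task_num/device/rep_grad, and `losses` is a
# 1-D tensor with one loss per task):
#   weighting = GradVac()
#   weighting.init_param()
#   weighting.backward(losses, beta=0.5)  # beta is the EMA decay from the paper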
| 2.109375 | 2 |
asrtoolkit/data_handlers/stm.py | mgoldey/asrtoolkit-draft | 0 | 12791515 | #!/usr/bin/env python
"""
Module for reading STM files
Expected file format is derived from http://www1.icsi.berkeley.edu/Speech/docs/sctk-1.2/infmts.htm#stm_fmt_name_0
This expects a segment from class derived in convert_text
"""
from asrtoolkit.data_structures.segment import segment
def format_segment(seg):
"""
Formats a segment assuming it's an instance of class segment with elements
audiofile, channel, speaker, start and stop times, label, and text
"""
return " ".join(seg.__dict__[_] for _ in ('audiofile', 'channel', 'speaker', 'start', 'stop', 'label', 'text'))
def parse_line(line):
" parse a single line of an stm file"
data = line.strip().split()
seg = None
if len(data) > 6:
audiofile, channel, speaker, start, stop, label = data[:6]
text = " ".join(data[6:])
seg = segment(
{
'audiofile': audiofile,
'channel': channel,
'speaker': speaker,
'start': start,
'stop': stop,
'label': label,
'text': text
}
)
return seg if seg and seg.validate() else None
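# Illustrative input line for parse_line (fields: audiofile channel speaker
# start stop label text; the values below are made up):
#   audio1.sph 1 spkr_a 0.00 4.20 <o,f0,male> hello world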
def read_file(file_name):
"""
Reads an STM file, skipping any gap lines
"""
segments = []
with open(file_name, encoding="utf-8") as f:
for line in f:
seg = parse_line(line)
if seg is not None:
segments.append(seg)
return segments
| 3.390625 | 3 |
core/management/commands/populate_db.py | dishad/ADD | 0 | 12791516 | <filename>core/management/commands/populate_db.py
from django.core.management.base import BaseCommand
from core.models import Category
import os
import json
class Command(BaseCommand):
help = 'Populate the deanslist database with some mock data to display in index.html'
def _create_categories(self):
with open('categories.json', 'r') as categories_file:
categories_json = json.load(categories_file)
for category in categories_json:
category_name = list(category.keys())[0]
some_category = Category(name=category_name)
print('Adding category %s' % category_name)
some_category.save()
for subcategory_name in category[category_name]:
some_subcategory = Category(name=subcategory_name, parent_category=some_category)
print('\tAdding subcategory %s' % subcategory_name)
some_subcategory.save()
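    # Assumed shape of categories.json, inferred from the loop above
    # (category names are illustrative):
    #   [{"Books": ["Textbooks", "Novels"]}, {"Clothes": ["Shirts"]}]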
def handle(self, *args, **options):
self._create_categories() | 2.578125 | 3 |
server/entities/livro.py | marcia-santos/simpleBookstore | 0 | 12791517 | from entities.entityBase import *
class Livro(EntityBase):
__tablename__ = 'livro'
id = sqlalchemy.Column(sqlalchemy.Integer,primary_key=True)
isbn = sqlalchemy.Column(sqlalchemy.String(length=20))
titulo = sqlalchemy.Column(sqlalchemy.String(length=255))
autor = sqlalchemy.Column(sqlalchemy.String(length=255))
anoPublicacao = sqlalchemy.Column(sqlalchemy.Integer)
quantidadeEstoque = sqlalchemy.Column(sqlalchemy.Integer)
preco = sqlalchemy.Column(sqlalchemy.Float)
ativo = sqlalchemy.Column(sqlalchemy.Boolean)
| 2.78125 | 3 |
PySimpleGUI_PyWebIO_Streamlit/test/image_clf.py | philip-shen/note_python | 0 | 12791518 | <filename>PySimpleGUI_PyWebIO_Streamlit/test/image_clf.py
from PIL import Image
import json
import os
import streamlit as st
import pandas as pd
import numpy as np
from keras.preprocessing import image
from keras.applications.xception import Xception, preprocess_input, decode_predictions
st.set_option('deprecation.showfileUploaderEncoding', False)
@st.cache(allow_output_mutation=True)
def load_model():
"""
    Load the Xception model.
"""
model = Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
return model
def preprocessing_image(image_pil_array: 'PIL.Image'):
"""
    Preprocess an image loaded as a PIL.Image for prediction:
    resize it to 299x299 and normalize the pixels.
cf: https://keras.io/ja/applications/#xception
"""
    image_pil_array = image_pil_array.convert('RGB').resize((299, 299))  # Xception (include_top=True) expects 299x299 input
x = image.img_to_array(image_pil_array)
x = np.expand_dims(x, axis=0)
print(x.shape)
x = preprocess_input(x)
print(x.shape)
return x
def main():
model = load_model()
    st.title('Image Classifier')
st.write("pretrained modelを使って、アップロードした画像を分類します。")
uploaded_file = st.file_uploader('Choose a image file to predict')
if uploaded_file is not None:
image_pil_array = Image.open(uploaded_file)
st.image(
image_pil_array, caption='uploaded image',
use_column_width=True
)
x = preprocessing_image(image_pil_array)
result = model.predict(x)
predict_rank = decode_predictions(result, top=5)[0]
        st.write('The machine learning model predicted the image as', predict_rank[0][1])
        st.write('#### Prediction probabilities (top 5)')
df = pd.DataFrame(predict_rank, columns=['index', 'name', 'predict_proba'])
st.write(df)
df_chart = df[['name', 'predict_proba']].set_index('name')
st.bar_chart(df_chart)
if __name__ == '__main__':
main()
| 3.25 | 3 |
test/python/proto2dict.py | jannekai/protoc-gen-elixir | 3 | 12791519 | <filename>test/python/proto2dict.py
# -*- coding: utf-8 -*-
from base64 import b64encode, b64decode
from google.protobuf.descriptor import FieldDescriptor as FD
def proto_to_dict(msg):
result = {}
for fd, value in msg.ListFields():
result[fd.name] = encode_value(fd, value, encode_func(fd))
return result
def encode_func(fd):
if fd.type == FD.TYPE_MESSAGE:
func = proto_to_dict
elif fd.type == FD.TYPE_BOOL:
func = bool
elif fd.type == FD.TYPE_STRING:
func = unicode
elif fd.type == FD.TYPE_BYTES:
func = b64encode
elif fd.type == FD.TYPE_DOUBLE or fd.type == FD.TYPE_FLOAT:
func = float
elif fd.type == FD.TYPE_INT32 or fd.type == FD.TYPE_UINT32 or fd.type == FD.TYPE_SINT32 or fd.type == FD.TYPE_ENUM:
func = int
elif fd.type == FD.TYPE_INT64 or fd.type == FD.TYPE_UINT64 or fd.type == FD.TYPE_SINT64 or fd.type == FD.TYPE_FIXED32 or fd.type == FD.TYPE_FIXED64 or fd.type == FD.TYPE_SFIXED32 or fd.type == FD.TYPE_SFIXED64:
func = long
else:
raise Error("Unknown field type %s", fd.type)
return func
def encode_value(fd, value, encode_func):
if fd.label == FD.LABEL_REPEATED:
encoded_value = []
for v in value:
encoded_value.append(encode_func(v))
else:
encoded_value = encode_func(value)
return encoded_value
def dict_to_proto(dictionary, msg):
decode_msg(dictionary, msg)
return msg
def decode_msg(dictionary, msg):
msg.SetInParent()
for key, value in dictionary.iteritems():
if value is None:
continue
field = str(key)
if isinstance(value, dict):
decode_msg(value, getattr(msg, field))
elif isinstance(value, list):
decode_list(value, getattr(msg, field), msg.DESCRIPTOR.fields_by_name[field])
else:
setattr(msg, field, decode_value(value, msg.DESCRIPTOR.fields_by_name[field]))
def decode_list(values, field, fd):
if isinstance(values[0], dict):
for v in values:
dict_to_proto(v, field.add())
else:
for v in values:
field.append(decode_value(v, fd))
def decode_value(value, fd):
if fd.type == FD.TYPE_BYTES:
return b64decode(value)
if fd.type == FD.TYPE_BOOL:
return bool(value)
if fd.type == FD.TYPE_INT32 or fd.type == FD.TYPE_UINT32 or fd.type == FD.TYPE_SINT32 or fd.type == FD.TYPE_ENUM:
return int(value)
if fd.type == FD.TYPE_INT64 or fd.type == FD.TYPE_UINT64 or fd.type == FD.TYPE_SINT64 or fd.type == FD.TYPE_FIXED32 or fd.type == FD.TYPE_FIXED64 or fd.type == FD.TYPE_SFIXED32 or fd.type == FD.TYPE_SFIXED64:
return long(value)
return value
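# Illustrative round trip (assumes `Person` is some compiled protobuf message
# class; the names are placeholders):
#   d = proto_to_dict(person_msg)      # protobuf message -> plain dict
#   msg = dict_to_proto(d, Person())   # plain dict -> protobuf message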
| 2.625 | 3 |
setup.py | maximlt/project | 0 | 12791520 | <reponame>maximlt/project<gh_stars>0
import pathlib
from setuptools import setup, find_packages
import codecs
import re
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
AUTOMATION_REQUIRE = ["tox>=3.12.1"]
LINTERS_REQUIRE = ["flake8>=3.7.8"]
FORMATTER_REQUIRE = ["black>=19.3b0"]
NOTEBOOK_REQUIRE = ["jupyterlab>=1.0.4"]
TEST_REQUIRE = ["pytest>=5.0.1", "pytest-cov>=2.7.1"]
TEST_NOTEBOOKS_REQUIRE = ["pytest>=5.0.1", "nbval>=0.1"]
PUBLISH_REQUIRE = ["twine>=1.13.0"]
DOCS_REQUIRE = ["sphinx>=2.1.2", "sphinx-rtd-theme>=0.4.3"]
DEV_REQUIRE = list(
set(
AUTOMATION_REQUIRE
+ LINTERS_REQUIRE
+ FORMATTER_REQUIRE
+ NOTEBOOK_REQUIRE
+ TEST_REQUIRE
+ TEST_NOTEBOOKS_REQUIRE
+ DOCS_REQUIRE
+ PUBLISH_REQUIRE
)
)
def read(*parts):
# with codecs.open(os.path.join(HERE, *parts), "r") as fp:
with codecs.open(HERE.joinpath(*parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# This call to setup() does all the work
setup(
name="projectxyxyxy",
# First method from https://packaging.python.org/guides/single-sourcing-package-version/
version=find_version("project", "__init__.py"),
description="Project description",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/maximlt/project",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.6",
packages=find_packages(),
include_package_data=True,
install_requires=["Click>=7.0"],
entry_points={"console_scripts": ["calc=project:cli"]},
extras_require={
"dev": DEV_REQUIRE,
"test": TEST_REQUIRE,
"test_notebooks": TEST_NOTEBOOKS_REQUIRE,
"formatter": FORMATTER_REQUIRE,
"linters": LINTERS_REQUIRE,
"docs": DOCS_REQUIRE,
},
project_urls={"Documentation": "https://xyxyxy.readthedocs.io/en/latest/"},
)
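
# Example (sketch): editable install pulling in the development extras defined above
#   pip install -e ".[dev]"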
| 1.546875 | 2 |
engines.py | Artemigos/rpi_adjustable_desk | 0 | 12791521 | import RPi.GPIO as gpio
def up_off():
    print("[debug] turning UP engines off")
    _OUT.left_up.off()
    _OUT.right_up.off()  # fixed: was _OUT.RIGHT_UP, which doesn't match the lowercase attribute used elsewhere
def up_on():
print("[debug] turning UP engines on")
_OUT.left_down.off()
_OUT.right_down.off()
_OUT.main.on()
_OUT.left_up.on()
_OUT.right_up.on()
def down_off():
print("[debug] turning DOWN engines off")
_OUT.left_down.off()
_OUT.right_down.off()
def down_on():
print("[debug] turning DOWN engines on")
_OUT.left_up.off()
_OUT.right_up.off()
_OUT.main.on()
_OUT.left_down.on()
_OUT.right_down.on()
def all_off():
print("[debug] turning all engines off")
_OUT.main.off()
_OUT.left_up.off()
_OUT.right_up.off()
_OUT.left_down.off()
_OUT.right_down.off()
def initialize(outputs):
global _OUT
_OUT = outputs
all_off()
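
# Example wiring (sketch): initialize() expects an object whose attributes
# (main, left_up, right_up, left_down, right_down) each expose on()/off(),
# e.g. a thin wrapper over RPi.GPIO output pins (pin numbers hypothetical):
#
#   class OutPin(object):
#       def __init__(self, pin):
#           self.pin = pin
#           gpio.setup(pin, gpio.OUT)
#       def on(self):
#           gpio.output(self.pin, gpio.HIGH)
#       def off(self):
#           gpio.output(self.pin, gpio.LOW)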
| 2.875 | 3 |
tests/us23_test.py | pdamiano-11/Team-4-Code | 4 | 12791522 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 14:29:04 2020
@author: ptrda
"""
import os
os.chdir(os.path.dirname(os.path.abspath('../tests')))
import sys
sys.path.append(os.path.abspath('../Team-4-Code/src/UserStories'))
sys.path.append(os.path.abspath('../Team-4-Code/src'))
cwd = os.getcwd()
os.chdir(os.path.join(cwd, 'seeds'))
from us23 import us23
import Project02
import unittest
class us23_test(unittest.TestCase):
def test1(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:3]
df = df.append(individuals.iloc[0]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
def test2(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:5]
res = "unique"
self.assertEqual(us23(df), res)
def test3(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:7]
df = df.append(individuals.iloc[3]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
def test4(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:9]
df = df.append(individuals.iloc[11]).reset_index(drop = True)
res = "unique"
self.assertEqual(us23(df), res)
def test5(self):
individuals = Project02.createIndividualsDataFrame('seed.ged')
df = individuals[0:11]
df = df.append(individuals.iloc[4]).reset_index(drop = True)
res = "not unique"
self.assertEqual(us23(df), res)
unittest.main(argv=['first-arg-is-ignored'], exit=False) | 2.53125 | 3 |
app/authentication/urls.py | GiorgosXonikis/RestaurantReviews-RestAPI | 0 | 12791523 | from django.urls import path
from .views import (PasswordResetView,
PasswordResetValidationView)
urlpatterns = [
path('password-reset/', PasswordResetView.as_view(), name='password_reset'),
path('password-reset/validate/', PasswordResetValidationView.as_view(), name='password_reset-validate'),
] | 1.835938 | 2 |
9_Lesson9/exceptions/example4-else.py | turovod/Otus | 0 | 12791524 | <reponame>turovod/Otus
my_dict = {"a": 1, "b": 2, "c": 3}
# Without finally
try:
value = my_dict["a"]
except KeyError:
print("A KeyError occurred!")
else:
print("No error occurred!")
# With finally
try:
value = my_dict["a"]
except KeyError:
print("A KeyError occurred!")
else:
print("No error occurred!")
finally:
print("The finally statement ran!")
| 3.703125 | 4 |
Part2/reply_post.py | tommakessense/RedditBot | 0 | 12791525 | from json import loads, dumps
from random import randint
import stanza
import praw
import re
import os
from urllib.parse import quote
from stanza import Pipeline
def log_into_reddit():
reddit = praw.Reddit('bot1')
print(reddit.user.me())
return reddit
def get_posts_replied_to():
# Have we run this code before? If not, create an empty list
if not os.path.isfile("posts_replied_to.txt"):
posts_replied_to = []
# If we have run the code before, load the list of posts we have replied to
else:
# Read the file into a list and remove any empty values
with open("posts_replied_to.txt", "r") as f:
posts_replied_to = f.read()
posts_replied_to = posts_replied_to.split("\n")
posts_replied_to = list(filter(None, posts_replied_to))
return posts_replied_to
# Facts posted verbatim as replies by reply_to_post() below
LOCATIONS = [
" Sales of Hitler's political autobiography \"<NAME>\"sometimes referred to as the bible of the Nazi Party, made him a millionaire. ",
"Hitler had dreams of playing a musical instrument. He had short but unsuccessful lessons in piano and violin and also dabbled in the flute and harmonica. In the end, he settled for whistling, which he did frequently.",
"Though he shunned meat, Hitler was a voracious ‘sweet tooth’, consuming large amounts of cake, pastries, chocolate and sugar. He sometimes took as many as five teaspoons of sugar in his tea.",
"When the regime came into power in 1933, they passed a comprehensive set of laws for animal protection. When all of these were in place, Hitler said something about animal cruelty. With the new Reich, there will be no grounds for any form of animal abuse and cruelty.",
"It’s already a known fact that during Hitler’s reign, their main objective was to free the world of Jews. However, Hilter unknowingly had a Jewish chauffeur. <NAME> was also his friend and personal chauffeur. When it got known to many, <NAME> was ready to target Maurice for expulsion. Hitler came to the rescue and made an exception for him and his brothers. He called them “honorary Aryans”.",
"In a pre-cursor to modern stances and laws in this area, the Nazi party were the first people to ban smoking. Nazi doctors were the first to establish a link between smoking and lung cancer which meant that a fierce anti-smoking campaign began under Hitler. The Nazi leadership strongly condemned smoking and advised the general population to give it up.",
"During the Second World War, German doctors came up with a methamphetamine based experimental drug to increase soldier’s performance. This was very successful in trials when tested and made the troops super tough. It was found that they could march 55 miles without any tiredness which is pretty amazing. The plan was to roll it out to all soldiers serving in the war but the German’s lost before it could be put into place."]
ANALYTICS_JSON = "posts_analytics.json"
def get_posts_analytics():
if not os.path.isfile(ANALYTICS_JSON):
posts_analytics = []
# If we have run the code before, load the list of posts we have replied to
else:
# Read the file into a list and remove any empty values
with open(ANALYTICS_JSON, "r") as f:
posts_analytics = loads(f.read())
return posts_analytics
def initiate_nlp() -> Pipeline:
stanza.download('en')
nlp_pipe = stanza.Pipeline('en', processors="tokenize,pos")
return nlp_pipe
def fetch_reddit_posts(selected_subreddit: str, limit: int) -> list:
subreddit = reddit.subreddit(selected_subreddit)
return subreddit.hot(limit=limit)
def process_post(post, nlp_pipe: Pipeline):
doc = nlp_pipe(post.title)
keywords = get_keywords_from_post(doc.sentences)
# write all keywords in lower case
keywords = [keyword.lower() for keyword in keywords]
# remove all duplicates and keep original order
unique_keywords = list(dict.fromkeys(keywords))
print(" ".join(unique_keywords))
print(f"{post.title}")
print(f"https://www.reddit.com/r/fakehistoryporn/comments/{post.id}")
URL_keywords = quote(' '.join(unique_keywords))
print(f"https://en.wikipedia.org/w/index.php?search={URL_keywords}")
return post.id, post.title, unique_keywords
def filter_analytics(posts, posts_analytics):
post_ids = [post_id for post_id, _, _ in posts_analytics]
filtered_posts = []
for post in posts:
if post.id in post_ids:
continue
filtered_posts.append(post)
return filtered_posts
def get_keywords_from_post(sentences: list):
keywords = []
for sentence in sentences:
for word in sentence.words:
if word.upos not in ['NOUN', 'VERB', 'NUM', 'PROPN']:
continue
keywords.append(word.text)
return keywords
def filter_posts(posts, posts_replied_to):
filtered_posts = []
for post in posts:
if post.id in posts_replied_to:
continue
if not re.search("(nazi|hitler|hilter|german)", post.title, re.IGNORECASE):
continue
filtered_posts.append(post)
return filtered_posts
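
# Example (sketch): how the regex filter above behaves on hypothetical titles
#   "Hitler opens the 1936 Olympics (1936)"  -> kept (matches "hitler")
#   "Napoleon crosses the Alps (1800)"       -> skipped (no keyword match)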
def reply_to_post(post):
# Reply to the post
    random_index = randint(0, len(LOCATIONS) - 1)  # randint is inclusive on both ends; the old upper bound of len(LOCATIONS) could raise IndexError
    post.reply(f"Did you know that: {LOCATIONS[random_index]}")
print(f"Bot replying to: {post.title} https://www.reddit.com/r/fakehistoryporn/comments/{post.id}")
def store_line(f, line):
f.write(line + "\n")
if __name__ == '__main__':
# log into reddit
reddit = log_into_reddit()
# check posts replied to
posts_replied_to = get_posts_replied_to()
# initiate nlp
nlp_pipe = initiate_nlp()
# create posts_analytics
posts_analytics = get_posts_analytics()
# fetch reddit posts
posts = fetch_reddit_posts("fakehistoryporn", 10)
analytics_filtered = filter_analytics(posts, posts_analytics)
# read submission titles
for post in analytics_filtered:
nlp_data = process_post(post, nlp_pipe)
posts_analytics.append(nlp_data)
# store nlp doc in posts_analytics
with open(ANALYTICS_JSON, "w") as f:
f.write(dumps(posts_analytics))
# filter for keywords
filtered_posts = filter_posts(posts, posts_replied_to)
# respond to filtered posts
with open("posts_replied_to.txt", "a") as f:
for post in filtered_posts:
reply_to_post(post)
# store post_id in posts_replied_to
store_line(f, post.id) | 3.34375 | 3 |
model/utils_bert.py | sahara2001/editsql | 0 | 12791526 | <filename>model/utils_bert.py
# modified from https://github.com/naver/sqlova
import os, json
import random as rd
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .gated_graph_conv import GatedGraphConv
from .bert import tokenization as tokenization
from .bert.modeling import BertConfig, BertModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_bert(params):
BERT_PT_PATH = './model/bert/data/annotated_wikisql_and_PyTorch_bert_param'
map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',
'uL': 'uncased_L-24_H-1024_A-16',
'cS': 'cased_L-12_H-768_A-12',
'cL': 'cased_L-24_H-1024_A-16',
'mcS': 'multi_cased_L-12_H-768_A-12'}
bert_type = map_bert_type_abb[params.bert_type_abb]
if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS':
do_lower_case = False
else:
do_lower_case = True
no_pretraining = False
bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')
print('bert_config_file', bert_config_file)
print('vocab_file', vocab_file)
print('init_checkpoint', init_checkpoint)
bert_config = BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
bert_config.print_status()
model_bert = BertModel(bert_config)
if no_pretraining:
pass
else:
model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
print("Load pre-trained parameters.")
model_bert.to(device)
return model_bert, tokenizer, bert_config
def generate_inputs(tokenizer, nlu1_tok, hds1):
tokens = []
segment_ids = []
t_to_tt_idx_hds1 = []
tokens.append("[CLS]")
i_st_nlu = len(tokens) # to use it later
segment_ids.append(0)
for token in nlu1_tok:
tokens.append(token)
segment_ids.append(0)
i_ed_nlu = len(tokens)
tokens.append("[SEP]")
segment_ids.append(0)
i_hds = []
for i, hds11 in enumerate(hds1):
i_st_hd = len(tokens)
t_to_tt_idx_hds11 = []
sub_tok = []
for sub_tok1 in hds11.split():
t_to_tt_idx_hds11.append(len(sub_tok))
sub_tok += tokenizer.tokenize(sub_tok1)
t_to_tt_idx_hds1.append(t_to_tt_idx_hds11)
tokens += sub_tok
i_ed_hd = len(tokens)
i_hds.append((i_st_hd, i_ed_hd))
segment_ids += [1] * len(sub_tok)
if i < len(hds1)-1:
tokens.append("[SEP]")
segment_ids.append(0)
elif i == len(hds1)-1:
tokens.append("[SEP]")
segment_ids.append(1)
else:
raise EnvironmentError
i_nlu = (i_st_nlu, i_ed_nlu)
return tokens, segment_ids, i_nlu, i_hds, t_to_tt_idx_hds1
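
# Example layout (sketch) produced by generate_inputs for the question
# "list singers" and headers ["singer . name", "singer . age"]:
#   tokens:      [CLS] list singers [SEP] singer . name [SEP] singer . age [SEP]
#   segment_ids:   0    0     0      0      1   1   1    0      1   1   1    1
# i_nlu spans the question tokens; i_hds holds one (start, end) pair per header.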
def gen_l_hpu(i_hds):
"""
    Treat columns as if they were a batch of natural language utterances with
    batch_size = (# of columns) * (original batch size).
    Example: i_hds = [(17, 18), (19, 21), (22, 23), (24, 25), (26, 29), (30, 34)]
"""
l_hpu = []
for i_hds1 in i_hds:
for i_hds11 in i_hds1:
l_hpu.append(i_hds11[1] - i_hds11[0])
return l_hpu
def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):
"""
    Here, the input is tokenized further by the WordPiece (WP) tokenizer and fed into BERT.
INPUT
:param model_bert:
    :param tokenizer: WordPiece tokenizer
:param nlu: Question
:param nlu_t: CoreNLP tokenized nlu.
:param hds: Headers
:param hs_t: None or 1st-level tokenized headers
:param max_seq_length: max input token length
OUTPUT
tokens: BERT input tokens
nlu_tt: WP-tokenized input natural language questions
orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token
tok_to_orig_index: inverse map.
"""
l_n = []
l_hs = [] # The length of columns for each batch
input_ids = []
tokens = []
segment_ids = []
input_mask = []
    i_nlu = []  # index to retrieve the position of contextual vector later.
i_hds = []
doc_tokens = []
nlu_tt = []
t_to_tt_idx = []
tt_to_t_idx = []
t_to_tt_idx_hds = []
for b, nlu_t1 in enumerate(nlu_t):
hds1 = hds[b]
l_hs.append(len(hds1))
# 1. 2nd tokenization using WordPiece
tt_to_t_idx1 = [] # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).
t_to_tt_idx1 = [] # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens.
        nlu_tt1 = []  # all_doc_tokens[orig_to_tok_idx[i]] returns the first sub-token segment of the i-th 1st-level token
for (i, token) in enumerate(nlu_t1):
            t_to_tt_idx1.append(
                len(nlu_tt1))  # records the start position of each original whitespace-delimited token within the sub-token sequence
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tt_to_t_idx1.append(i)
nlu_tt1.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer
nlu_tt.append(nlu_tt1)
tt_to_t_idx.append(tt_to_t_idx1)
t_to_tt_idx.append(t_to_tt_idx1)
l_n.append(len(nlu_tt1))
# [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]
# 2. Generate BERT inputs & indices.
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, hds1)
assert len(t_to_tt_idx_hds1) == len(hds1)
t_to_tt_idx_hds.append(t_to_tt_idx_hds1)
input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
# Input masks
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask1 = [1] * len(input_ids1)
# 3. Zero-pad up to the sequence length.
if len(nlu_t) == 1:
max_seq_length = len(input_ids1)
while len(input_ids1) < max_seq_length:
input_ids1.append(0)
input_mask1.append(0)
segment_ids1.append(0)
assert len(input_ids1) == max_seq_length
assert len(input_mask1) == max_seq_length
assert len(segment_ids1) == max_seq_length
input_ids.append(input_ids1)
tokens.append(tokens1)
segment_ids.append(segment_ids1)
input_mask.append(input_mask1)
i_nlu.append(i_nlu1)
i_hds.append(i_hds1)
# Convert to tensor
all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
# 4. Generate BERT output.
all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)
# 5. generate l_hpu from i_hds
l_hpu = gen_l_hpu(i_hds)
assert len(set(l_n)) == 1 and len(set(i_nlu)) == 1
assert l_n[0] == i_nlu[0][1] - i_nlu[0][0]
return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \
l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds
def get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n):
"""
Get the representation of each tokens.
"""
bS = len(l_n)
l_n_max = max(l_n)
# print('wemb_n: [bS, l_n_max, hS * num_out_layers_n] = ', bS, l_n_max, hS * num_out_layers_n)
wemb_n = torch.zeros([bS, l_n_max, hS * num_out_layers_n]).to(device)
for b in range(bS):
# [B, max_len, dim]
# Fill zero for non-exist part.
l_n1 = l_n[b]
i_nlu1 = i_nlu[b]
for i_noln in range(num_out_layers_n):
i_layer = num_hidden_layers - 1 - i_noln
st = i_noln * hS
ed = (i_noln + 1) * hS
wemb_n[b, 0:(i_nlu1[1] - i_nlu1[0]), st:ed] = all_encoder_layer[i_layer][b, i_nlu1[0]:i_nlu1[1], :]
return wemb_n
def get_wemb_h(i_hds, l_hpu, l_hs, hS, num_hidden_layers, all_encoder_layer, num_out_layers_h):
"""
As if
[ [table-1-col-1-tok1, t1-c1-t2, ...],
[t1-c2-t1, t1-c2-t2, ...].
...
[t2-c1-t1, ...,]
]
"""
bS = len(l_hs)
l_hpu_max = max(l_hpu)
num_of_all_hds = sum(l_hs)
wemb_h = torch.zeros([num_of_all_hds, l_hpu_max, hS * num_out_layers_h]).to(device)
# print('wemb_h: [num_of_all_hds, l_hpu_max, hS * num_out_layers_h] = ', wemb_h.size())
b_pu = -1
for b, i_hds1 in enumerate(i_hds):
for b1, i_hds11 in enumerate(i_hds1):
b_pu += 1
for i_nolh in range(num_out_layers_h):
i_layer = num_hidden_layers - 1 - i_nolh
st = i_nolh * hS
ed = (i_nolh + 1) * hS
wemb_h[b_pu, 0:(i_hds11[1] - i_hds11[0]), st:ed] \
= all_encoder_layer[i_layer][b, i_hds11[0]:i_hds11[1],:]
return wemb_h
def get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1):
# get contextual output of all tokens from bert
all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,\
l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length)
# all_encoder_layer: BERT outputs from all layers.
# pooled_output: output of [CLS] vec.
# tokens: BERT intput tokens
# i_nlu: start and end indices of question in tokens
# i_hds: start and end indices of headers
# get the wemb
wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,
num_out_layers_n)
wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,
num_out_layers_h)
return wemb_n, wemb_h, l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds
def prepare_input(tokenizer, input_sequence, input_schema, max_seq_length):
nlu_t = []
hds = []
nlu_t1 = input_sequence # segmented question
all_hds = input_schema.column_names_embedder_input # table name . column
nlu_tt1 = []
# print(1111111,nlu_t1,all_hds)
for (i, token) in enumerate(nlu_t1):
nlu_tt1 += tokenizer.tokenize(token)
current_hds1 = []
for hds1 in all_hds:
new_hds1 = current_hds1 + [hds1]
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, new_hds1)
if len(segment_ids1) > max_seq_length:
nlu_t.append(nlu_t1)
hds.append(current_hds1)
current_hds1 = [hds1]
else:
current_hds1 = new_hds1
if len(current_hds1) > 0:
nlu_t.append(nlu_t1)
hds.append(current_hds1)
return nlu_t, hds
def prepare_input_gnn0(tokenizer, input_sequence, input_schema, max_seq_length,pad_len=12):
"""
Return: Nodes(list of tokenized db items)
Return: relations(lists of list of related columns), inner list corresponds to edge type
"""
nlu_t = []
hds = []
nlu_t1 = input_sequence # segmented question
all_hds = input_schema.column_names_embedder_input # table name . column
tables = []
tb_name = {} # index of header in node - len(nodes)
columns = {}
nodes = []
relations = [[],[],[]] # three edge types, we use tb_name.col as embedding
# print(relations)
all_columns = {}
# print(1111111,nlu_t1,all_hds)
nodes.append('*')
for i in all_hds:
# print(i.split('.'))
if i != "*" and len(i.split('.')) > 1:
header,col = i.split('.')
# if col.strip() != '*':
# print(header,col)
# first add headers
nodes.append(i)
# if not col in columns:
if not header in tables:
tables.append(header)
tb_name[header] = len(tables) -1
#columns[col]= len(nodes)-1 # add column name to columns with index in nodes as value
# take redundancy for foreign key
if col in columns: # find('id') != -1
# print('key')
relations[2].append([tb_name[header],columns[col]]) # add foreign key relation
else:
# column id
columns[col] = len(nodes) -1
# assume primary key have "id"
if col.find("id") != -1:
# print('primary')
relations[1].append([tb_name[header],columns[col]])
else:
relations[0].append([tb_name[header],columns[col]])
# for *
# nodes += tables
base = len(nodes)
nodes += tables
for i in relations:
for j in i:
j[0] += base
# tokenize nodes to feed into model
masks = []
new_schema = []
for i in range(len(nodes)):
new_schema.append(nodes[i])
# print(nodes[i])
nodes[i] = tokenizer.tokenize(nodes[i])
masks.append([1]*len(nodes[i]) + [0]*(pad_len-len(nodes[i])))
nodes[i] += ['[PAD]'] * (pad_len-len(nodes[i]))
nodes[i] = tokenizer.convert_tokens_to_ids(nodes[i])
# print(nodes[i],masks[i])
# print(relations)
# print(nodes,relations)
# for (i, token) in enumerate(nlu_t1):
# nlu_tt1 += tokenizer.tokenize(token)
# current_hds1 = []
# for hds1 in all_hds:
# new_hds1 = current_hds1 + [hds1]
# tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, new_hds1)
# if len(segment_ids1) > max_seq_length:
# nlu_t.append(nlu_t1)
# hds.append(current_hds1)
# current_hds1 = [hds1]
# else:
# current_hds1 = new_hds1
# if len(current_hds1) > 0:
# nlu_t.append(nlu_t1)
# hds.append(current_hds1)
return nodes,relations,masks, new_schema
def prepare_input_gnn(tokenizer, input_sequence, input_schema, max_seq_length,pad_len=12):
"""
Return: Nodes(list of tokenized db items)
Return: relations(lists of list of related columns), inner list corresponds to edge type
"""
nlu_t = []
hds = []
nlu_t1 = input_sequence # segmented question
all_hds = input_schema.column_names_surface_form # table name.column
tables = []
tb_name = {} # index of header in node - len(nodes)
columns = {}
nodes = []
foreign_idx = [ i for i,j in input_schema.table_schema['foreign_keys']]
primary_idx = [ i for i in input_schema.table_schema['primary_keys']]
foreign_key = [-1] * len(foreign_idx)
# print(input_schema.table_schema['foreign_keys'])
relations = [[],[],[]] # three edge types, we use tb_name.col as embedding
# print(relations)
all_columns = {}
# print(1111111,nlu_t1,all_hds)
nodes.append('*')
for i in all_hds:
# print(i.split('.'))
if i != "*" and len(i.split('.')) > 1:
header,col = i.split('.')
# if col.strip() != '*':
# print(header,col)
# first add headers
nodes.append(i)
# if not col in columns:
if not header in tables:
tables.append(header)
tb_name[header] = len(tables) -1
#columns[col]= len(nodes)-1 # add column name to columns with index in nodes as value
# take redundancy for foreign key
if col in columns: # find('id') != -1
# print('key')
relations[2].append([tb_name[header],columns[col]]) # add foreign key relation
# relations[0].append([tb_name[header],columns[col]])
else:
# column id
columns[col] = len(nodes) -1
# assume primary key have "id"
if col.find("id") != -1:
# print('primary')
relations[1].append([tb_name[header],columns[col]])
if not (len(nodes) - 1 in foreign_idx or len(nodes)-1 in primary_idx):
relations[0].append([tb_name[header],columns[col]])
# find table name correspond to foreign key
if len(nodes)-1 in foreign_idx:
# print(foreign_idx[0])
foreign_key[foreign_idx.index(len(nodes)-1)] = tb_name[header]
## NOTE: foreign key relation can be column to column or table to column (we choose latter one)
base = len(nodes)
nodes += tables
    relations[0] = relations[0]  # plain table-to-column edges
    relations[1] = [[i, j] for i, j in enumerate(input_schema.table_schema['primary_keys'])]  # primary keys
    relations[2] = input_schema.table_schema['foreign_keys']  # foreign keys
for i,item in enumerate(foreign_key):
relations[2][i][0] = item
# nodes += tables
# print(1111111,input_schema.column_names_surface_form,relations, len(nodes),foreign_key,foreign_idx)
# exit(0)
for i in relations:
for j in i:
j[0] += base
# tokenize nodes to feed into model
masks = []
## update new schema
new_schema = input_schema.column_names_surface_form
if len(new_schema) != len(nodes):
new_schema = input_schema.column_names_surface_form + tables
# if len(new_schema) != len(nodes):
# print(new_schema,nodes, len(nodes),len(new_schema))
assert len(new_schema) ==len(nodes)
for i in range(len(nodes)):
# new_schema.append(nodes[i])
# print(nodes[i])
nodes[i] = tokenizer.tokenize(nodes[i])
# print(nodes[i])
# masks.append([1]*len(nodes[i]) + [0]*(pad_len-len(nodes[i])))
# nodes[i] += ['[PAD]'] * (pad_len-len(nodes[i]))
# nodes[i] = tokenizer.convert_tokens_to_ids(nodes[i])
# print(nodes[i],masks[i])
# print(relations)
# print(nodes,relations)
# for (i, token) in enumerate(nlu_t1):
# nlu_tt1 += tokenizer.tokenize(token)
# current_hds1 = []
# for hds1 in all_hds:
# new_hds1 = current_hds1 + [hds1]
# tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, new_hds1)
# if len(segment_ids1) > max_seq_length:
# nlu_t.append(nlu_t1)
# hds.append(current_hds1)
# current_hds1 = [hds1]
# else:
# current_hds1 = new_hds1
# if len(current_hds1) > 0:
# nlu_t.append(nlu_t1)
# hds.append(current_hds1)
return nodes,relations, new_schema
def prepare_input_gnn2(schema, tokenizer, pad_len=12):
nodes = schema.nodes
masks = []
new_schema = []
for i in range(len(nodes)):
new_schema.append(nodes[i])
# print(nodes[i])
nodes[i] = tokenizer.tokenize(nodes[i])
masks.append([1]*len(nodes[i]) + [0]*(pad_len-len(nodes[i])))
nodes[i] += ['[PAD]'] * (pad_len-len(nodes[i]))
nodes[i] = tokenizer.convert_tokens_to_ids(nodes[i])
return nodes, masks, new_schema
def prepare_input_v2(tokenizer, input_sequence, input_schema):
nlu_t = []
hds = []
max_seq_length = 0
nlu_t1 = input_sequence
all_hds = input_schema.column_names_embedder_input
nlu_tt1 = []
for (i, token) in enumerate(nlu_t1):
nlu_tt1 += tokenizer.tokenize(token)
current_hds1 = []
current_table = ''
for hds1 in all_hds:
hds1_table = hds1.split('.')[0].strip()
if hds1_table == current_table:
current_hds1.append(hds1)
else:
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, current_hds1)
max_seq_length = max(max_seq_length, len(segment_ids1))
nlu_t.append(nlu_t1)
hds.append(current_hds1)
current_hds1 = [hds1]
current_table = hds1_table
if len(current_hds1) > 0:
tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1 = generate_inputs(tokenizer, nlu_tt1, current_hds1)
max_seq_length = max(max_seq_length, len(segment_ids1))
nlu_t.append(nlu_t1)
hds.append(current_hds1)
return nlu_t, hds, max_seq_length
def get_gnn_encoding(tokenizer,model_bert,input_sequence,input_schema,gnn,gnn_encoder1,embedder=None,bert_input_version='v1',num_out_layers_h=1, max_seq_length=512,num_out_layers_n=1):
# only get graph encoding without input_sequence dependency
nodes=relations=new_schema=None
if bert_input_version == 'v1':
nodes, relations, new_schema = prepare_input_gnn( tokenizer, input_sequence, input_schema, max_seq_length)
elif bert_input_version == 'v2':
raise("not inplemented")
nlu_t, hds, max_seq_length = prepare_input_v2(tokenizer, input_sequence, input_schema)
# relations = input_schema.relations
# TODO: feed into gnn and return embedding
# print(relations)
# print(2222222,type(input_nodes),input_nodes)
    masks = None  # NOTE: only populated by prepare_input_gnn0; the no-embedder branch below assumes it is set
input_nodes = nodes
all_encoder_layer = None
if not embedder:
input_nodes =[ torch.tensor([i], dtype=torch.long).to(device) for i in nodes]
masks = torch.tensor(masks, dtype=torch.long).to(device)
with torch.no_grad():
all_encoder_layer= torch.cat([torch.cat(model_bert(i,j)[0],1) for i,j in zip(input_nodes,masks)],0)
all_encoder_layer = torch.cat([gnn_encoder1(i.unsqueeze(0))[0][1][0].unsqueeze(0) for i in all_encoder_layer],0)
else:
all_encoder_layer = torch.cat([torch.cat([embedder(token).unsqueeze(0) for token in i],0).mean(0).unsqueeze(0) for i in input_nodes],0)
if len(nodes) <=1:
print(input_schema.column_names_embedder_input)
print(input_schema.num_col)
print(input_sequence)
assert len(nodes) > 1
assert len(relations[0]) > 0
# print(123123123,all_encoder_layer[0][0].size(),len(all_encoder_layer[0]),len(all_encoder_layer),len(all_encoder_layer[3]),len(all_encoder_layer[10]))
# print(123123, all_encoder_layer.size(),type(all_encoder_layer))
# all_encoder_layer = all_encoder_layer.permute(2,1,0)
# print(all_encoder_layer.size())
# print([gnn_encoder1(i.unsqueeze(0))[0][1][0] for i in all_encoder_layer][0])
# all_encoder_layer = torch.cat([gnn_encoder1(i.unsqueeze(0))[0][1][0].unsqueeze(0) for i in all_encoder_layer],0)
# all_encoder_layer = [gnn_encoder1(i.squeeze()) for i in all_encoder_layer]
# all_encoder_layer = [gnn_encoder1(torch.cat(i,1))[0][1] for i in all_encoder_layer] # get hidden layer output as representation for each schema items
relations = [torch.tensor(i, dtype=torch.long).to(device) for i in relations]
# print(333333,relations, all_encoder_layer.size())
output = [i for i in gnn(all_encoder_layer,relations)]
# print(output)
# wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n, num_out_layers_h)
# t_to_tt_idx = t_to_tt_idx[0]
# assert len(t_to_tt_idx) == len(input_sequence)
# assert sum(len(t_to_tt_idx_hds1) for t_to_tt_idx_hds1 in t_to_tt_idx_hds) == len(input_schema.column_names_embedder_input)
# assert list(wemb_h.size())[0] == len(input_schema.column_names_embedder_input)
# utterance_states = []
# for i in range(len(t_to_tt_idx)):
# start = t_to_tt_idx[i]
# if i == len(t_to_tt_idx)-1:
# end = l_n[0]
# else:
# end = t_to_tt_idx[i+1]
# utterance_states.append(torch.mean(wemb_n[:,start:end,:], dim=[0,1]))
# assert len(utterance_states) == len(input_sequence)
# schema_token_states = []
# cnt = -1
# for t_to_tt_idx_hds1 in t_to_tt_idx_hds:
# for t_to_tt_idx_hds11 in t_to_tt_idx_hds1:
# cnt += 1
# schema_token_states1 = []
# for i in range(len(t_to_tt_idx_hds11)):
# start = t_to_tt_idx_hds11[i]
# if i == len(t_to_tt_idx_hds11)-1:
# end = l_hpu[cnt]
# else:
# end = t_to_tt_idx_hds11[i+1]
# schema_token_states1.append(torch.mean(wemb_h[cnt,start:end,:], dim=0))
# assert len(schema_token_states1) == len(input_schema.column_names_embedder_input[cnt].split())
# schema_token_states.append(schema_token_states1)
# assert len(schema_token_states) == len(input_schema.column_names_embedder_input)
return output,new_schema
def get_bert_encoding(bert_config, model_bert, tokenizer, input_sequence, input_schema, bert_input_version='v1', gnn=None ,use_gnn=True, max_seq_length=512, num_out_layers_n=1, num_out_layers_h=1):
# NOTE: add gnn above final output layer
#add input schema table
# print(11111111,input_schema.column_names_embedder_input,input_schema.column_names_surface_form)
relations = None
if use_gnn:
if not (input_schema.table_schema['table_names'][0] in input_schema.column_names_embedder_input):
input_schema.column_names_embedder_input += input_schema.table_schema['table_names']
input_schema.num_col += len(input_schema.table_schema['table_names'])
input_schema.column_names_surface_form += [i.lower() for i in input_schema.table_schema['table_names_original']]
nodes, relations, new_schema = prepare_input_gnn( tokenizer, input_sequence, input_schema, max_seq_length)
if bert_input_version == 'v1':
nlu_t, hds = prepare_input(tokenizer, input_sequence, input_schema, max_seq_length)
elif bert_input_version == 'v2':
nlu_t, hds, max_seq_length = prepare_input_v2(tokenizer, input_sequence, input_schema)
wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n, num_out_layers_h)
t_to_tt_idx = t_to_tt_idx[0]
assert len(t_to_tt_idx) == len(input_sequence)
assert sum(len(t_to_tt_idx_hds1) for t_to_tt_idx_hds1 in t_to_tt_idx_hds) == len(input_schema.column_names_embedder_input)
assert list(wemb_h.size())[0] == len(input_schema.column_names_embedder_input)
# print(22222222,len(input_schema.column_names_embedder_input),input_schema.column_names_embedder_input,input_schema.column_names_surface_form)
utterance_states = []
for i in range(len(t_to_tt_idx)):
start = t_to_tt_idx[i]
if i == len(t_to_tt_idx)-1:
end = l_n[0]
else:
end = t_to_tt_idx[i+1]
utterance_states.append(torch.mean(wemb_n[:,start:end,:], dim=[0,1]))
assert len(utterance_states) == len(input_sequence)
schema_token_states = []
cnt = -1
for t_to_tt_idx_hds1 in t_to_tt_idx_hds:
for t_to_tt_idx_hds11 in t_to_tt_idx_hds1:
cnt += 1
schema_token_states1 = []
for i in range(len(t_to_tt_idx_hds11)):
start = t_to_tt_idx_hds11[i]
if i == len(t_to_tt_idx_hds11)-1:
end = l_hpu[cnt]
else:
end = t_to_tt_idx_hds11[i+1]
schema_token_states1.append(torch.mean(wemb_h[cnt,start:end,:], dim=0))
assert len(schema_token_states1) == len(input_schema.column_names_embedder_input[cnt].split())
schema_token_states.append(schema_token_states1)
assert len(schema_token_states) == len(input_schema.column_names_embedder_input)
if use_gnn:
return utterance_states, schema_token_states, relations
else:
return utterance_states, schema_token_states | 2.109375 | 2 |
tests/test_fstr.py | rmorshea/fstr | 0 | 12791527 | import pytest
from sys import version_info
import fstr
def test_basic():
template = fstr("{x} + {y} = {x + y}", x=1)
assert template.format(y=2) == "1 + 2 = 3"
assert template.format(y=3) == "1 + 3 = 4"
def test_basic_format_language():
template = fstr("{x!r} + {y!r} = {x + y!r}", x="a")
assert template.format(y="b") == "'a' + 'b' = 'ab'"
assert template.format(y="c") == "'a' + 'c' = 'ac'"
_A_GLOBAL = 1
def test_simple_fstr_evaluate():
a_local = 2 # noqa: F841
assert fstr("{_A_GLOBAL} {a_local}").evaluate() == "1 2"
def test_format_language_with_inner_fstr():
template = fstr("{x:{width}}")
assert template.format(x=10, width=3) == " 10"
assert template.format(x=3, width=4) == " 3"
template = fstr("{x:{width}.{precision}}")
assert template.format(x=1.2345, width=4, precision=2) == " 1.2"
def test_dict():
d = {'"': "double-quote", "'": "single-quote", "foo": "bar"}
assert fstr("""{d["'"]}""").format(d=d) == "single-quote"
assert fstr("""{d['"']}""").format(d=d) == "double-quote"
assert fstr('{d["foo"]}').format(d=d) == "bar"
assert fstr("{d['foo']}").format(d=d) == "bar"
def test_format_with_function():
def add(x, y):
return x + y
template = fstr("{add(x, y)}", add=add)
assert template.format(x=1, y=2) == "3"
def test_even_double_brace_replacement():
template = fstr("{{}}")
assert template.format() == "{}"
def test_odd_double_brace_replacement():
template = fstr("{{{x}}}")
assert template.format(x=1) == "{1}"
def test_trailing_and_leading_space():
assert fstr("{ 1 + 2}").format() == "3"
assert fstr("{1 + 2 }").format() == "3"
assert fstr("{ 1 + 2 }").format() == "3"
def test_dict_inside_braces_with_padding():
template = fstr("{ {x: y} }", x="a")
assert template.format(y=1) == "{'a': 1}"
def test_hash_in_string():
# These aren't comments, since they're in strings.
d = {"#": "hash"}
assert fstr("{'#'}").format() == "#"
assert fstr("{d['#']}").format(d=d) == "hash"
@pytest.mark.parametrize("brace", "])}")
def test_unclosed_braces(brace):
with pytest.raises(SyntaxError):
fstr("{%s}" % brace).format()
def test_many_expressions():
context = {"x": "X", "width": 1}
def make_template(n, extra=""):
return fstr(("{x} " + extra) * n)
for n in range(250, 260):
make_template(n).format(**context)
# Test around 256.
for i in range(250, 260):
actual = make_template(i).format(**context)
expected = (context["x"] + " ") * i
assert actual == expected
actual = make_template(250, "{x:{width}} ").format(**context)
expected = (context["x"] + " ") * 500
assert actual == expected
# Test lots of expressions and constants.
assert fstr("{1} {'x'} {'y'} " * 1000).format() == "1 x y " * 1000
_format_specifier_width_precision_templates = [
"result: {value:{width}.{precision}}",
"result: {value:{width!r}.{precision}}",
"result: {value:{width:0}.{precision:1}}",
"result: {value:{1}{0:0}.{precision:1}}",
"result: {value:{ 1}{ 0:0}.{ precision:1}}",
]
_format_specifier_expression_expecteds = [
"result: 12.35",
"result: 12.35",
"result: 12.35",
"result: 12.35",
"result: 12.35",
" 0xa",
" 0xa",
" -0xa",
" -0xa",
" 0xa",
]
@pytest.mark.parametrize("template", _format_specifier_width_precision_templates)
def test_format_width_precision_specifier_expressions(template):
context = {"width": 10, "precision": 4, "value": 12.34567}
assert fstr(template).format(**context) == "result: 12.35"
_format_hex_specifier_templates = [
(10, "{value:#{1}0x}"),
(10, "{value:{'#'}1{0}{'x'}}"),
(-10, "{value:-{'#'}1{0}x}"),
(-10, "{value:{'-'}#{1}0{'x'}}"),
(10, "{value:#{3 != {4:5} and width}x}"),
]
@pytest.mark.parametrize("value, template", _format_hex_specifier_templates)
def test_format_hex_specifier_expressions(value, template):
expected = " -0xa" if value < 0 else " 0xa"
assert fstr(template).format(value=value, width=10) == expected
_invalid_format_specifier_templates = ["{'s'!r{':10'}}", "{4:{/5}}", "{'s'!{'r'}}"]
@pytest.mark.parametrize("template", _invalid_format_specifier_templates)
def test_invalid_format_specifier_expressions(template):
with pytest.raises(SyntaxError):
fstr(template).format()
def test_side_effect_order():
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
fstr("{x} {x}").format(x=X()) == "1 2"
_bad_missing_expressions = [
"{}",
"{ '" " {} ",
"{!r}",
"{ !r}",
"{10:{ }}",
" { } ",
# The Python parser ignores also the following
# whitespace characters in additional to a space.
"{\t\f\r\n}",
# Catch the empty expression before the
# invalid conversion.
"{!x}",
"{ !xr}",
"{!x:}",
"{!x:a}",
"{ !xr:}",
"{ !xr:a}",
"{!}",
"{:}",
# We find the empty expression before the
# missing closing brace.
"{!",
"{!s:",
"{:",
"{:x",
"{\xa0}",
]
@pytest.mark.parametrize("template", _bad_missing_expressions)
def test_missing_expression(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_bad_parens_in_expressions = ["{,}", "{,}", "{3)+(4}", "{\n}"]
@pytest.mark.parametrize("template", _bad_parens_in_expressions)
def test_bad_parens_in_expressions(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_backlashes_in_string_part = [
("\t", "\t"),
(r"\t", "\\t"),
("{2}\t", "2\t"),
("{2}\t{3}", "2\t3"),
("\t{3}", "\t3"),
("\u0394", "\u0394"),
(r"\u0394", "\\u0394"),
(r"\u0394", "\\u0394"),
("{2}\u0394", "2\u0394"),
("{2}\u0394{3}", "2\u03943"),
("\u0394{3}", "\u03943"),
("\x20", " "),
(r"\x20", "\\x20"),
(r"\x20", "\\x20"),
("{2}\x20", "2 "),
("{2}\x20{3}", "2 3"),
("\x20{3}", " 3"),
("2\x20", "2 "),
("2\x203", "2 3"),
("\x203", " 3"),
("\\{6*7}", "\\42"),
(r"\{6*7}", "\\42"),
]
if version_info >= (3, 0):
_backlashes_in_string_part.extend(
[
("\U00000394", "\u0394"),
(r"\U00000394", "\\U00000394"),
(r"\U00000394", "\\U00000394"),
("{2}\U00000394", "2\u0394"),
("{2}\U00000394{3}", "2\u03943"),
("\U00000394{3}", "\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}", "\u0394"),
("{2}\N{GREEK CAPITAL LETTER DELTA}", "2\u0394"),
("{2}\N{GREEK CAPITAL LETTER DELTA}{3}", "2\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}{3}", "\u03943"),
("2\N{GREEK CAPITAL LETTER DELTA}", "2\u0394"),
("2\N{GREEK CAPITAL LETTER DELTA}3", "2\u03943"),
("\N{GREEK CAPITAL LETTER DELTA}3", "\u03943"),
]
)
@pytest.mark.parametrize("template, expected", _backlashes_in_string_part)
def test_backslashes_in_string_part(template, expected):
assert fstr(template).format() == expected
_backslashes_in_expression = [r"{\}", r"{\'a\'}", r"{\t3}", "{\n}"]
@pytest.mark.parametrize("template", _backslashes_in_expression)
def test_no_backslashes_in_expression_part(template):
with pytest.raises(SyntaxError):
fstr(template).format()
def test_newlines_in_expressions():
assert fstr("{0}").format() == "0"
assert (
fstr(
"""{3+
4}"""
).format()
== "7" # noqa: W503
)
_empty_format_specifiers = [
("{x}", "test"),
("{x:}", "test"),
("{x!s:}", "test"),
("{x!r:}", "'test'"),
]
@pytest.mark.parametrize("template, expected", _empty_format_specifiers)
def test_empty_format_specifier(template, expected):
assert fstr(template).format(x="test") == expected
_bad_mismatched_braces = [
"{{}",
"{{}}}",
"}",
"x}",
"x}x",
"{3:}>10}",
"{3:}}>10}",
"{3:{{>10}",
"{3",
"{3!",
"{3:",
"{3!s",
"{3!s:",
"{3!s:3",
"x{",
"x{x",
"{x",
"{3:s",
"{{{",
"{{}}{",
"{",
]
@pytest.mark.parametrize("template", _bad_mismatched_braces)
def test_bad_mismatched_braces(template):
with pytest.raises(SyntaxError):
fstr(template).format()
_ok_mismatched_braces = [("{'{'}", "{"), ("{'}'}", "}")]
@pytest.mark.parametrize("template, expected", _ok_mismatched_braces)
def test_ok_mistmatched_braces(template, expected):
assert fstr(template).format() == expected
_ok_lambdas = [
("{(lambda y:x*y)('8')!r}", "'88888'"),
("{(lambda y:x*y)('8')!r:10}", "'88888' "),
("{(lambda y:x*y)('8'):10}", "88888 "),
]
@pytest.mark.parametrize("template, expected", _ok_lambdas)
def test_lambda(template, expected):
assert fstr(template, x=5).format() == expected
_triple_quoted_strings = [
("{'''x'''}", "x"),
("{'''eric's'''}", "eric's"),
('{"x" """eric"s""" "y"}', 'xeric"sy'),
('{"x" """eric"s"""}', 'xeric"s'),
('{"""eric"s""" "y"}', 'eric"sy'),
('{"""x""" """eric"s""" "y"}', 'xeric"sy'),
('{"""x""" """eric"s""" """y"""}', 'xeric"sy'),
('{r"""x""" """eric"s""" """y"""}', 'xeric"sy'),
]
@pytest.mark.parametrize("template, expected", _triple_quoted_strings)
def test_expressions_with_triple_quoted_strings(template, expected):
assert fstr(template).format() == expected
def test_missing_variable():
with pytest.raises(NameError):
fstr("v:{value}").format()
def test_missing_format_spec():
class Obj:
def __format__(self, spec):
if not spec:
return "*"
return spec
assert fstr("{Obj():x}").format(Obj=Obj) == "x"
assert fstr("{Obj()}").format(Obj=Obj) == "*"
assert fstr("{Obj():}").format(Obj=Obj) == "*"
assert fstr("{3:}").format() == "3"
assert fstr("{3!s:}").format() == "3"
def test_call():
def foo(x):
return "x=" + str(x)
assert fstr("{foo(10)}").format(foo=foo) == "x=10"
def test_leading_trailing_spaces():
assert fstr("{ 3}").format() == "3"
assert fstr("{ 3}").format() == "3"
assert fstr("{3 }").format() == "3"
assert fstr("{3 }").format() == "3"
assert fstr("expr={ {x: y for x, y in [(1, 2), ]} }").format() == "expr={1: 2}"
assert fstr("expr={ {x: y for x, y in [(1, 2), ]}}").format() == "expr={1: 2}"
def test_not_equal():
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
assert fstr("{3!=4}").format() == "True"
assert fstr("{3!=4:}").format() == "True"
assert fstr("{3!=4!s}").format() == "True"
assert fstr("{3!=4!s:.3}").format() == "Tru"
def test_conversions():
assert fstr("{3.14:10.10}").format() == " 3.14"
assert fstr("{3.14!s:10.10}").format() == "3.14 "
assert fstr("{3.14!r:10.10}").format() == "3.14 "
if version_info >= (3, 0):
assert fstr("{3.14!a:10.10}").format() == "3.14 "
assert fstr('{"a"}').format() == "a"
assert fstr('{"a"!r}').format() == "'a'"
if version_info >= (3, 0):
assert fstr('{"a"!a}').format() == "'a'"
# Not a conversion.
assert fstr('{"a!r"}').format() == "a!r"
# Not a conversion, but show that ! is allowed in a format spec.
assert fstr("{3.14:!<10.10}").format() == "3.14!!!!!!"
    bad_conversions = [
        "{3!g}",
        "{3!A}",
        "{3!3}",
        "{3!G}",
        "{3!!}",
        "{3!:}",
        "{3! s}",  # no space before conversion char
        "{x!s{y}}",
        "{3!ss}",
        "{3!ss:}",
        "{3!ss:s}",
    ]
for bad in bad_conversions:
with pytest.raises(SyntaxError):
fstr(bad).format()
_invalid_expressions = ["{a[4)}", "{a(4]}"]
@pytest.mark.parametrize("invalid", _invalid_expressions)
def test_invalid_expressions(invalid):
with pytest.raises(SyntaxError):
fstr(invalid).format()
if version_info < (3, 0):
_causes_errors = [("{1000:j}", SyntaxError)]
elif version_info < (3, 6):
_causes_errors = [
("{(lambda: 0):x}", TypeError),
("{(0,):x}", TypeError),
("{1000:j}", SyntaxError),
]
else:
_causes_errors = [
("{(lambda: 0):x}", TypeError),
("{(0,):x}", TypeError),
("{1000:j}", ValueError),
]
@pytest.mark.parametrize("bad, etype", _causes_errors)
def test_errors(bad, etype):
with pytest.raises(etype):
fstr(bad).format()
| 2.625 | 3 |
formidable/__init__.py | peopledoc/django-formidable | 11 | 12791528 | <reponame>peopledoc/django-formidable
from .json_migrations import latest_version
default_app_config = 'formidable.app.FormidableConfig'
version = '7.2.0.dev0'
json_version = latest_version
| 1.15625 | 1 |
1_fetch.py | doofmars/bgg-quartets | 0 | 12791529 | <reponame>doofmars/bgg-quartets
# coding=utf-8
import json
import os
import configparser
import requests as req
# noinspection PyUnresolvedReferences
from bs4 import BeautifulSoup, Tag
config = configparser.ConfigParser()
def load_data(url, file_name):
"""
Load data either from web or cache if already present
:param url: url to load
:param file_name: name of cached file
:return: soup to parse
"""
if not os.path.exists(config['fetch']['CACHE_DIRECTORY']):
os.mkdir(config['fetch']['CACHE_DIRECTORY'])
collection_file = os.path.join(config['fetch']['CACHE_DIRECTORY'], file_name)
if not os.path.exists(collection_file):
print(f'Reading {file_name} page from web')
response = req.get(url)
with open(collection_file, 'w', encoding='utf-8') as fp:
fp.write(response.text)
print(f'{file_name} saved to cache folder')
html = response.text
else:
print(f'Reading {file_name} from cache')
with open(collection_file, 'r', encoding='utf-8') as fp:
html = fp.read()
if file_name.endswith('html'):
return BeautifulSoup(html, 'html.parser')
if file_name.endswith('xml'):
return BeautifulSoup(html, 'lxml')
def get_collection():
"""
Get the collection and convert to json
"""
# Find table containing collection
collection_file_key = config['fetch']['COLLECTION_FILE_KEY']
collection_table = load_data(config['fetch']['URL'], file_name=f'{collection_file_key}.html').find(id='collectionitems')
collection = list()
# Iterate over collection table, store results to dict
first = True
for collection_row in collection_table.find_all('tr'):
# Skip header, we don't care about this
if first:
first = False
continue
# Append parsed collection row to collection list for later dumping
collection.append(parse_collection_row(collection_row))
print(f'Parsed {len(collection)} items, writing JSON file')
print(f'\nCollecting game data:')
for game in collection:
game_id = game.get("id")
game_data = load_data(f'https://boardgamegeek.com/xmlapi/boardgame/{game_id}?stats=1', f'{game_id}.xml')
game['boardgamecategory'] = [category.text for category in game_data.find_all('boardgamecategory')]
game['boardgamesubdomain'] = [domain.text for domain in game_data.find_all('boardgamesubdomain')]
        image = text_or_none(game_data.find('image'))
        game['image'] = image.strip() if image is not None else None  # guard: .strip() on None would raise AttributeError
        game['minplayers'] = text_or_none(game_data.find('minplayers'))
        game['maxplayers'] = text_or_none(game_data.find('maxplayers'))
        game['playingtime'] = text_or_none(game_data.find('playingtime'))
suggested_numplayers = parse_poll(game_data.find('poll', attrs={"name": "suggested_numplayers"}))
        best = map_poll(suggested_numplayers, is_best)
        game['best_minplayers'] = best[0]
        game['best_maxplayers'] = best[-1]
        game['best_numplayers'] = best
        game['recommended_numplayers'] = map_poll(suggested_numplayers, is_recommended)
# Finally dump data as JSON
print(f'\nWriting result to JSON:')
with open(os.path.join(config['fetch']['RESULT_DIRECTORY'], f'{collection_file_key}.json'), 'w', encoding='UTF-8') as fp:
json.dump(collection, fp, indent=2)
print(f'JSON file written to cache folder')
def parse_collection_row(collection_row):
"""
Parse a single collection table row into a dict
:param collection_row: the row to parse
:return: a dictionary containing row values
"""
collection_item = dict()
collection_item['name'] = collection_row.find('a', class_='primary').text
version = collection_row.find('div', class_='geekitem_name')
if version is not None:
collection_item['version'] = version.text.strip()
year = collection_row.find('span', class_='smallerfont')
if year is not None:
collection_item['year'] = year.text[1:-1]
collection_item['id'] = collection_row.find('a', class_='primary')['href'].split('/')[2]
    collection_item['user_rating'] = text_or_none(collection_row.find('div', class_='ratingtext'))
geek_rating = collection_row.find('td', class_='collection_bggrating').text.strip()
if geek_rating == 'N/A':
collection_item['geek_rating'] = None
else:
collection_item['geek_rating'] = geek_rating
collection_item['status'] = collection_row.find('td', class_='collection_status').text.strip()
plays = collection_row.find('td', class_='collection_plays')
if plays.a is None:
collection_item['plays'] = 0
else:
collection_item['plays'] = int(plays.a.text)
return collection_item
def map_poll(poll, check):
"""
Map the voting poll results dict to a list containing only voting options that pass the check
:param poll: The voting poll consisting of vote topic with recommendations by the community
:param check: Checking function to validate against
    :return: [None] if the poll is missing or nothing passes the check, else the list of passing player counts.
"""
try:
recommended = [vote_option for (vote_option, votes) in poll.items() if check(votes)]
except KeyError:
return [None]
if len(recommended) == 0:
return [None]
else:
return recommended
def is_best(votes):
return int(votes['Best']) >= int(votes['Recommended']) + int(votes['Not Recommended'])
def is_recommended(votes):
return int(votes['Best']) + int(votes['Recommended']) >= int(votes['Not Recommended'])
def text_or_none(tag):
    """Return tag.text, or None when the tag is missing."""
    if tag is None:
        return None
    return tag.text
def parse_poll(poll_data):
if poll_data is None:
return None
else:
poll = dict()
results = poll_data.find_all('results')
for result in results:
poll[result['numplayers']] = {
str(child['value']): child['numvotes']
for child in result.children
if isinstance(child, Tag)
}
return poll
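
# Example (sketch): parse_poll output for a BGG "suggested_numplayers" poll
#   {'1': {'Best': '0', 'Recommended': '2', 'Not Recommended': '10'},
#    '2': {'Best': '15', 'Recommended': '8', 'Not Recommended': '1'}}
# map_poll(poll, is_best) on that dict returns ['2'].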
if __name__ == '__main__':
config.read("config.ini")
get_collection()
| 2.6875 | 3 |
kanbanflow_prj_selector/boards/board.py | igorbasko01/kanbanflow-prj-selector | 0 | 12791530 | <gh_stars>0
import requests
import logging
from ..constants import KFLOW_BASE_URL
from .column import Column
from .swimlane import Swimlane
from .task import Task
class Board(object):
def __init__(self, token):
self.log = logging.getLogger()
self.FULL_BOARD_URL = f"{KFLOW_BASE_URL}/board"
self.FULL_TASKS_URL = f"{KFLOW_BASE_URL}/tasks"
(self.id, self.name, self.columns, self.swimlanes) = self.parse_board(self.fetch_board_json(token))
tasks_by_column = [self.fetch_tasks_by_column(column, self.name, token) for column in self.columns]
self.tasks = self.flatten_tasks(tasks_by_column)
def fetch_board_json(self, token):
self.log.info("Pulling token: %s", token)
resp = requests.get(f"{self.FULL_BOARD_URL}?apiToken={token}")
self.log.info("Status code: %s", resp.status_code)
return resp.json()
def parse_board(self, board_dict):
columns = [Column(col_dict) for col_dict in board_dict["columns"]]
swimlanes = [Swimlane(lane_dict) for lane_dict in board_dict["swimlanes"]]
return board_dict["_id"], board_dict['name'], columns, swimlanes
def flatten_tasks(self, lists_of_tasks):
return [task for sublist in lists_of_tasks for task in sublist]
def fetch_tasks_by_column(self, column, board_name, token):
self.log.info("Pulling tasks for %s column in %s board", column.name, board_name)
def fetch_tasks(column_id, next_task=None):
base_url = f"{self.FULL_TASKS_URL}?apiToken={token}&columnId={column_id}"
final_url = f"{base_url}&startTaskId={next_task}" if next_task else f"{base_url}"
resp = requests.get(final_url)
self.log.info("Status code: %s", resp.status_code)
resp_dict = resp.json()
tasks = [Task(t_dict) for t_dict in resp_dict[0]["tasks"]]
if resp_dict[0].get("tasksLimited"):
tasks.extend(fetch_tasks(column_id, resp_dict[0]["nextTaskId"]))
return tasks
return fetch_tasks(column.uniqueId)
def get_spent_time(self):
return sum([task.spent for task in self.tasks])
def __str__(self):
return f"{{ Id: {self.id}. Name: {self.name}. Columns: {self.columns}. Swimlanes: {self.swimlanes}. Tasks: {self.tasks} }}"
def __repr__(self):
return str(self)
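
# Example usage (sketch): requires a valid KanbanFlow API token
#   board = Board("<api-token>")
#   print(board.get_spent_time())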
| 2.78125 | 3 |
app/waterQual/DGSA/period.py | fkwai/geolearn | 0 | 12791531 | <gh_stars>0
from hydroDL import kPath
from hydroDL.app import waterQuality, DGSA
from hydroDL.data import gageII, usgs, gridMET
from hydroDL.master import basins
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
import importlib
from astropy.timeseries import LombScargle
import pandas as pd
import numpy as np
import os
import time
wqData = waterQuality.DataModelWQ('Silica64')
siteNoLst = wqData.siteNoLst
dictP = dict()
for k, siteNo in enumerate(siteNoLst):
print(siteNo)
    dfObs = waterQuality.readSiteY(siteNo, ['00955'])
# rm outlier
df = dfObs[dfObs['00955'].notna().values]
y = df['00955'].values
yV = y[y < np.percentile(y, 99)]
yV = yV[yV > np.percentile(y, 1)]
ul = np.mean(yV)+np.std(yV)*5
dfObs[dfObs['00955'] > ul] = np.nan
# fourier
df = dfObs[dfObs.notna().values]
tt = dfObs.index.values
    xx = (tt.astype('datetime64[D]') -
          np.datetime64('1979-01-01')).astype(float)  # np.float was removed in NumPy 1.24; use the builtin
    t = df.index.values
    x = (t.astype('datetime64[D]') -
         np.datetime64('1979-01-01')).astype(float)
y = df['00955'].values
y = y-np.nanmean(y)
nt = len(xx)
freq = np.fft.fftfreq(nt)[1:]
ls = LombScargle(x, y)
power = ls.power(freq)
prob = ls.false_alarm_probability(power)
ind = np.where(prob < 0.05)[0]
    periods = np.unique(np.abs((1 / freq[ind]).astype(int)))  # renamed from `pd` to avoid shadowing the pandas import
    dictP[siteNo] = periods.tolist()
pLst = sum(list(dictP.values()), [])
pu, pc = np.unique(np.array(pLst), return_counts=True)
temp = np.stack([pu, pc]).transpose()
rMat = np.zeros([len(siteNoLst), 3])
for k, siteNo in enumerate(siteNoLst):
temp = dictP[siteNo]
if 6 in temp or 7 in temp:
rMat[k, 0] = 1
if 182 in temp:
rMat[k, 1] = 1
if 365 in temp:
rMat[k, 2] = 1
# plot map
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
figM, axM = plt.subplots(3, 1, figsize=(8, 6))
for k in range(3):
mm = axplot.mapPoint(axM[k], lat, lon, rMat[:, k], s=12)
axM[0].set_title('weekly signal')
axM[1].set_title('half yearly signal')
axM[2].set_title('yearly signal')
figM.show()
dfG = gageII.readData(varLst=gageII.varLst, siteNoLst=siteNoLst)
dfG = gageII.updateCode(dfG)
pMat = dfG.values
dfS = DGSA.DGSA_light(
pMat, rMat, ParametersNames=dfG.columns.tolist(), n_clsters=3)
ax = dfS.sort_values(by=0).plot.barh()
plt.show()
dfSP = dfS.sort_values(by=0)
fig, ax = plt.subplots(1, 1)
x = range(len(dfSP))
cLst = list()
for b in (dfSP[0] > 1).tolist():
cLst.append('r') if b is True else cLst.append('b')
ax.barh(x, dfSP[0].values, color=cLst)
ax.set_yticks(x)
ax.set_yticklabels(dfSP.index.tolist())
plt.tight_layout()
fig.show()
| 2 | 2 |
tools/builder.py | dp92987/nginx-amplify-agent | 308 | 12791532 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from builders import deb, rpm, amazon
from builders.util import shell_call
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
if __name__ == '__main__':
package = 'nginx-amplify-agent' if len(sys.argv) == 1 else sys.argv[1]
if os.path.isfile('/etc/debian_version'):
deb.build(package=package)
elif os.path.isfile('/etc/redhat-release'):
rpm.build(package=package)
else:
os_release = shell_call('cat /etc/os-release', important=False)
if 'amazon linux' in os_release.lower():
amazon.build(package=package)
else:
print("sorry, it will be done later\n")
| 2.234375 | 2 |
socket_server.py | afinello/simple-echo-server | 0 | 12791533 |
import socket, select
import signal

RECV_BUFFER = 1024  # receive buffer size in bytes, used by Server.select()
class SimpleSignalHandler:
should_exit = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.should_exit = True
class Client:
addr = None
def __init__(self, addr):
self.addr = addr
class Server:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        # per-instance state; mutable defaults must not live on the class,
        # otherwise every Server instance would share the same lists
        self.active_sockets = []
        self.clients_info = {}
        self.is_running = False
def start(self, max_clients=10):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind((self.host, self.port))
self.server_socket.listen(max_clients)
self.active_sockets.append(self.server_socket)
self.is_running = True
def shutdown(self):
for sock in self.active_sockets:
sock.close()
# clear all connections list
self.active_sockets[:] = []
self.clients_info.clear()
# close listening socket
self.server_socket.close()
self.is_running = False
def disconnect_client(self, sockfd):
sockfd.close()
self.active_sockets.remove(sockfd)
client = self.clients_info.pop(sockfd)
print "Client (%s, %s) is offline" % client.addr
def select(self, timeout=1):
try:
# get the list of sockets which are ready to be read
read_sockets,write_sockets,error_sockets = select.select(self.active_sockets,[] , self.active_sockets, timeout)
for sock in read_sockets:
# new connection
if sock == self.server_socket:
# Handle the case in which there is a new connection recieved through server_socket
sockfd, addr = self.server_socket.accept()
self.active_sockets.append(sockfd)
self.clients_info[sockfd] = Client(addr)
print "Client (%s, %s) connected" % addr
# got message from a client
else:
# in Windows, when a TCP program closes abruptly,
# a "Connection reset by peer" exception will be thrown
try:
data = sock.recv(RECV_BUFFER)
# if client socket has been closed - the received buffer will be empty
if len(data) > 0:
# echo back the client message
sock.send(data)
else:
# client disconnected
self.disconnect_client(sock)
# client disconnected (Windows)
                    except socket.error:
self.disconnect_client(sock)
continue
for sock in error_sockets:
self.disconnect_client(sock)
except (select.error, KeyboardInterrupt) as e:
self.shutdown()
if __name__ == "__main__":
PORT = 8000
MAX_CLIENTS = 10
signal_handler = SimpleSignalHandler()
server = Server("0.0.0.0", PORT)
server.start(MAX_CLIENTS)
print "Server started on port " + str(PORT)
timeout = 1
while server.is_running:
server.select(timeout)
if signal_handler.should_exit:
server.shutdown()
print "Server exited"
| 2.9375 | 3 |
restApi/product/migrations/0002_auto_20201219_1633.py | rtx-abir/ecom | 0 | 12791534 | # Generated by Django 3.0.8 on 2020-12-19 21:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('category', '0001_initial'),
('product', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='category.Category'),
),
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
migrations.AlterField(
model_name='product',
name='is_active',
field=models.BooleanField(blank=True, default=True),
),
]
| 1.53125 | 2 |
sg_covid_impact/make_glass_validate.py | nestauk/sg_covid_impact | 2 | 12791535 | <reponame>nestauk/sg_covid_impact
# Script to validate Glass data
import pandas as pd
import json
import numpy as np
import os
import logging
import requests
from zipfile import ZipFile
from io import BytesIO
import altair as alt
from sg_covid_impact.getters.glass_house import get_glass_house
from sg_covid_impact.getters.companies_house import get_address, get_sector
from sg_covid_impact.make_sic_division import (
load_sic_taxonomy,
extract_sic_code_description,
)
from sg_covid_impact.descriptive import (
assign_nuts1_to_lad,
read_shape,
plot_choro,
read_lad_nuts1_lookup,
make_section_division_lookup,
)
from sg_covid_impact.utils.altair_save_utils import (
google_chrome_driver_setup,
save_altair,
)
from sg_covid_impact.utils.altair_s3 import export_chart
import sg_covid_impact
project_dir = sg_covid_impact.project_dir
FIG_PATH = f"{project_dir}/figures/scotland"
driver = google_chrome_driver_setup()
nspl_target = f"{project_dir}/data/raw/nspl"
nspl_location = os.path.join(nspl_target, "Data", "NSPL_NOV_2020_UK.csv")
meta_location = os.path.join(nspl_target, "Documents")
# Functions
def make_glass_meta(companies, score=60):
"""Makes the glass metadata table"""
logging.info("Making glass metadata")
glass_house = get_glass_house()
glass_ch_meta = glass_house.query(f"score>{score}").merge(
companies, on="company_number"
)
return glass_ch_meta
def make_companies():
"""Make the companies house table"""
logging.info("Making CH")
companies_address = get_address()
companies_sector = get_sector()
companies = (
companies_address[["company_number", "postcode"]]
.merge(
companies_sector.query("rank==1")[["company_number", "SIC4_code"]],
on="company_number",
)
.assign(division=lambda x: [c[:2] for c in x["SIC4_code"]])
.assign(division_name=lambda x: x["division"].map(_DIV_NAME_LOOKUP))
.merge(nspl, left_on="postcode", right_on="pcds")
)
return companies
def fetch_nspl():
"""Fetch NSPL (if needed)"""
nspl_url = "https://www.arcgis.com/sharing/rest/content/items/4df8a1a188e74542aebee164525d7ca9/data"
    if os.path.exists(nspl_target):
        logging.info("Already collected NSPL")
    else:
        os.makedirs(nspl_target, exist_ok=True)
        req = requests.get(nspl_url)
        ZipFile(BytesIO(req.content)).extractall(nspl_target)
def make_lad_lookup(geo_var_name="LAD20"):
"""Lookup between LAD names and codes 2020"""
name_lu = pd.read_csv(
os.path.join(meta_location, "LA_UA names and codes UK as at 04_20.csv")
)
name_dict = name_lu.set_index(f"{geo_var_name}CD")[f"{geo_var_name}NM"].to_dict()
return name_dict
def read_nspl(
geo="laua", names="LA_UA names and codes UK as at 04_20.csv", geo_var_name="LAD20"
):
"""Read and tag NSPL"""
logging.info("Reading NSPL")
nspl = pd.read_csv(nspl_location, usecols=["pcds", geo]).dropna(
axis=0, subset=[geo]
)
name_lu = pd.read_csv(os.path.join(meta_location, names))
name_dict = name_lu.set_index(f"{geo_var_name}CD")[f"{geo_var_name}NM"].to_dict()
nspl[f"{geo}_name"] = nspl[geo].map(name_dict)
nspl["nuts1"] = nspl[geo].apply(assign_nuts1_to_lad)
return nspl
def make_shares_comparison(glass, ch, variable):
"""Compare distributions between Glass and CH"""
out = (
pd.concat(
[df[[variable]].value_counts(normalize=True) for df in [glass, ch]], axis=1
)
.rename(columns={0: "glass", 1: "companies"})
.assign(share_norm=lambda x: (x["glass"] / x["companies"]) - 1)
)
return out
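# Reading share_norm: 0 means a category has identical shares in Glass and
# Companies House, positive means over-represented in Glass, negative means
# under-represented.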
fetch_nspl()
# Lookups
_DIV_NAME_LOOKUP = extract_sic_code_description(load_sic_taxonomy(), "Division")
_SECTION_DIVISION_LOOKUP, _SECTION_NAME_LOOKUP = make_section_division_lookup()
_LAD_NUTS1_LOOKUP = read_lad_nuts1_lookup()
_LAD_NAME_DICT = make_lad_lookup()
# Read everything
nspl = read_nspl()
companies = make_companies()
glass_meta = make_glass_meta(companies)
# Focus on Scotland
# Scot
glass_meta_sc, companies_sc = [
df.query("nuts1=='Scotland'").reset_index(drop=True)
for df in [glass_meta, companies]
]
sector_shares = (
make_shares_comparison(glass_meta_sc, companies_sc, "division")
.reset_index(drop=False)
.assign(
section_name=lambda x: x["division"]
.map(_SECTION_DIVISION_LOOKUP)
.map(_SECTION_NAME_LOOKUP)
)
.dropna(axis=0)
)
# Calculate correlations
sector_shares[["glass", "companies"]].corr()
# Sorted divisions
sorted_divs = sector_shares.sort_values(
["section_name", "share_norm"], ascending=[True, False]
)["division"].to_list()
sector_shares["division_name"] = sector_shares["division"].map(_DIV_NAME_LOOKUP)
# Chart comparing sector distributions
sector_comparison_chart = (
alt.Chart(sector_shares)
.mark_bar()
.encode(
y=alt.Y("division", sort=sorted_divs, axis=alt.Axis(labels=False, ticks=False)),
x=alt.X("share_norm", title="Glass vs CH share"),
color=alt.Color("section_name", title="Section"),
tooltip=["division_name"],
)
).properties(height=300, width=150)
sector_comparison_chart
save_altair(
sector_comparison_chart, "glass_sector_validation", driver=driver, path=FIG_PATH
)
export_chart(sector_comparison_chart, "glass_sector_validation")
# Chart comparing geo distributions
sh = read_shape()
lad_shares = make_shares_comparison(glass_meta_sc, companies_sc, "laua")
lad_shares[["glass", "companies"]].corr()
merged = sh.merge(
lad_shares.reset_index(drop=False), left_on="lad19cd", right_on="laua"
)
merged_json = json.loads(merged.to_json())
glass_share_map = (
plot_choro(
merged_json, "share_norm", "Glass vs CH share", "lad19nm", scale_type="linear"
)
# .configure_view(strokeWidth=0)
.properties(height=300, width=200)
)
glass_validation = alt.hconcat(sector_comparison_chart, glass_share_map)
glass_validation
save_altair(glass_validation, "glass_place_validation", driver, path=FIG_PATH)
export_chart(glass_validation, "glass_place_validation")
# LAD by division coverage
lad_sector_shares = (
pd.concat(
[
df.groupby("laua").apply(
lambda x: x["division"].value_counts(normalize=True)
)
for df, name in zip([glass_meta_sc, companies_sc], ["glass", "ch"])
],
axis=1,
)
).fillna(0)
lad_sector_shares.columns = ["glass", "ch"]
lad_sector_shares = (
lad_sector_shares.assign(share_norm=lambda x: x["glass"] / x["ch"])
.reset_index(drop=False)
.rename(columns={"level_1": "division"})
.assign(division_name=lambda x: x["division"].map(_DIV_NAME_LOOKUP))
.assign(lad_name=lambda x: x["laua"].map(_LAD_NAME_DICT))
)
corr_list = []
for x in set(lad_sector_shares["laua"]):
sel = lad_sector_shares.query(f"laua=='{x}'")
    corr = float(sel[["glass", "ch"]].corr().iloc[0, 1])
corr_list.append([x, corr])
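# Per-LAD correlation between Glass and CH division shares; LADs are later
# sorted by this value so the heatmap rows run from best to worst agreement.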
lads_corr_dict = {k[0]: k[1] for k in corr_list}
lads_sorted = [x[0] for x in sorted(corr_list, key=lambda x: x[1], reverse=True)]
lads_corr_df = pd.DataFrame(corr_list, columns=["lad_name", "glass_ch_correlation"])
# Plot
rep_chart = (
alt.Chart(lad_sector_shares)
.transform_filter(alt.datum.share_norm > 0)
.mark_rect()
.encode(
y=alt.Y("lad_name", sort=lads_sorted, title="Local Authority"),
x=alt.X("division", axis=alt.Axis(labels=False, ticks=False)),
color=alt.Color(
"share_norm",
sort="descending",
title="Glass vs CH share",
scale=alt.Scale(scheme="Spectral", type="log"),
legend=alt.Legend(orient="bottom"),
),
tooltip=["lad_name", "division_name", "share_norm"],
)
).properties(width=400, height=300)
corr_chart = (
alt.Chart(lads_corr_df)
.mark_point(filled=True, stroke="black", strokeWidth=0.2)
.encode(
y=alt.Y(
"lad_name",
title=None,
sort=lads_sorted,
axis=alt.Axis(labels=False, ticks=False, grid=True),
),
x=alt.X("glass_ch_correlation", title=["Glass-CH sector", "share correlation"]),
color=alt.Color("glass_ch_correlation", legend=None),
)
).properties(width=100, height=300)
lad_share_comparison = alt.hconcat(rep_chart, corr_chart, spacing=1).resolve_scale(
color="independent"
)
save_altair(
lad_share_comparison, "glass_sector_place_validation", driver=driver, path=FIG_PATH
)
export_chart(lad_share_comparison, "glass_sector_place_validation")
| 2.5625 | 3 |
setup.py | anapaulagomes/pardal-python | 2 | 12791536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import find_packages, setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding="utf-8").read()
setup(
name="pardal",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="MIT",
url="https://github.com/anapaulagomes/pardal",
description="An accessible and customizable Twitter client",
packages=find_packages(exclude=["tests", "docs"]),
python_requires=">=3.7",
install_requires=[""], # FIXME
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: End Users/Desktop",
"Topic :: Adaptive Technologies",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
],
entry_points={},
)
| 1.445313 | 1 |
dataframe.py | UWSEDS/homework-4-documentation-and-style-hyspacex | 0 | 12791537 | """Create dataframe and check the quality
This script downloads a dataset from Seattle Open Data Portal and imports
it as a Pandas Dataframe.
This tool checks if the dataframe:
1. Has at least 10 rows of data
2. Contains only the columns that are specified in the checklist
3. Values in each column have the same python type
This script requires that `pandas` be installed within the Python
environment you are running this script in.
This file can also be imported as a module and contains the following
functions:
* test_column_names - returns True if the column names match the expected list
* test_nan_values - returns True if the dataframe has no missing values
* test_least_row_counts - returns True if the dataframe has at least one row of data
* main - the main function of the script
"""
import pandas as pd
DATAFRAMES = pd.read_csv(
    'https://data.seattle.gov/api/views/tw7j-dfaw/rows.csv?accessType=DOWNLOAD')
def test_datatype(df_import):
"""Test if all columns have values of the correct type
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
        a bool value: True if all values in each column share the same type
    """
    for name in df_import.columns:
        # compare each value's type against the type of the second-row value
        expected_type = type(df_import[name].iloc[1])
        if not (df_import[name].map(type) == expected_type).all():
            return False
    return True
def test_column_names(df_import):
"""Test if the dataframe has expected columns
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has expected columns
"""
df_import_columns = sorted(df_import.columns.tolist())
df_import_checklist = ['trip_id',
'starttime',
'stoptime',
'bikeid',
'tripduration',
'from_station_name',
'to_station_name',
'from_station_id',
'to_station_id',
'usertype',
'gender',
'birthyear']
    return df_import_columns == sorted(df_import_checklist)
def test_nan_values(df_import):
"""Test if the dataframe has non value
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has non value
"""
return df_import.notnull().values.any()
def test_least_row_counts(df_import):
"""Test if the dataframe has at least one row of data
Parameters
----------
df_import : Pandas Dataframe
The dataset imported as Pandas Dataframe
Returns
-------
bool
a bool value: True if the dataframe has at least one row of data
"""
return df_import.shape[0] >= 1
if __name__ == '__main__':
"""Main function
Returns
-------
bool
a bool value if the dataframe pass all the tests
"""
DATAFRAME = pd.read_csv(
        'https://data.seattle.gov/api/views/tw7j-dfaw/rows.csv?accessType=DOWNLOAD')
# only fetch first 10 rows for testing
DATAFRAME = DATAFRAME.head(10)
print(test_column_names(DATAFRAME) & test_datatype(DATAFRAME) &
test_least_row_counts(DATAFRAME) & test_nan_values(DATAFRAME))
| 3.921875 | 4 |
test/language/constraints/python/StructureConstraintsTest.py | dkBrazz/zserio | 86 | 12791538 | import unittest
import zserio
from testutils import getZserioApi
class StructureConstraintsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "constraints.zs").structure_constraints
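    # The read tests below serialize colors with the raw bit-stream helper
    # _write() and expect a PythonRuntimeException when a constrained field
    # (e.g. blackColor == BLACK) holds a violating value; the write tests
    # check the mirror direction.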
def testReadCorrectColors(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.WHITE,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
structureConstraints.read(reader)
self.assertEqual(self.api.BasicColor.BLACK, structureConstraints.black_color)
self.assertEqual(self.api.BasicColor.WHITE, structureConstraints.white_color)
self.assertEqual(self.api.ExtendedColor.PURPLE, structureConstraints.purple_color)
def testReadWrongBlackConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.RED, self.api.BasicColor.WHITE,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testReadWrongWhiteConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.RED,
self.api.ExtendedColor.PURPLE)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testReadWrongPurpleConstraint(self):
writer = zserio.BitStreamWriter()
self.__class__._write(writer, self.api.BasicColor.BLACK, self.api.BasicColor.WHITE,
self.api.ExtendedColor.LIME)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
structureConstraints = self.api.StructureConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.read(reader)
def testWriteCorrectConstraints(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.PURPLE)
bitBuffer = zserio.serialize(structureConstraints)
readStructureConstraints = zserio.deserialize(self.api.StructureConstraints, bitBuffer)
self.assertEqual(self.api.BasicColor.BLACK, readStructureConstraints.black_color)
self.assertEqual(self.api.BasicColor.WHITE, readStructureConstraints.white_color)
self.assertEqual(self.api.ExtendedColor.PURPLE, readStructureConstraints.purple_color)
self.assertEqual(structureConstraints, readStructureConstraints)
def testWriteWrongBlackConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.RED,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.PURPLE)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
def testWriteWrongWhiteConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.RED,
True,
self.api.ExtendedColor.PURPLE)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
def testWriteWrongPurpleConstraint(self):
structureConstraints = self.api.StructureConstraints(self.api.BasicColor.BLACK,
self.api.BasicColor.WHITE,
True,
self.api.ExtendedColor.LIME)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
structureConstraints.write(writer)
@staticmethod
def _write(writer, blackColor, whiteColor, purpleColor):
writer.write_bits(blackColor.value, 8)
writer.write_bool(True)
writer.write_bits(whiteColor.value, 8)
writer.write_bool(True)
writer.write_bits(purpleColor.value, 16)
| 2.5625 | 3 |
notebook/2019-07-03_lit_in_biomarkers.py | jfear/larval_gonad | 1 | 12791539 | import yaml
import pandas as pd
from more_itertools import flatten
import os
os.chdir('notebook')
fbgn2symbol = (
pd.read_feather('../references/gene_annotation_dmel_r6-26.feather', columns=['FBgn', 'gene_symbol'])
.set_index('FBgn')
.to_dict()['gene_symbol']
)
config = yaml.safe_load(open('../config/common.yaml'))
CLUSTER_ANNOT = config['cluster_annot']
CLUSTER_ORDER = config['cluster_order']
lit_genes = yaml.safe_load(open('../config/literature_genes.yaml'))
lit_genes_all = list(flatten([
v
for k, v in lit_genes.items()
]))
bm = (
pd.read_feather('../output/seurat3-cluster-wf/combined_n3_biomarkers.feather', columns=['FBgn', 'gene_symbol', 'cluster', 'p_val_adj', 'pct.1'])
.query('p_val_adj <= 0.01')
.drop_duplicates(subset='FBgn', keep=False)
.reset_index(drop=True)
.assign(cluster=lambda df: df.cluster.cat.rename_categories(CLUSTER_ANNOT))
.assign(cluster=lambda df: df.cluster.cat.reorder_categories(CLUSTER_ORDER))
.set_index('FBgn')
.groupby('cluster')
)
def get_lit(cluster):
print(cluster)
df = bm.get_group(cluster)
return df.reindex(lit_genes_all).dropna()
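# For each cluster, report which literature-curated genes survive the
# biomarker filter above (adjusted p-value <= 0.01, unique FBgn per gene).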
get_lit('SP')
get_lit('EPS')
get_lit('PS1')
get_lit('PS2')
get_lit('PS3')
get_lit('ECY')
get_lit("CY1")
get_lit("CY2")
get_lit("TE")
get_lit("PC")
| 2.1875 | 2 |
Prosjekt 4 - Raspberry pi/waveshare/yrApi/yr_data.py | stellanova88/DigiFab | 0 | 12791540 | """
<NAME>
json from yr. Location Remmen, Halden: lat: 59.1304, lon: 11.3546, altitude: ca. 80
https://api.met.no/weatherapi/locationforecast/2.0/#!/data/get_compact_format
request api: https://api.met.no/weatherapi/locationforecast/2.0/compact?altitude=80&lat=63.4305&lon=10.3950
curl: curl -X GET --header 'Accept: application/json' 'https://api.met.no/weatherapi/locationforecast/2.0/compact?altitude=80&lat=59.1304&lon=11.3545'
"""
import requests # api module
import json #Save as Json
# Remmen location and altitude:
lat = "59.1304"
lon = "11.3546"
alt = "80"
# url to yr api
url = "https://api.met.no/weatherapi/locationforecast/2.0/complete.json?altitude=" + alt + "&lat=" + lat + "&lon=" + lon
# Header to tell yr where the request is coming from.
# NB! Find your user-agent and put it in the field below
headers = {
"Content-type": "application/json",
"Cache-Control": "no-cache",
"user-agent": "Put your user-agent here"
}
# get the json api
response = requests.request("GET", url, headers=headers)
if response:
print('Success!')
else:
print('An error has occurred.')
data = response.json()
def write_json_file():
""" Save data as json file """
with open('yr_data_complete_format.json', 'w') as f:
json.dump(data, f)
write_json_file()
# TODO! If-Modified-Since
def updated_time():
""" Time updated at yr """
updated = (data["properties"]["meta"]["updated_at"])
return updated
#print(data["properties"]["timeseries"][0]["data"]["instant"]["details"])
def air_temperature():
""" Return the instant air temperature in celsius """
air_temp = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["air_temperature"])
return air_temp
def wind_speed():
""" Wind speed in m/s """
wind_speed = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["wind_speed"])
return wind_speed
# Precentage value of the total cloud cover at all heights
cloud_area = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["cloud_area_fraction"])
rel_humidity = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["relative_humidity"])
def summary_1_hour():
""" String value giving a summary for +1 hour """
summary_1_hour = (data["properties"]["timeseries"][0]["data"]["next_1_hours"]["summary"]["symbol_code"])
return summary_1_hour
def precipitation_1_hour():
""" Precipitation for +1 hour in mm """
precipitation_1_hour = (data["properties"]["timeseries"][0]["data"]["next_1_hours"]["details"]["precipitation_amount"])
return precipitation_1_hour
def wind_direction():
""" Return the wind from direction """
wind_from_direction = (data["properties"]["timeseries"][0]["data"]["instant"]["details"]["wind_from_direction"])
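    # Note: the sector boundaries below sit at 11.25 + k*45 degrees, so each
    # named direction covers one 45-degree slice of the compass, offset from
    # the conventional 337.5-22.5 North sector.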
    if wind_from_direction > 326.25 or wind_from_direction < 11.25:
        print("North")
        return "North"
    elif wind_from_direction < 56.25:
        print("Northeast")
        return "Northeast"
    elif wind_from_direction < 101.25:
        print("East")
        return "East"
    elif wind_from_direction < 146.25:
        print("Southeast")
        return "Southeast"
    elif wind_from_direction < 191.25:
        print("South")
        return "South"
    elif wind_from_direction < 236.25:
        print("Southwest")
        return "Southwest"
    elif wind_from_direction < 281.25:
        print("West")
        return "West"
    elif wind_from_direction < 326.25:
        print("Northwest")
        return "Northwest"
#print(wind_direction())
| 3.328125 | 3 |
python/testData/resolve/ObjectMethods.py | jnthn/intellij-community | 2 | 12791541 | <filename>python/testData/resolve/ObjectMethods.py
class A:
x = 1
y = 1
class B(A):
def foo(self):
self.__repr__()
# <ref>
| 1.898438 | 2 |
leetcode/Hash Table/242. Valid Anagram.py | yanshengjia/algorithm | 23 | 12791542 | """
Given two strings s and t , write a function to determine if t is an anagram of s.
Example 1:
Input: s = "anagram", t = "nagaram"
Output: true
Example 2:
Input: s = "rat", t = "car"
Output: false
Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
Use a hash table instead of a fixed size counter. Imagine allocating a large size array to fit the entire range of unicode characters, which could go up to more than 1 million. A hash table is a more generic solution and could adapt to any range of characters.
Solution:
    1. Hash Table. Anagram means t contains exactly the same characters as s, with the same counts.
2. Sorting. Sort two strings, if t is an anagram of s, they will be identical.
"""
# Hash Table
# Time-O(N), because accessing the counter table is a constant time operation.
# Space-O(1). Although we do use extra space, the space complexity is O(1) because the table's size stays constant no matter how large n is.
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
d = dict()
for c in s:
if c not in d:
d[c] = 1
else:
d[c] += 1
for c in t:
if c not in d:
return False
else:
d[c] -= 1
for k,v in d.items():
if v != 0:
return False
return True
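
# A minimal sketch of solution 2 from the notes above (not part of the
# original submission): two strings are anagrams iff their sorted character
# sequences are identical; O(n log n) time versus the counter's O(n).
class SolutionBySorting(object):
    def isAnagram(self, s, t):
        return sorted(s) == sorted(t)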
| 4.125 | 4 |
mocks/mock_drive_manager.py | shepherdjay/-r-winnipegjets-scripts | 0 | 12791543 | """Module containing the definition of a mocked out GDocs dependency. This is for test use only"""
class MockDriveManager():
"""Mocked out version of DriveManager for test purposes"""
    def __init__(self):
        pass
    def get_file_entries(self):
        pass
    def get_drive_filetype(self):
        pass
    def get_all_books_sheets(self):
        pass
    def get_games_result(self):
        pass
    def convert_rank(self):
        pass
    def get_current_leaders(self):
        pass
    def get_unwritten_leaderboard_games(self):
        pass
    def get_history_game_points(self):
        pass
    def overwrite_leaderboard(self):
        pass
    def update_answerkey_results(self):
        pass
    def update_game_start_time(self):
        pass
    def create_new_sheet(self):
        pass
    def new_response_data_available(self):
        pass
| 2.34375 | 2 |
out/production/mitmproxynew/mitmproxy/addons/browserup/har/har_verifications.py | 580/mitmproxy | 9 | 12791544 | import re
from glom import glom
import json
from jsonschema import validate
from jsonschema import ValidationError
from jsonpath_ng import parse
class HarVerifications:
def __init__(self, har):
self.har = har
def rmatch(self, val, str_rxp):
if val is None:
return False
if isinstance(val, bytes):
val = str(val, "utf-8")
return re.search(str_rxp, val, flags=re.IGNORECASE)
def rmatch_any(self, items, rxp):
if items is None or len(items) == 0 or rxp is None:
return False
for item in items:
if isinstance(item, bytes):
item = str(item, "utf-8")
if self.rmatch(item, rxp):
return True
return False
def rmatch_key_val(self, items, kv):
name = True
value = True
if items is None or len(items) == 0 or kv is None:
return False
for item in items:
if 'name' in kv:
name = self.rmatch(item['name'], kv['name'])
            if 'value' in kv:
                value = self.rmatch(item['value'], kv['value'])
if name and value:
return True
return False
def schema_validate(self, item, schema):
try:
if type(item) == str:
item = json.loads(item)
validate(instance=item, schema=schema)
except (ValidationError, ValueError) as e:
return False
return True
def valid_json(self, item):
if item is None:
return False
try:
json.loads(item)
except ValueError as e:
return False
return True
def has_json_path(self, json_str, json_path):
        if not self.valid_json(json_str):
return False
jsonpath_expr = parse(json_path)
matches = jsonpath_expr.find(json.loads(json_str))
return len(matches) > 0
def current_page(self):
return self.har['log']['pages'][-1]['id']
# Use glom to dig into the har entries, responses and websockets to get down to an array of something or others
# (headers, websocket messages, content then we execute our test against that item
# current, *, or filter
def entries(self, criteria=False):
entry_list = self.har['log']['entries']
har_entry_filters = {
'page': (lambda item, pgref: glom(item, 'pageref', default='') == pgref ),
'status': (lambda item, status: self.rmatch(str(glom(item, 'response.status', default=None)), status)),
'url': (lambda item, url_rxp: self.rmatch(str(glom(item, 'request.url', default=None)), url_rxp)),
'content': (lambda item, content_rxp: self.rmatch(str(glom(item, 'response.content.text', default=None)), content_rxp)),
'content_type': (lambda item, content_type_rxp: self.rmatch(str(glom(item, 'response.content.mimeType', default=None)), content_type_rxp)),
'request_header': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'request.headers',default=[]), match_rgxp)),
'response_header': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'response.headers', default=[]), match_rgxp)),
'request_cookie': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'request.cookies', default=[]), match_rgxp)),
'response_cookie': (lambda item, match_rgxp: self.rmatch_key_val(glom(item, 'response.cookies', default=[]), match_rgxp)),
'websocket_message': (lambda item, ws_rxp: self.rmatch_any(glom(item, ('_webSocketMessages', ['data']), default=[]), ws_rxp)),
'json_valid': (lambda item, _: self.valid_json(str(glom(item, 'response.content.text', default=None)))),
'json_path': (lambda item, path: self.has_json_path(str(glom(item, 'response.content.text', default=None)), path)),
'json_schema': (lambda item, schema: self.schema_validate(str(glom(item, 'response.content.text', default=None)),schema)),
}
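        # The filters compose conjunctively: each supplied criterion prunes
        # the entry list in turn, so the result matches every criterion.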
for filter_name, target_value in criteria.items():
filter_lambda = har_entry_filters[filter_name]
if filter_name == 'page' and target_value == 'current':
target_value = self.current_page()
entry_list = [entry for entry in entry_list if filter_lambda(entry, target_value)]
return entry_list
    def gsize(self, item, path):
        return self.not_neg(glom(item, path, default=0))
    def not_neg(self, val):
        # treat missing (None) and HAR's "unknown" (-1) sizes as zero
        if val is None:
            return 0
        val = int(val)
        return 0 if val == -1 else val
def measure(self, items, measurement):
measurements = {
'request_headers': (lambda item: self.gsize(item, 'request.headersSize')),
'response_headers': (lambda item: self.gsize(item, 'response.headersSize')),
            'request_body': (lambda item: self.gsize(item, 'request.bodySize')),
            'response_body': (lambda item: self.gsize(item, 'response.bodySize')),
            'request': (lambda item: self.gsize(item, 'request.bodySize') + self.gsize(item, 'request.headersSize')),
            'response': (lambda item: self.gsize(item, 'response.bodySize') + self.gsize(item, 'response.headersSize')),
'time': (lambda item: self.gsize(item, 'time')),
}
method = measurements[measurement]
return list(map(method, items))
def present(self, criteria):
return len(self.entries(criteria)) > 0
def not_present(self, criteria):
return len(self.entries(criteria)) == 0
def max(self, criteria, measurement_name):
items = self.entries(criteria)
return max(self.measure(items, measurement_name), default=0)
def get_sum(self, criteria, measurement_name):
items = self.entries(criteria)
return sum(self.measure(items, measurement_name)) | 2.484375 | 2 |
apps/home/utils.py | MySmile/sfchat | 4 | 12791545 | from django.http import JsonResponse, HttpResponseNotFound
from django.template import RequestContext, loader, Template, TemplateDoesNotExist
import logging
logger = logging.getLogger(__name__)
def json_html_response(request, template_name, code, message):
"""
    Provide response in json or html format according to the request content type
:param request: HttpRequest
:param template_name: String
:param code: Integer
:param message: String
:return: JsonResponse|HttpResponseNotFound
:TODO fix format data duplication for v1/utils format for error response
"""
if request.META.get('CONTENT_TYPE') == 'application/json':
response = JsonResponse(status=code, data={'results': {'code': code, 'msg': message}})
else:
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
template = Template(message)
        # honour the supplied status code instead of always answering 404
        response = HttpResponse(template.render(RequestContext(request)), status=code)
return response
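
# The handlers below reuse json_html_response so API clients (requests sent
# with Content-Type: application/json) receive {'results': {'code': ...,
# 'msg': ...}} while browser clients get the rendered error template.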
def e500(request, template_name='500.html'):
return json_html_response(request, template_name, 500, 'Internal Server Error')
def e404(request, template_name='404.html'):
return json_html_response(request, template_name, 404, 'Not Found')
def csrf_failure(request, reason=""):
logger.error('error 403: ' + str(request))
return json_html_response(request, '403.html', 403, 'Forbidden') | 2.328125 | 2 |
setup.py | stolarczyk/peppy | 15 | 12791546 | <reponame>stolarczyk/peppy<filename>setup.py
#! /usr/bin/env python
import os
import sys
from setuptools import setup
REQDIR = "requirements"
def read_reqs(reqs_name):
deps = []
with open(os.path.join(REQDIR, "requirements-{}.txt".format(reqs_name)), "r") as f:
for l in f:
if not l.strip():
continue
# deps.append(l.split("=")[0].rstrip("<>"))
deps.append(l)
return deps
# Additional keyword arguments for setup().
extra = {}
# Ordinary dependencies
DEPENDENCIES = read_reqs("all")
# numexpr for pandas
try:
import numexpr
except ImportError:
# No numexpr is OK for pandas.
pass
else:
# pandas 0.20.2 needs updated numexpr; the claim is 2.4.6, but that failed.
DEPENDENCIES.append("numexpr>=2.6.2")
extra["install_requires"] = DEPENDENCIES
# Additional files to include with package
def get_static(name, condition=None):
static = [
os.path.join(name, f)
for f in os.listdir(
os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
)
]
if condition is None:
return static
else:
return [i for i in filter(lambda x: eval(condition), static)]
# scripts to be added to the $PATH
# scripts = get_static("scripts", condition="'.' in x")
scripts = None
with open("peppy/_version.py", "r") as versionfile:
version = versionfile.readline().split()[-1].strip("\"'\n")
with open("README.md") as f:
long_description = f.read()
setup(
name="peppy",
packages=["peppy"],
version=version,
description="A python-based project metadata manager for portable encapsulated projects",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords="project, metadata, bioinformatics, sequencing, ngs, workflow",
url="https://github.com/pepkit/peppy/",
author=u"<NAME>, <NAME>, <NAME>, <NAME>",
license="BSD2",
scripts=scripts,
include_package_data=True,
test_suite="tests",
tests_require=read_reqs("dev"),
setup_requires=(
["pytest-runner"] if {"test", "pytest", "ptr"} & set(sys.argv) else []
),
**extra
)
| 2.296875 | 2 |
tests.py | iRobotCorporation/cfn-yaml-tags | 7 | 12791547 | <reponame>iRobotCorporation/cfn-yaml-tags
import unittest
import six
from six.moves import reload_module
import json
import yaml
import cfn_yaml_tags
class CfnYamlTagTest(unittest.TestCase):
def setUp(self):
for module in [yaml.representer, yaml.dumper, yaml.constructor, yaml.loader, yaml]:
reload_module(module)
reload_module(cfn_yaml_tags)
self.doc = """
AndTest: !And
- Condition1
- Condition2
AZsTest: !GetAZs us-east-1
Base64Test: !Base64 abc
ConditionTest: !Condition MyCondition
EqualsTest: !Equals [Value1, Value2]
FindInMapTest: !FindInMap [MapName, TopLevelKey, SecondLevelKey]
GetAttListTest: !GetAtt [ResourceName, AttName]
GetAttStringTest: !GetAtt ResourceName.AttName
IfTest: !If
- Condition
- ValueIfTrue
- ValueIfFalse
ImportValueTest: !ImportValue ImportName
JoinTest: !Join
- ' '
- - hello
- world
NotTest: !Not [Condition]
OrTest: !Or
- Condition1
- Condition2
RefTest: !Ref ResourceName
SelectTest: !Select [0, [1, 2, 3]]
SplitTest: !Split [',', 'foo/bar']
SubTest: !Sub
- '$foo'
- foo: bar
NestedTest: !If
- !And
- !Not [!Condition MyCondition]
- !Join
- ' '
- - !Ref MyResource
- !Sub
- $foo
- foo: !GetAZs us-east-1
- ValueIfTrue
- ValueIfFalse
"""
self.obj = {
'AndTest': cfn_yaml_tags.And(['Condition1', 'Condition2']),
'AZsTest': cfn_yaml_tags.GetAZs('us-east-1'),
'Base64Test': cfn_yaml_tags.Base64('abc'),
'ConditionTest': cfn_yaml_tags.Condition('MyCondition'),
'EqualsTest': cfn_yaml_tags.Equals(['Value1', 'Value2']),
'FindInMapTest': cfn_yaml_tags.FindInMap(['MapName', 'TopLevelKey', 'SecondLevelKey']),
'GetAttListTest': cfn_yaml_tags.GetAtt(['ResourceName', 'AttName']),
'GetAttStringTest': cfn_yaml_tags.GetAtt('ResourceName.AttName'),
'IfTest': cfn_yaml_tags.If(['Condition', 'ValueIfTrue', 'ValueIfFalse']),
'ImportValueTest': cfn_yaml_tags.ImportValue('ImportName'),
'JoinTest': cfn_yaml_tags.Join([' ', ['hello', 'world']]),
'NotTest': cfn_yaml_tags.Not(['Condition']),
'OrTest': cfn_yaml_tags.Or(['Condition1', 'Condition2']),
'RefTest': cfn_yaml_tags.Ref('ResourceName'),
'SelectTest': cfn_yaml_tags.Select([0, [1, 2, 3]]),
'SplitTest': cfn_yaml_tags.Split([',', 'foo/bar']),
'SubTest': cfn_yaml_tags.Sub(['$foo', {'foo': 'bar'}]),
'NestedTest': cfn_yaml_tags.If([
cfn_yaml_tags.And([
cfn_yaml_tags.Not([cfn_yaml_tags.Condition('MyCondition')]),
cfn_yaml_tags.Join([
' ',
[
cfn_yaml_tags.Ref('MyResource'),
cfn_yaml_tags.Sub([
'$foo',
{
'foo': cfn_yaml_tags.GetAZs('us-east-1'),
},
])
]
]),
]),
'ValueIfTrue',
'ValueIfFalse',
])
}
def test_load(self):
loaded_obj = yaml.load(self.doc)
self.assertEqual(loaded_obj, self.obj)
def test_dump(self):
dumped = yaml.dump(self.obj)
def test_json(self):
json.JSONEncoder().encode({'Fn::ImportValue': 'ImportName'})
dumped = cfn_yaml_tags.JSONFromYAMLEncoder().encode(self.obj)
json_obj = json.loads(dumped)
self.assertEqual(json_obj['GetAttListTest']['Fn::GetAtt'], ['ResourceName', 'AttName'])
self.assertEqual(json_obj['GetAttStringTest']['Fn::GetAtt'], ['ResourceName', 'AttName'])
ref_obj = {'RefTest': cfn_yaml_tags.Ref('ResourceName.AttName')}
dumped = cfn_yaml_tags.JSONFromYAMLEncoder().encode(ref_obj)
json_obj = json.loads(dumped)
self.assertIn('Fn::GetAtt', json_obj['RefTest'])
self.assertNotIn('Ref', json_obj['RefTest'])
self.assertEqual(json_obj['RefTest']['Fn::GetAtt'], ['ResourceName', 'AttName'])
def test_safe_load_fail(self):
with self.assertRaises(yaml.constructor.ConstructorError):
yaml.safe_load(self.doc)
def test_safe_load_ok(self):
cfn_yaml_tags.mark_safe()
loaded_obj = yaml.safe_load(self.doc)
self.assertEqual(loaded_obj, self.obj)
def test_safe_dump_fail(self):
with self.assertRaises(yaml.representer.RepresenterError):
dumped = yaml.safe_dump(self.obj)
def test_safe_dump_ok(self):
cfn_yaml_tags.mark_safe()
dumped = yaml.safe_dump(self.doc)
if __name__ == '__main__':
unittest.main() | 2.265625 | 2 |
prologlib/builtin/iso.py | gpiancastelli/prologlib | 0 | 12791548 | from ..parser import Atomic, Variable, Compound, List
from ..parser import isvariable, isatom, isnumber, islist, ispartiallist, iscallable
from ..core import BuiltIn
###
### Term unification (ISO 8.2)
###
class Unify_2(BuiltIn):
"""'='(?term, ?term)
If X and Y are NSTO (Not Subject To Occur-check) then '='(X, Y) is true
iff X and Y are unifiable."""
def execute(self, x, y):
# TODO prologlib crashes if you attempt to unify two STO terms by =/2
# instead of using the proper unify_with_occur_check/2 predicate.
return self.unify(x, y)
class NotUnifiable_2(BuiltIn):
"""'\\='(@term, @term)
If X and Y are NSTO (Not Subject To Occur-check) then '\\='(X, Y) is true
iff X and Y are not unifiable."""
def execute(self, x, y):
from .. import core
return core.unify(x, y) is None
###
### Type testing (ISO 8.3)
###
class Var_1(BuiltIn):
'''var(@term)
var(X) is true iff X is a member of the set V.'''
def execute(self, x):
return isvariable(x)
class Atom_1(BuiltIn):
'''atom(@term)
atom(X) is true iff X is a member of the set A.'''
def execute(self, x):
return isatom(x)
class Integer_1(BuiltIn):
'''integer(@term)
integer(X) is true iff X is a member of the set I.'''
def execute(self, x):
return x.arity == 0 and isinstance(x.value, int)
class Float_1(BuiltIn):
'''float(@term)
float(X) is true iff X is a member of the set F.'''
def execute(self, x):
return x.arity == 0 and isinstance(x.value, float)
class Atomic_1(BuiltIn):
'''atomic(@term)
atomic(X) is true if X is a member of the set A or I
or F and is false if X is a member of the set V or CT.'''
def execute(self, x):
return isinstance(x, Atomic)
class Compound_1(BuiltIn):
'''compound(@term)
compound(X) is true iff X is a member of the set CT.'''
def execute(self, x):
return isinstance(x, (Compound, List))
class Nonvar_1(BuiltIn):
'''nonvar(@term)
nonvar(X) is true iff X is not a member of the set V.'''
def execute(self, x):
return not isvariable(x)
class Number_1(BuiltIn):
'''number(@term)
number(X) is true if X is a member of the set I or F
and is false if X is a member of the set V, A, or CT.'''
def execute(self, x):
return isnumber(x)
###
### Term comparison (ISO 8.4)
###
class TermLessThanOrEqual_2(BuiltIn):
"""'@=<'(@term, @term)
Test the ordering of two terms. '@=<'(X, Y) is true iff
    X precedes Y or X and Y are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return True
return x <= y
class TermIdentical_2(BuiltIn):
"""'=='(@term, @term)
Test the ordering of two terms. '=='(X, Y) is true iff
X and Y are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return False
return x == y
class TermNotIdentical_2(BuiltIn):
"""'\=='(@term, @term)
Test the ordering of two terms. '\=='(X, Y) is true iff
X and Y are not identical terms."""
def execute(self, x, y):
# The Python __ne__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return True
return x != y
class TermLessThan_2(BuiltIn):
"""'@<'(@term, @term)
Test the ordering of two terms. '@<'(X, Y) is true iff
    X precedes Y."""
def execute(self, x, y):
return x < y
class TermGreaterThan_2(BuiltIn):
"""'@>(@term, @term)
Test the ordering of two terms. '@>'(X, Y) is true iff
    Y precedes X."""
def execute(self, x, y):
return x > y
class TermGreaterThanOrEqual_2(BuiltIn):
"""'@>=(@term, @term)
Test the ordering of two terms. '@>='(X, Y) is true iff
    Y precedes X or Y and X are identical terms."""
def execute(self, x, y):
# The Python __eq__ method does not hold Prolog
# semantics for anonymous variables
if (isvariable(x) and isvariable(y) and
x.name == '_' and y.name == '_'):
return False
return x >= y
###
### Term creation and decomposition (ISO 8.5)
###
class Functor_3(BuiltIn):
'''functor(-nonvar, +atomic, +integer)
functor(+nonvar, ?atomic, ?integer)
functor(Term, Name, Arity) is true iff:
* Term is a compound term with a functor whose identifier
is Name and arity Arity, or
* Term is an atomic term equal to Name and Arity is 0.'''
def execute(self, term, name, arity):
if isvariable(term) and isvariable(name):
self.throw_instantiation_error()
if isvariable(term) and isvariable(arity):
self.throw_instantiation_error()
if isvariable(term) and not isinstance(arity.value, int):
self.throw_type_error('integer', arity)
# TODO Missing max_arity related error
if isvariable(term) and arity.value < 0:
self.throw_domain_error('not_less_than_zero', arity)
if isvariable(term) and not isinstance(name, Atomic):
self.throw_type_error('atomic', name)
if isvariable(term) and not isatom(name) and arity.value > 0:
self.throw_type_error('atom', name)
if isinstance(term, Atomic):
return self.unify(term, name) and self.unify(arity, Atomic(0))
if isinstance(term, (Compound, List)):
return (self.unify(Atomic(term.name), name) and
self.unify(Atomic(term.arity), arity))
if isinstance(term, Variable):
if isinstance(name, Atomic) and arity.value == 0:
return self.unify(term, name)
if isatom(name) and arity.value > 0:
t = (Variable('_') for i in range(arity.value))
c = Compound(name.name, *t)
return self.unify(term, c)
return False
class Arg_3(BuiltIn):
'''arg(+integer, +compound_term, ?term)
arg(N, Term, Arg) is true iff the Nth argument of Term is Arg.'''
def execute(self, n, term, arg):
if isvariable(n) or isvariable(term):
self.throw_instantiation_error()
if not isinstance(n.value, int):
self.throw_type_error('integer', n)
if not isinstance(term, Compound):
self.throw_type_error('compound', term)
if n.value < 0:
self.throw_domain_error('not_less_than_zero', n)
        # ISO semantics: arg(0, Term, Arg) and out-of-range N fail rather
        # than erroring (term.value[0] holds the functor name, not an argument)
        if n.value == 0 or n.value >= len(term.value):
            return False
        return self.unify(arg, term.value[n.value])
class Univ_2(BuiltIn):
"""'=..'(+nonvar, ?list)
'=..'(-nonvar, +list)
'=..'(Term, List) is true iff:
* Term is an atomic term and List is the list whose only
element is Term, or
* Term is a compound term and List is the list whose head
is the functor name of Term and whose tail is a list of the
arguments of Term."""
def execute(self, term, elements):
if isvariable(term) and ispartiallist(elements):
self.throw_instantiation_error()
if not islist(elements) and not ispartiallist(elements):
self.throw_type_error('list', elements)
if isvariable(term) and islist(elements) and isvariable(elements.head):
self.throw_instantiation_error()
if islist(elements) and not isatom(elements.head) and len(elements) > 1:
self.throw_type_error('atom', elements.head)
if islist(elements) and isinstance(elements.head, Compound) and len(elements) > 1:
self.throw_type_error('atomic', elements.head)
if isvariable(term) and elements == List.EMPTY_LIST:
self.throw_domain_error('non_empty_list', elements)
# TODO Missing max_arity related error
if isinstance(term, Atomic):
l = List(term)
return self.unify(elements, l)
if isinstance(term, Compound):
l = List.from_list([Atomic(term.name)] + list(term.value[1:]))
return self.unify(elements, l)
if isinstance(term, Variable):
# elements is a list
if elements.name == '.' and elements.arity == 2:
if len(elements) == 1:
t = elements.head
return self.unify(term, t)
elif len(elements) > 1:
name = elements.head.name
t = Compound(name, *elements.as_list()[1:])
return self.unify(term, t)
else:
return False
else:
return False
class CopyTerm_2(BuiltIn):
'''copy_term(?term, ?term)
copy_term(Term_1, Term_2) is true iff Term_2 unifies with
a term T which is a renamed copy of Term_1.'''
def execute(self, t1, t2):
from .. import core
#t = core.renamed_copy(t1)
t = t1._copy_term()
# Can't directly use BuiltIn.unify because the bindings
# between the renamed copy of t1 and t2 retain validity
# only in the context of the copy_term/2 built-in
mgu = core.unify(t2, t)
if mgu is not None:
if mgu:
t2.apply(mgu)
# Do not propagate renamed term variables bindings
# outside the context of the copy_term/2 built-in
if t2.name in mgu:
# Still preserve the binding for t2 just in
# case t2 were a renamed variable (e.g. coming
# from a clause renaming)
temp = mgu[t2.name]
mgu.reduce()
mgu.update({t2.name : temp})
else:
mgu.reduce()
self.substitution.update(mgu)
return True
return False
###
### Arithmetic evaluation (ISO 8.6)
### Simple arithmetic functors (ISO 9.1)
### Other arithmetic functors (ISO 9.3)
### Bitwise functors (ISO 9.4)
###
class Is_2(BuiltIn):
"""is(?term, @evaluable)
'is'(Result, Expression) is true iff the value of evaluating
Expression as an expression is Result."""
def execute(self, result, expression):
if isvariable(expression):
self.throw_instantiation_error()
c = evaluate_expression(expression)
return self.unify(result, Atomic(c))
def evaluate_expression(term):
# TODO No overflow/underflow errors
# TODO No undefined errors
if isvariable(term):
from ..core import PrologInstantiationError
raise PrologInstantiationError()
if term.arity == 0 and term._isnumber():
return term.value
if isinstance(term, Compound):
from ..core import deref
args = (evaluate_expression(deref(a)) for a in term.value[1:])
pi = term.predicate_indicator()
functor = search_evaluable_functor(pi)
if not functor:
from ..core import PrologTypeError
raise PrologTypeError('evaluable', Atomic(pi))
return functor(*args)
from ..core import PrologTypeError
raise PrologTypeError('number', term)
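
# Illustrative walk-through (hypothetical term, not from the source):
# evaluating '+'(1, '*'(2, 3)) recurses into the compound's arguments,
# resolves '+/2' and '*/2' via the table below, and yields the Python value 7.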
def search_evaluable_functor(name):
import math
import operator
d = {'+/2' : operator.add, '*/2' : operator.mul, '-/2' : operator.sub,
'-/1' : operator.neg, '//2' : divide, '///2' : intdivide,
'mod/2' : module, 'rem/2' : module, 'floor/1' : math.floor,
'round/1' : round, 'ceiling/1' : math.ceil, 'truncate/1' : math.trunc,
'float/1' : float, 'abs/1' : operator.abs, 'sign/1' : sign,
'float_integer_part/1' : float_integer_part,
'float_fractional_part/1' : float_fractional_part,
'**/2' : power, 'sin/1' : math.sin, 'cos/1' : math.cos,
'atan/1' : math.atan, 'exp/1' : math.exp, 'log/1' : logarithm,
'sqrt/1' : squareroot,
'>>/2' : rightshift, '<</2' : leftshift,
'/\\/2' : bitand, '\\//2' : bitor, '\\/1' : bitnot}
return d.get(name)
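
# An observation on the table above: ISO 'rem'/2 is a truncating remainder
# while 'mod'/2 is a flooring one, so they differ for mixed-sign operands;
# mapping both to module() (Python's flooring %) is a simplification.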
def divide(x, y):
'''Redefined w.r.t. Python because in ISO Prolog div(x, y)
with x and y integers is equivalent to intdiv(x, y). Also,
we need to manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if isinstance(x, int) and isinstance(y, int):
return x // y
return x / y
def intdivide(x, y):
'''Redefined w.r.t. Python because in ISO Prolog x // y
is valid only when x and y are integers. Also, we need to
manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(y))
return x // y
def module(x, y):
'''Redefined w.r.t. Python because in ISO Prolog mod(x, y)
is valid only when x and y are integers. Also, we need to
manage ZeroDivisionError errors on our own.'''
if not y:
from ..core import PrologEvaluationError
raise PrologEvaluationError('zero_divisor')
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
if not isinstance(y, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(y))
return x % y
def sign(x):
'''Redefined w.r.t. Python because in ISO Prolog sign(x)
must return the same type of number as its input.'''
if not x:
return 0 if isinstance(x, int) else 0.0
from math import copysign
s = copysign(1, x)
return int(s) if isinstance(x, int) else s
def float_integer_part(x):
'''Redefined w.r.t. Python because in ISO Prolog
float_integer_part(x) is valid only when x is a float.'''
if not isinstance(x, float):
from ..core import PrologTypeError
raise PrologTypeError('float', Atomic(x))
from math import modf
f, i = modf(x)
return i
def float_fractional_part(x):
'''Redefined w.r.t. Python because in ISO Prolog
float_fractional_part(x) is valid only when x is a float.'''
if not isinstance(x, float):
from ..core import PrologTypeError
raise PrologTypeError('float', Atomic(x))
from math import modf
f, i = modf(x)
return f
def power(x, y):
'''Redefined w.r.t. Python because in ISO Prolog x ** y
with x < 0 is defined only when y is an integer, and
always returns a float. Also, we need to manage
ZeroDivisionError errors on our own.'''
if x < 0 and isinstance(y, float):
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
if not x and y < 0:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
return float(x ** y)
def logarithm(x):
'''Redefined w.r.t. Python because we need to manage
ValueError errors (e.g. for log(0)) on our own.'''
if not x:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
from math import log
return log(x)
def squareroot(x):
'''Redefined w.r.t. Python because we need to manage
ValueError errors (e.g. for x < 0) on our own.'''
if x < 0:
from ..core import PrologEvaluationError
raise PrologEvaluationError('undefined')
from math import sqrt
return sqrt(x)
def rightshift(n, s):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. n as float) on our own.'''
if not isinstance(n, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(s, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return n >> s
def leftshift(n, s):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. n as float) on our own.'''
if not isinstance(n, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(n))
if not isinstance(s, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(s))
return n << s
def bitand(x, y):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. x or y as float) on our own.'''
    if not isinstance(x, int):
        from ..core import PrologTypeError
        raise PrologTypeError('integer', Atomic(x))
    if not isinstance(y, int):
        from ..core import PrologTypeError
        raise PrologTypeError('integer', Atomic(y))
return x & y
def bitor(x, y):
'''Redefined w.r.t. Python because we need to manage
TypeError errors (e.g. x or y as float) on our own.'''
    if not isinstance(x, int):
        from ..core import PrologTypeError
        raise PrologTypeError('integer', Atomic(x))
    if not isinstance(y, int):
        from ..core import PrologTypeError
        raise PrologTypeError('integer', Atomic(y))
return x | y
def bitnot(x):
'''Redefined w.r.t. Python because we need to manage
    TypeError errors (e.g. x as float) on our own.'''
if not isinstance(x, int):
from ..core import PrologTypeError
raise PrologTypeError('integer', Atomic(x))
return ~x
###
### Arithmetic comparison (ISO 8.7)
###
class ArithmeticEqual_2(BuiltIn):
"""'=:='(@evaluable, @evaluable)
'=:='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic values are equal."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 == v2
class ArithmeticNotEqual_2(BuiltIn):
"""'=\='(@evaluable, @evaluable)
'=\='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic values are not equal."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 != v2
class ArithmeticLessThan_2(BuiltIn):
"""'<'(@evaluable, @evaluable)
'<'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is less than the
corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 < v2
class ArithmeticLessThanOrEqual_2(BuiltIn):
"""'=<'(@evaluable, @evaluable)
'=<'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is less than or
equal to the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 <= v2
class ArithmeticGreaterThan_2(BuiltIn):
"""'>'(@evaluable, @evaluable)
'>'(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is greater than
the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 > v2
class ArithmeticGreaterThanOrEqual_2(BuiltIn):
"""'>='(@evaluable, @evaluable)
'>='(E1, E2) is true iff evaluating E1 and E2 as expressions
the corresponding arithmetic value of E1 is greater than or
equal to the corresponding arithmetic value of E2."""
def execute(self, e1, e2):
if isvariable(e1) or isvariable(e2):
self.throw_instantiation_error()
v1 = evaluate_expression(e1)
v2 = evaluate_expression(e2)
return v1 >= v2
###
### Clause retrieval and information (ISO 8.8)
###
class Clause_2(BuiltIn):
'''clause(+head, ?callable_term)
clause(Head, Body) is true iff:
* the predicate of Head is public, and
* there is a clause in the database which corresponds
to a term H :- B which unifies with Head :- Body.'''
def execute(self, head, body):
if isvariable(head):
self.throw_instantiation_error()
if not iscallable(head):
self.throw_type_error('callable', head)
if not (isvariable(body) or iscallable(body)):
self.throw_type_error('callable', body)
self.clauses = []
procedure = self.kb.search(head)
if not procedure:
return False
if not procedure._public:
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('access', 'private_procedure', pi)
from .. import core
for clause in procedure.clauses():
h, b = convert_clause_to_term(clause.head(), clause.body())
if (core.unify(h, head) is not None and
core.unify(b, body) is not None):
self.clauses.append(Compound('clause', h, b))
return self.pick_clause(head, body)
def reexecute(self, head, body):
self.reset_substitution()
return self.pick_clause(head, body)
def pick_clause(self, head, body):
if not self.clauses:
return False
c = self.clauses.pop(0)
return self.unify(Compound('clause', head, body), c)
def convert_clause_to_term(head, body):
return (convert_to_term(head), convert_to_term(body))
def convert_to_term(head):
if head.arity == 0:
return Atomic(head.name)
from ..core import renamed_copy
return renamed_copy(head)
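# Illustrative use of clause/2, assuming a public user-defined
# procedure p/1 with the clauses "p(1)." and "p(X) :- q(X).":
#   ?- clause(p(1), B).      B = true
#   ?- clause(p(X), q(X)).   succeeds
# Asking for the clauses of a private procedure raises
# permission_error(access, private_procedure, Name/Arity).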
class CurrentPredicate_1(BuiltIn):
'''current_predicate(?predicate_indicator)
current_predicate(PI) is true iff PI is a predicate indicator
for one of the user-defined procedures in the database.'''
def execute(self, pi):
if not isvariable(pi) and not (pi.name == '/' and pi.arity == 2):
self.throw_type_error('predicate_indicator', pi)
self.indicators = []
for i in self.kb:
n, a = i.split('/')
indicator = Compound('/', Atomic(n), Atomic(int(a)))
from .. import core
if core.unify(pi, indicator) is not None:
self.indicators.append(indicator)
return self.pick_indicator(pi)
def reexecute(self, pi):
self.reset_substitution()
return self.pick_indicator(pi)
def pick_indicator(self, pi):
if not self.indicators:
return False
# the order in which predicate indicators are found by
# current_predicate/1 is implementation dependent
i = self.indicators.pop()
return self.unify(pi, i)
###
### Clause creation and destruction (ISO 8.9)
###
class Asserta_1(BuiltIn):
'''asserta(@clause)
asserta(Clause) is true. It is used to add Clause to the
database before all existing clauses of the procedure whose
predicate is equal to the functor of the head of Clause.'''
def execute(self, clause):
head = clause.value[1] if clause.predicate_indicator() == ':-/2' else clause
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# errors on the conversion of the clause body to a
# goal and on access permission to a user-defined
# procedure are handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.kb.assert_clause(clause, append=False)
return True
class Assertz_1(BuiltIn):
'''assertz(@clause)
assertz(Clause) is true. It is used to add Clause to the
database after all existing clauses of the procedure whose
predicate is equal to the functor of the head of Clause.'''
def execute(self, clause):
head = clause.value[1] if clause.predicate_indicator() == ':-/2' else clause
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# errors on the conversion of the clause body to a
# goal and on access permission to a user-defined
# procedure are handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.kb.assert_clause(clause, append=True)
return True
class Retract_1(BuiltIn):
'''retract(+clause)
retract(Clause) is true iff the database contains at least
one dynamic procedure with a clause Clause which unifies
with Head :- Body. It is used to remove those unifying
clauses from the database.'''
def execute(self, clause):
if clause.predicate_indicator() == ':-/2':
head = clause.value[1]
body = clause.value[2]
else:
head = clause
body = Atomic.TRUE
if isvariable(head):
self.throw_instantiation_error()
if isnumber(head):
self.throw_type_error('callable', head)
# error on access permission to a user-defined
# procedure is handled directly by the database
from ..builtin import search_builtin
if search_builtin(head):
pi = Compound('/', Atomic(head.name), Atomic(head.arity))
self.throw_permission_error('modify', 'static_procedure', pi)
self.clauses_to_unify = []
self.clauses_to_remove = []
procedure = self.kb.search(head)
if not procedure:
return False
from .. import core
for clause in procedure.clauses():
h, b = convert_clause_to_term(clause.head(), clause.body())
if (core.unify(h, head) is not None and
core.unify(b, body) is not None):
self.clauses_to_unify.append(Compound('clause', h, b))
self.clauses_to_remove.append(clause)
return self.pick_clause(head, body)
def reexecute(self, clause):
self.reset_substitution()
if clause.predicate_indicator() == ':-/2':
head = clause.value[1]
body = clause.value[2]
else:
head = clause
body = Atomic.TRUE
return self.pick_clause(head, body)
def pick_clause(self, head, body):
if not self.clauses_to_remove:
return False
self.kb.retract(self.clauses_to_remove.pop(0))
c = self.clauses_to_unify.pop(0)
return self.unify(Compound('clause', head, body), c)
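# Illustrative behaviour of retract/1, assuming a dynamic procedure
# p/1 with the clauses "p(1)." and "p(2).":
#   ?- retract(p(1)).   succeeds and removes p(1)
#   ?- retract(p(X)).   X = 1 on first execution, X = 2 on
#                       re-execution, then fails; each solution
#                       removes the clause it unified with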
class Abolish_1(BuiltIn):
'''abolish(@predicate_indicator)
abolish(Pred) is true. It is used to remove from the database
the procedure specified by the predicate indicator Pred and
all its clauses, leaving the database in the same state as if
the procedure identified by Pred had never existed.'''
def execute(self, pi):
if isvariable(pi):
self.throw_instantiation_error()
if pi.name == '/' and pi.arity == 2:
name, arity = pi.value[1:]
if isvariable(name) or isvariable(arity):
self.throw_instantiation_error()
if not isinstance(arity.value, int):
self.throw_type_error('integer', arity)
if not isatom(name):
self.throw_type_error('atom', name)
# TODO Missing max_arity related errors
if arity.value < 0:
self.throw_domain_error('not_less_than_zero', arity)
# error on access permission to a user-defined
# procedure is handled directly by the database
t = tuple(Variable('_') for i in range(arity.value))
c = Compound(name.name, *t)
from ..builtin import search_builtin
if search_builtin(c):
self.throw_permission_error('modify', 'static_procedure', pi)
else:
self.throw_type_error('predicate_indicator', pi)
self.kb.abolish(pi)
return True
###
### All solutions (ISO 8.10)
###
class Findall_3(BuiltIn):
'''findall(?term, +callable_term, ?list)
findall(Template, Goal, Instances) is true iff Instances
unifies with the list of values to which a variable X not
occurring in Template or Goal would be instantiated by
successive re-executions of "call(Goal), X=Template" after
systematic replacement of all variables in X by new
variables.'''
def execute(self, template, goal, instances):
if isvariable(goal):
self.throw_instantiation_error()
if isnumber(goal):
self.throw_type_error('callable', goal)
if (not isvariable(instances) and
(not islist(instances) and not ispartiallist(instances))):
self.throw_type_error('list', instances)
from .. import core
caller = core.Caller()
caller._kb = self.kb
values = []
result = caller.solve(goal)
while result:
from copy import deepcopy
v = ground(deepcopy(template), caller.currsubst())
#values.append(core.renamed_copy(v))
values.append(v._copy_term())
result = caller.solve_next()
values = List.EMPTY_LIST if not values else List.from_list(values)
return self.unify(values, instances)
def ground(term, mgu):
if isinstance(term, Variable):
if not term.value:
value = mgu.get(term.name)
if value:
return value
else:
return ground(term.binding(), mgu)
if isinstance(term, Compound):
args = []
for arg in term.value[1:]:
args.append(ground(arg, mgu))
return Compound(term.name, *args)
return term
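# Illustrative use of findall/3 (solutions are collected in the order
# they are found; this assumes a member/2 procedure is available):
#   ?- findall(X, member(X, [a, b, a]), L).   L = [a, b, a]
#   ?- findall(X, fail, L).                   L = []
#   ?- findall(X, G, L).                      instantiation_error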
class Bagof_3(BuiltIn):
'''bagof(?term, +callable_term, ?list)
bagof(Template, Goal, Instances) assembles as a list the
solutions of Goal for each different instantiation of the
free variables in it. The elements of each list are in
order of solution, but the order in which each list is
found is undefined.'''
def execute(self, template, goal, instances):
fvs = free_variable_set(goal, template)
self.witness = Compound('witness', *fvs) if fvs else Atomic('witness')
g = iterated_goal_term(goal)
findall = Findall_3(self.kb)
findall.execute(Compound('+', self.witness, template), g, Variable('S'))
s = findall.substitution['S']
self.s = self._create_solution_list(s)
if not self.s:
return False
return self.pick_bag(template, goal, instances)
def reexecute(self, template, goal, instances):
self.reset_substitution()
if not self.s:
return False
return self.pick_bag(template, goal, instances)
def pick_bag(self, template, goal, instances):
wt = self.s[0]
wt_list = [e for e in self.s if isvariant(wt.value[1], e.value[1])]
t_list = [e.value[2] for e in wt_list]
s_next = [e for e in self.s if e not in wt_list]
from .. import core
for wwtt, t in zip(wt_list, t_list):
ww = wwtt.value[1]
#from copy import deepcopy
#subst = core.unify(ww, deepcopy(self.witness))
subst = core.unify(ww, self.witness)
ww.apply(subst)
t.apply(subst)
self.substitution.update(subst)
t_list = List.from_list(t_list)
self.s = s_next
return self.unify(t_list, instances)
def _create_solution_list(self, s):
return [] if s == List.EMPTY_LIST else s.as_list()
class Setof_3(Bagof_3):
'''setof(?term, +callable_term, ?list)
setof/3 assembles as a list the solutions of a goal for each different
instantiation of the free variables in that goal. Each list is a sorted
list, but the order in which each list is found is undefined.'''
def _create_solution_list(self, s):
solutions = [] if s == List.EMPTY_LIST else s.as_list()
solutions = list(set(solutions))
solutions.sort()
return solutions
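# bagof/3 versus setof/3 on the same data (illustrative, assuming the
# facts "age(peter, 7).  age(ann, 11).  age(pat, 8).  age(tom, 5)."):
#   ?- bagof(A, N^age(N, A), L).   L = [7, 11, 8, 5]   solution order
#   ?- setof(A, N^age(N, A), L).   L = [5, 7, 8, 11]   sorted, no duplicates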
###
### Logic and control (ISO 8.15)
###
class Not_1(BuiltIn):
"""not(@callable_term)
not(Term) is true iff call(Term) is false."""
# """'\\+'(@callable_term)
#
# '\\+'(Term) is true iff call(Term) is false."""
def execute(self, term):
if isvariable(term):
self.throw_instantiation_error()
if isnumber(term):
self.throw_type_error('callable', term)
from .. import core
caller = core.Caller()
caller._kb = self.kb
result = caller.solve(term)
return not result
class Repeat_0(BuiltIn):
'''repeat
repeat is true. repeat is re-executable.
'''
def execute(self):
return True
def reexecute(self):
return True
###
### Atomic term processing (ISO 8.16)
###
class AtomLength_2(BuiltIn):
'''atom_length(+atom, ?integer)
atom_length(Atom, Length) is true iff integer Length
equals the number of characters of the name of the
atom Atom.'''
def execute(self, atom, length):
if isvariable(atom):
self.throw_instantiation_error()
if not isatom(atom):
self.throw_type_error('atom', atom)
if (not isvariable(length) and
not (isnumber(length) and isinstance(length.value, int))):
self.throw_type_error('integer', length)
if isnumber(length) and length.value < 0:
self.throw_domain_error('not_less_than_zero', length)
size = Atomic(len(atom.name))
return self.unify(length, size)
class AtomConcat_3(BuiltIn):
'''atom_concat(?atom, ?atom, +atom)\natom_concat(+atom, +atom, -atom)
atom_concat(Atom_1, Atom_2, Atom_12) is true iff characters
of the name of the atom Atom_12 are the result of concatenating
the characters of the name of the atom Atom_2 to the characters
of the name of the atom Atom_1.'''
def execute(self, atom1, atom2, atom12):
if isvariable(atom1) and isvariable(atom12):
self.throw_instantiation_error()
if isvariable(atom2) and isvariable(atom12):
self.throw_instantiation_error()
if not isvariable(atom1) and not isatom(atom1):
self.throw_type_error('atom', atom1)
if not isvariable(atom2) and not isatom(atom2):
self.throw_type_error('atom', atom2)
if not isvariable(atom12) and not isatom(atom12):
self.throw_type_error('atom', atom12)
if isvariable(atom1) and isvariable(atom2):
s = atom12.name
self.data = [(s[:i], s[i:], s) for i in range(len(s) + 1)]
elif isvariable(atom1):
s = atom12.name
if not s.endswith(atom2.name):
return False
else:
i = s.index(atom2.name)
self.data = [(s[:i], s[i:], s)]
elif isvariable(atom2):
s = atom12.name
if not s.startswith(atom1.name):
return False
else:
i = len(atom1.name)
self.data = [(s[:i], s[i:], s)]
else:
n1 = atom1.name
n2 = atom2.name
self.data = [(n1, n2, n1 + n2)]
return self.pick_data(atom1, atom2, atom12)
def reexecute(self, atom1, atom2, atom12):
self.reset_substitution()
if not self.data:
return False
return self.pick_data(atom1, atom2, atom12)
def pick_data(self, atom1, atom2, atom12):
c = self.data.pop(0)
return (self.unify(atom1, Atomic(c[0])) and
self.unify(atom2, Atomic(c[1])) and
self.unify(atom12, Atomic(c[2])))
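# The three instantiation modes of atom_concat/3 (illustrative):
#   ?- atom_concat(ab, cd, X).    X = abcd
#   ?- atom_concat(X, cd, abcd).  X = ab
#   ?- atom_concat(X, Y, ab).     X = '', Y = ab ;
#                                 X = a,  Y = b  ;
#                                 X = ab, Y = ''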
class SubAtom_5(BuiltIn):
'''sub_atom(+atom, ?integer, ?integer, ?integer, ?atom)
sub_atom(Atom, Before, Length, After, Sub_atom) is true iff atom Atom can
be broken into three pieces, AtomL, Sub_atom, and AtomR, such that Before
is the number of characters of the name of AtomL, Length is the number of
characters of the name of Sub_atom, and After is the number of characters
of the name of AtomR.'''
def execute(self, atom, before, length, after, subatom):
if isvariable(atom):
self.throw_instantiation_error()
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if not isvariable(subatom) and not isatom(subatom):
self.throw_type_error('atom', subatom)
if (not isvariable(before) and
not (isnumber(before) and isinstance(before.value, int))):
self.throw_type_error('integer', before)
if (not isvariable(length) and
not (isnumber(length) and isinstance(length.value, int))):
self.throw_type_error('integer', length)
if (not isvariable(after) and
not (isnumber(after) and isinstance(after.value, int))):
self.throw_type_error('integer', after)
if isnumber(before) and before.value < 0:
self.throw_domain_error('not_less_than_zero', before)
if isnumber(length) and length.value < 0:
self.throw_domain_error('not_less_than_zero', length)
if isnumber(after) and after.value < 0:
self.throw_domain_error('not_less_than_zero', after)
n = atom.name
start = before.value if isinstance(before, Atomic) else 0
end = len(n) - (after.value if isinstance(after, Atomic) else 0)
self.data = []
while start <= end:
for i in range(start, end + 1):
self.data.append((n[start:i], start))
start += 1
if isinstance(before, Atomic):
self.data = [(d, p) for (d, p) in self.data if n.index(d, p) == before.value]
if isinstance(length, Atomic):
self.data = [(d, p) for (d, p) in self.data if len(d) == length.value]
if isinstance(after, Atomic):
self.data = [(d, p) for (d, p) in self.data if len(n) - n.index(d, p) - len(d) == after.value]
if isinstance(subatom, Atomic):
self.data = [(d, p) for (d, p) in self.data if d == subatom.value]
if not self.data:
return False
return self.pick_data(atom, before, length, after, subatom)
def reexecute(self, atom, before, length, after, subatom):
self.reset_substitution()
if not self.data:
return False
return self.pick_data(atom, before, length, after, subatom)
def pick_data(self, atom, before, length, after, subatom):
s, p = self.data.pop(0)
b = atom.name.index(s, p)
l = len(s)
a = len(atom.name) - (b + l)
return (self.unify(before, Atomic(b)) and
self.unify(length, Atomic(l)) and
self.unify(after, Atomic(a)) and
self.unify(subatom, Atomic(s)))
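# Illustrative solutions of sub_atom/5 (Before, Length and After count
# the characters before, inside and after the sub-atom):
#   ?- sub_atom(abc, 1, 1, A, S).   A = 1, S = b
#   ?- sub_atom(abc, B, 2, _, S).   B = 0, S = ab ; B = 1, S = bc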
class AtomChars_2(BuiltIn):
'''atom_chars(+atom, ?character_list)\natom_chars(-atom, +character_list)
atom_chars(Atom, List) is true iff List is a list whose elements
are the one-char atoms whose names are the successive characters
of the name of atom Atom.'''
def execute(self, atom, charlist):
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if isvariable(atom):
if isvariable(charlist):
self.throw_instantiation_error()
if not islist(charlist) and not ispartiallist(charlist):
self.throw_type_error('list', charlist)
for element in charlist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if isatom(element) and len(element.name) != 1:
self.throw_type_error('character', element)
if isvariable(atom):
from ..core import deref
chars = [deref(c).name for c in charlist.as_list()]
return self.unify(atom, Atomic(''.join(chars)))
elif isvariable(charlist) or islist(charlist) or ispartiallist(charlist):
chars = [Atomic(c) for c in atom.name]
return self.unify(charlist, List.from_list(chars))
else:
chars = [c.name for c in charlist.as_list()]
return atom.name == ''.join(chars)
class AtomCodes_2(BuiltIn):
'''atom_codes(+atom, ?character_code_list)\natom_codes(-atom, +character_code_list)
atom_codes(Atom, List) is true iff List is a list whose elements
correspond to the successive characters of the name of atom Atom,
and the value of each element is the character code for the
corresponding character of the name.'''
def execute(self, atom, codelist):
if not isvariable(atom) and not isatom(atom):
self.throw_type_error('atom', atom)
if isvariable(atom):
if ispartiallist(codelist):
self.throw_instantiation_error()
if not islist(codelist) and not ispartiallist(codelist):
self.throw_type_error('list', codelist)
for element in codelist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(element):
try:
chr(element.value)
                    except (ValueError, TypeError):
self.throw_representation_error(element)
if isvariable(atom):
chars = [chr(code.value) for code in codelist.as_list()]
return self.unify(atom, Atomic(''.join(chars)))
elif isvariable(codelist) or ispartiallist(codelist):
codes = [Atomic(ord(char)) for char in atom.name]
return self.unify(codelist, List.from_list(codes))
else:
chars = [chr(code.value) for code in codelist.as_list()]
return atom.name == ''.join(chars)
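# atom_chars/2 and atom_codes/2 side by side (illustrative):
#   ?- atom_chars(abc, L).     L = [a, b, c]
#   ?- atom_codes(abc, L).     L = [97, 98, 99]
#   ?- atom_chars(X, [a, b]).  X = ab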
class CharCode_2(BuiltIn):
'''char_code(+character, ?character_code)\nchar_code(-character, +character_code)
char_code(Char, Code) is true iff the character code for the one-char atom
Char is Code.'''
def execute(self, char, code):
if isvariable(char) and isvariable(code):
self.throw_instantiation_error()
if not isvariable(char) and len(char.name) != 1:
self.throw_type_error('character', char)
if not isvariable(code) and not isinstance(code.value, int):
self.throw_type_error('integer', code)
if not isvariable(code):
try:
chr(code.value)
            except (ValueError, TypeError):
self.throw_representation_error(code)
if isvariable(char):
c = chr(code.value)
return self.unify(char, Atomic(c))
elif isvariable(code):
c = ord(char.name)
return self.unify(code, Atomic(c))
else:
return ord(char.name) == code.value
class NumberChars_2(BuiltIn):
'''number_chars(+number, ?character_list)\nnumber_chars(-number, +character_list)
number_chars(Number, List) is true iff List is a list whose elements are
the one-char atoms corresponding to a character sequence of Number which
could be output.'''
def execute(self, number, charlist):
if isvariable(number) and ispartiallist(charlist):
self.throw_instantiation_error()
if isvariable(number):
for element in charlist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(number) and not isnumber(number):
self.throw_type_error('number', number)
if isvariable(number) and not islist(charlist) and not ispartiallist(charlist):
self.throw_type_error('list', charlist)
if islist(charlist):
for element in charlist.as_list():
if isatom(element) and len(element.name) != 1:
self.throw_type_error('character', element)
if isvariable(number) or islist(charlist):
from ..parser import PrologParser, InvalidTermException
s = ''.join([char.name for char in charlist.as_list()])
try:
# the parser needs an End Token
n = PrologParser(s + '.').read_term()
return self.unify(number, n)
except InvalidTermException as e:
self.throw_syntax_error(Atomic(s))
else:
chars = list(str(number.value)) # FIXME this should use write_canonical/1
lst = [Atomic(c) for c in chars]
return self.unify(charlist, List.from_list(lst))
class NumberCodes_2(BuiltIn):
'''number_codes(+number, ?character_code_list)\nnumber_codes(-number, ?character_code_list)
number_codes(Number, List) is true iff List is a list whose elements are
the character codes corresponding to a character sequence of Number which
could be output.'''
def execute(self, number, codelist):
if isvariable(number) and ispartiallist(codelist):
self.throw_instantiation_error()
if isvariable(number):
for element in codelist.as_list():
if isvariable(element):
self.throw_instantiation_error()
if not isvariable(number) and not isnumber(number):
self.throw_type_error('number', number)
if isvariable(number) and not islist(codelist) and not ispartiallist(codelist):
self.throw_type_error('list', codelist)
if islist(codelist):
for element in codelist.as_list():
if not isvariable(element):
try:
chr(element.value)
                    except (ValueError, TypeError):
self.throw_representation_error(element)
if isvariable(number) or islist(codelist):
from ..parser import PrologParser, InvalidTermException
s = ''.join([chr(code.value) for code in codelist.as_list()])
try:
# the parser needs an End Token
n = PrologParser(s + '.').read_term()
return self.unify(number, n)
except InvalidTermException as e:
self.throw_syntax_error(Atomic(s))
else:
chars = list(str(number.value)) # FIXME this should use write_canonical/1
lst = [Atomic(ord(c)) for c in chars]
return self.unify(codelist, List.from_list(lst))
###
### Implementation defined hooks (ISO 8.17)
###
class SetPrologFlag_2(BuiltIn):
'''set_prolog_flag(+flag, @nonvar)
A goal set_prolog_flag(Flag, Value) enables the value associated with a
Prolog flag to be altered.'''
def execute(self, flag, value):
if isvariable(flag) or isvariable(value):
self.throw_instantiation_error()
if not isvariable(flag) and not isatom(flag):
self.throw_type_error('atom', flag)
from .. import core # for flags
if flag.name not in core._FLAGS:
self.throw_domain_error('prolog_flag', flag)
f = core._FLAGS[flag.name]
if len(f.allowed) == 1:
self.throw_permission_error('modify', 'flag', flag)
if value.name not in f.allowed:
culprit = Compound('+', flag, value)
self.throw_domain_error('flag_value', culprit)
core._FLAGS[flag.name] = f._replace(value=value.name)
return True
class CurrentPrologFlag_2(BuiltIn):
'''current_prolog_flag(?flag, ?term)
current_prolog_flag(Flag, Value) is true iff Flag is a flag supported by
the processor, and Value is the value currently associated with it.'''
def execute(self, flag, value):
from .. import core # for flags
if not isvariable(flag) and not isatom(flag):
self.throw_type_error('atom', flag)
        if isatom(flag) and flag.name not in core._FLAGS:
self.throw_domain_error('prolog_flag', flag)
self.flags = {f for f in core._FLAGS.values() if core.unify(flag, Atomic(f.name)) is not None}
if not self.flags:
return False
return self.pick_flag(flag, value)
def reexecute(self, flag, value):
self.reset_substitution()
if not self.flags:
return False
return self.pick_flag(flag, value)
def pick_flag(self, flag, value):
f = self.flags.pop()
return self.unify(flag, Atomic(f.name)) and self.unify(value, Atomic(f.value))
class Halt_0(BuiltIn):
'''halt
halt neither succeeds nor fails. It has the side effect of exiting from the
processor and returning to whatever system invoked Prolog.'''
def execute(self):
exit(0)
class Halt_1(BuiltIn):
'''halt(+integer)
halt(X) neither succeeds nor fails. It has the side effect of exiting from
the processor and returning to whatever system invoked Prolog, passing the
value of X as a message.'''
def execute(self, x):
if isvariable(x):
self.throw_instantiation_error()
        if not isnumber(x) or not isinstance(x.value, int):
self.throw_type_error('integer', x)
exit(x.value)
# Utility functions
def free_variable_set(t, v):
'''The free variable set FV of a term T with respect to
a term V is a set of variables defined as the set
difference of the variable set of T and BV where BV is
a set of variables defined as the union of the variable
set of V and the existential variable set of T.'''
vst = variable_set(t)
vsv = variable_set(v)
est = existential_variable_set(t)
return vst.difference(vsv.union(est))
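# Worked example (illustrative): for T = Y^p(X, Y, Z) with respect to
# the template V = f(X),
#   variable_set(T)             = {X, Y, Z}
#   existential_variable_set(T) = {Y}
#   variable_set(V)             = {X}
# so the free variable set is {X, Y, Z} - ({X} U {Y}) = {Z}; bagof/3
# then groups its solutions by the bindings of Z.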
# TODO This should be distributed onto the Term hierarchy classes
def variable_set(term):
'''The variable set Sv of a term T is a set of variables
defined recursively as follows:
* if T is an atomic term, then Sv is the empty set
* else if T is a variable then Sv is {T}
* else if T is a compound term then Sv is the union of
the variable sets for each of the arguments of T.'''
from ..core import deref
if isinstance(term, Variable):
if term.isfree():
return {term}
else:
term = deref(term)
if isinstance(term, Atomic):
return set()
s = set()
if isinstance(term, Compound):
for arg in term.value[1:]:
s.update(variable_set(arg))
else: # a list
for e in term.as_list():
s.update(variable_set(e))
return s
def existential_variable_set(term):
'''The existential variables set EV of a term T is a set
of variables defined recursively as follows:
* if T is a variable or an atomic term, then EV is the
empty set
* else if T unifies with ^(V, G) then EV is the union
of the variable set of V and the existential variables
set of the term G
* else EV is the empty set.'''
s = set()
if isinstance(term, Atomic) or isvariable(term):
return s
if term.name == '^' and term.arity == 2:
s.update(variable_set(term.value[1]))
s.update(existential_variable_set(term.value[2]))
return s
return s
def iterated_goal_term(term):
'''The iterated goal term G of a term T is a term defined
recursively as follows:
* if T unifies with ^(_, Goal) then G is the iterated
goal term of Goal
* else G is T.'''
if term.name == '^' and term.arity == 2:
return iterated_goal_term(term.value[2])
return term
def isvariant(t, v):
'''Two terms are variants if there is a bijection s of
the variables of the former to the variables of the
latter such that the latter term results from replacing
each variable X in the former by Xs.'''
from ..core import deref
t = deref(t)
v = deref(v)
if isinstance(t, Atomic) and isinstance(v, Atomic):
return t == v
if isvariable(t) and isvariable(v):
return True
if isinstance(t, Compound) and isinstance(v, Compound):
if t.name != v.name or t.arity != v.arity:
return False
bijection = {}
for a1, a2 in zip(t.value[1:], v.value[1:]):
if isvariable(a1) and isvariable(a2) and not a1.name.startswith('_'):
a = bijection.get(a1)
if a is not None and a2 != a:
return False
else:
bijection[a1] = a2
else:
if not isvariant(a1, a2):
return False
return True
return False
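# Illustrative variant checks (sketched at the Python level, assuming
# X, Y, A, B are distinct free Variable instances):
#   isvariant(p(X, Y, X), p(A, B, A))  -> True, via X->A, Y->B
#   isvariant(p(X, Y, X), p(A, B, B))  -> False, X cannot map to both
#   isvariant(f(a), f(a))              -> True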
PREDICATES = {
# Term unification (ISO 8.2)
'=/2' : Unify_2,
    '\\=/2' : NotUnifiable_2,
# Type testing (ISO 8.3)
'var/1' : Var_1,
'atom/1' : Atom_1,
'integer/1' : Integer_1,
'float/1' : Float_1,
'atomic/1' : Atomic_1,
'compound/1' : Compound_1,
'nonvar/1' : Nonvar_1,
'number/1' : Number_1,
# Term comparison (ISO 8.4)
'@=</2' : TermLessThanOrEqual_2,
'==/2' : TermIdentical_2,
    '\\==/2' : TermNotIdentical_2,
'@</2' : TermLessThan_2,
'@>/2' : TermGreaterThan_2,
'@>=/2' : TermGreaterThanOrEqual_2,
# Term creation and decomposition (ISO 8.5)
'functor/3' : Functor_3,
'arg/3' : Arg_3,
'=../2' : Univ_2,
'copy_term/2' : CopyTerm_2,
# Arithmetic evaluation (ISO 8.6)
'is/2' : Is_2,
# Arithmetic comparison (ISO 8.7)
'=:=/2' : ArithmeticEqual_2,
    '=\\=/2' : ArithmeticNotEqual_2,
'</2' : ArithmeticLessThan_2,
'=</2' : ArithmeticLessThanOrEqual_2,
'>/2' : ArithmeticGreaterThan_2,
'>=/2' : ArithmeticGreaterThanOrEqual_2,
    # Clause retrieval and information (ISO 8.8)
'clause/2' : Clause_2,
'current_predicate/1' : CurrentPredicate_1,
# Clause creation and destruction (ISO 8.9)
'asserta/1' : Asserta_1,
'assertz/1' : Assertz_1,
'retract/1' : Retract_1,
'abolish/1' : Abolish_1,
# All solutions (ISO 8.10)
'findall/3' : Findall_3,
'bagof/3' : Bagof_3,
'setof/3' : Setof_3,
# Logic and control (ISO 8.15)
# FIXME \+ does not work because of what is probably a parser
# bug: the operator's "scope" is much wider than the single
# goal, even when using parentheses!
    '\\+/1' : Not_1, 'not/1' : Not_1,
'repeat/0' : Repeat_0,
# Atomic term processing (ISO 8.16)
'atom_length/2' : AtomLength_2,
'atom_concat/3' : AtomConcat_3,
'sub_atom/5' : SubAtom_5,
'atom_chars/2' : AtomChars_2,
'atom_codes/2' : AtomCodes_2,
'char_code/2' : CharCode_2,
'number_chars/2' : NumberChars_2,
'number_codes/2' : NumberCodes_2,
# Implementation defined hooks (ISO 8.17)
'set_prolog_flag/2' : SetPrologFlag_2,
'current_prolog_flag/2' : CurrentPrologFlag_2,
'halt/0' : Halt_0,
'halt/1' : Halt_1
}
| 2.53125 | 3 |
ros/dataset_to_rosbag.py | sn0wflake/gta | 4,498 | 12791549 | #!/usr/bin/env python
import numpy as np
import h5py
from progress.bar import Bar
import sys
import rospy
import rosbag
from sensor_msgs.msg import Imu, Image
def main():
if len(sys.argv) < 2:
print("Usage: {} dataset_name".format(sys.argv[0]))
exit(1)
file_name = sys.argv[1]
    log_file = h5py.File('../dataset/log/{}.h5'.format(file_name), 'r')
    camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name), 'r')
    zipped_log = zip(
log_file['times'],
log_file['fiber_accel'],
log_file['fiber_gyro'])
with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
bar = Bar('Camera', max=len(camera_file['X']))
for i, img_data in enumerate(camera_file['X']):
m_img = Image()
m_img.header.stamp = rospy.Time.from_sec(0.01 * i)
m_img.height = img_data.shape[1]
m_img.width = img_data.shape[2]
m_img.step = 3 * img_data.shape[2]
m_img.encoding = 'rgb8'
m_img.data = np.transpose(img_data, (1, 2, 0)).flatten().tolist()
bag.write('/camera/image_raw', m_img, m_img.header.stamp)
bar.next()
bar.finish()
bar = Bar('IMU', max=len(log_file['times']))
for time, v_accel, v_gyro in zipped_log:
m_imu = Imu()
m_imu.header.stamp = rospy.Time.from_sec(time)
            for i, c in enumerate('xyz'):
                setattr(m_imu.linear_acceleration, c, v_accel[i])
                setattr(m_imu.angular_velocity, c, v_gyro[i])
bag.write('/fiber_imu', m_imu, m_imu.header.stamp)
bar.next()
bar.finish()
if __name__ == "__main__":
main()
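# Example invocation (the dataset name is hypothetical; the script
# expects ../dataset/log/<name>.h5 and ../dataset/camera/<name>.h5):
#   python dataset_to_rosbag.py 2016-06-08--11-46-01
# This writes 2016-06-08--11-46-01.bag containing the /camera/image_raw
# and /fiber_imu topics.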
| 2.234375 | 2 |
pokemon_entities/models.py | A1exander-Pro/pokemon_go_lesson | 0 | 12791550 | from django.db import models
class Pokemon(models.Model):
    title = models.CharField(max_length=200, verbose_name="Russian title")
    title_en = models.CharField(max_length=200, verbose_name="English title", blank=True)
    title_jp = models.CharField(max_length=200, verbose_name="Japanese title", blank=True)
    image = models.ImageField(verbose_name="Pokemon image", null=True, blank=True)
    description = models.TextField(verbose_name="Description", blank=True)
    previous_evolution = models.ForeignKey("self", verbose_name="Evolves from",
                                           on_delete=models.SET_NULL, null=True, blank=True,
                                           related_name="evolution")
def __str__(self):
return self.title
class PokemonEntity(models.Model):
pokemon = models.ForeignKey(Pokemon, on_delete=models.CASCADE, related_name="entities")
lat = models.FloatField(verbose_name="Lat")
lon = models.FloatField(verbose_name="Lon")
appeared_at = models.DateTimeField(null=True, verbose_name="Appeared at", blank=True)
disappeared_at = models.DateTimeField(null=True, verbose_name="Disappeared at", blank=True)
level = models.IntegerField(null=True, verbose_name="Level", blank=True)
health = models.IntegerField(null=True, verbose_name="Health", blank=True)
strength = models.IntegerField(null=True, verbose_name="Strength", blank=True)
defence = models.IntegerField(null=True, verbose_name="Defence", blank=True)
stamina = models.IntegerField(null=True, verbose_name="Stamina", blank=True)
def __str__(self):
return f'{self.pokemon}, lvl: {self.level}'
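# Illustrative ORM usage for these models (a sketch; assumes migrations
# have been applied and a Pokemon row titled "Pikachu" exists):
#
#   from pokemon_entities.models import Pokemon
#
#   pikachu = Pokemon.objects.get(title_en="Pikachu")
#   # All map markers for this pokemon, newest first:
#   markers = pikachu.entities.order_by("-appeared_at")
#   # Pokemons it evolves into (reverse FK via related_name="evolution"):
#   next_forms = pikachu.evolution.all()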
| 2.078125 | 2 |