| seq_id (stringlengths 7–11) | text (stringlengths 156–1.7M) | repo_name (stringlengths 7–125) | sub_path (stringlengths 4–132) | file_name (stringlengths 4–77) | file_ext (stringclasses, 6 values) | file_size_in_byte (int64, 156–1.7M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 38 values) | doc_type (stringclasses, 1 value) | stars (int64, 0–24.2k, ⌀) | dataset (stringclasses, 1 value) | pt (stringclasses, 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
42117272334
|
from django.conf.urls import url
from django.contrib import admin
from users import views as usersViews
from bookmark import views as bookmarkViews
urlpatterns = [
url(r'^login', usersViews.login),
url(r'^logout', usersViews.logout),
url(r'^register', usersViews.register),
url(r'^bookmark/$', bookmarkViews.index),
url(r'^bookmark/form/(?P<id>[0-9]+)/$', bookmarkViews.edit),
url(r'^bookmark/form/$', bookmarkViews.new),
url(r'^bookmark/delete/(?P<id>[0-9]+)/$', bookmarkViews.delete),
url(r'^users/', usersViews.list),
url(r'^$', usersViews.home),
]
|
jlneto15/bookmark
|
web/app/urls.py
|
urls.py
|
py
| 589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4558179615
|
import subprocess
from sanic import Sanic, response
# import os
app = Sanic(__name__)
app.ctx.restarting = False
@app.route("/")
async def test(_):
    with open("index.html", encoding='utf-8') as f:  # context manager so the file handle is closed
        return response.html(f.read())
def is_github_request(request):
# check if the request is from github, with the api key, the curl command is at the bottom of the file
return request.headers.get("Authorization") == "Bearer " + "ABC123"
@app.route("/restart", methods=["POST"])
def webhook(request):
    # github actions posts to this endpoint; the request is authenticated with the api key (see the curl command at the bottom of the file)
if not is_github_request(request):
return response.text("Not Authorized", status=401)
subprocess.call(["git", "pull"])
return response.text("Restarting")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, auto_reload=True)
# whats command to create requirements.txt
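# (answer: a common way is `pip freeze > requirements.txt`, which writes the
# packages installed in the current environment to requirements.txt)

# A sketch of the curl command the comments above refer to (host/port assumed
# from app.run() above; the token is the one checked in is_github_request):
# curl -X POST -H "Authorization: Bearer ABC123" http://localhost:8000/restart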
|
sooswastaken/continuous-integration
|
server.py
|
server.py
|
py
| 994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22189104094
|
import numpy as np
import cv2 as cv
capture = cv.VideoCapture(0)
lastNorm = 0.0
lastCounter = 0
counter = 0
currentState = 0
onList = []
offList = []
onDuration = 0
offDuration = 0
if not capture.isOpened():
print("Cannot open camera")
exit()
while True:
# Capture frame-by-frame
    ret, frame = capture.read()
    if not ret:  # stop if the frame could not be read (camera unplugged, etc.)
        print("Can't receive frame. Exiting ...")
        break
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
currentNorm = np.linalg.norm(frame)
diffNorm = currentNorm - lastNorm
#print("currentNorm " ,currentNorm)
#print("diffNorm " ,diffNorm)
    if diffNorm > 20000 and currentState == 0:
        currentState = 1
        # frame counts are converted to seconds assuming a ~30 fps capture
        print("on - was off for ", (counter - lastCounter), " frames and ", ((counter - lastCounter)/30), " seconds")
        offDuration = (counter - lastCounter)/30
offList.append(offDuration)
#for v in offList:
# print(v + " ")
lastCounter = counter
    if diffNorm < -20000 and currentState == 1:
currentState = 0
print("off - was on for " ,counter - lastCounter , " frames and " , (counter - lastCounter)/30 , " seconds" )
onDuration = (counter - lastCounter)/30
onList.append(onDuration)
#for v in onList:
# print( v + " " )
lastCounter = counter
lastNorm = currentNorm
counter += 1
# Display the resulting frame
cv.imshow('frame', gray)
if cv.waitKey(1) == ord('q'):
break
# When everything done, release the capture
capture.release()
cv.destroyAllWindows()
|
musaceylan/handy_codes
|
on_off_detector.py
|
on_off_detector.py
|
py
| 1,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3863123970
|
import socket
import os
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(("scrap",55554))
    pid = os.fork()
    if pid > 0:
        # parent process: print everything received over the socket
        while True:
            data = s.recv(4096)
            print(data.decode("utf-8"), end="")
    else:
        # child process: forward stdin lines to the socket
        while True:
            cmd = input() + "\n"
            s.send(cmd.encode("utf-8"))
|
fusillator/babysteps64
|
socket_test.py
|
socket_test.py
|
py
| 352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22713126804
|
import os
import sys
from os import listdir
from PIL import Image
import mysql.connector as database
#Environment variables / default values
DB_HOST = os.getenv('DB_HOST','localhost')
DB_USER = os.getenv('DB_USER','root')
DB_PASSWORT = os.getenv('DB_PASSWORT','secret')
FILE_PATH = os.getenv('FILE_PATH','files')
patharray = [FILE_PATH+"/folder1",FILE_PATH+"/folder2",FILE_PATH+"/folder3",FILE_PATH+"/folder4"]
# Connect to MariaDB Platform
try:
connection = database.connect(
user=DB_USER,
password=DB_PASSWORT,
host=DB_HOST,
database="testdatabase"
)
except database.Error as e:
print("Error connecting to MariaDB Platform: {e}")
sys.exit(1)
def Is_id_checked(id):
    print("Check flag for Upload " + id)
    # parameterized query instead of string concatenation (avoids SQL injection)
    statement = "SELECT uploadid, filechecked FROM uploads WHERE uploadid = %s"
    cursor = connection.cursor()
    cursor.execute(statement, (id,))
    row = cursor.fetchone()
    return row[1] if row else None
def Set_id_checked(id):
    print("Upload " + id + " is checked. Update flag")
    statement = "UPDATE uploads SET filechecked = 1 WHERE uploadid = %s"
    cursor = connection.cursor()
    cursor.execute(statement, (id,))
    connection.commit()
def check_all():
for path in patharray:
for filename in listdir(path):
if(filename=="corrupt"):
break
if(Is_id_checked(filename[:-4]) == 0):
imagepath = path + '/' + filename
if filename.endswith('.gif') or filename.endswith('.png') or filename.endswith('.jpg'):
try:
img = Image.open(imagepath)
img.verify()
Set_id_checked(filename[:-4])
except (IOError, SyntaxError) as e:
print('Bad file:', imagepath)
#os.remove(imagepath)
os.replace(path + '/' + filename, path + '/' + "corrupt/" + filename)
if filename.endswith('.mp4'):
result = os.system("ffmpeg -v error -i " + imagepath + ' -f null ' + './' + filename + ' >/dev/null 2>&1')
if result != 0:
print('Bad file:', imagepath)
##os.remove(imagepath)
os.replace(path + '/' + filename, path + '/' + "corrupt/" + filename)
else:
Set_id_checked(filename[:-4])
check_all()
|
AnSieger/find_corrupt_media_files
|
checkimages.py
|
checkimages.py
|
py
| 2,239 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19327457883
|
# find the longest word in a sentence by scanning character by character
a='india is my country'
a=a+' '  # trailing space so the final word is also checked
count=0
maximum=0
word=''
longestword=''
for i in range(len(a)):
    if a[i]==' ':
        if count>maximum:
            maximum=count
            longestword=word
        count=0
        word=''
    else:
        count=count+1
        word=word+a[i]
print(maximum,longestword)
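# A shorter alternative using str.split (a sketch, not the original lab solution):
longest = max(a.split(), key=len)
print(len(longest), longest)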
|
NARIKODANHRIDUL/python-sample-codes
|
lab 11/lab 11 5.py
|
lab 11 5.py
|
py
| 313 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7924878639
|
from tools import adaptive, parse
import numpy as np
from imutils import resize
import matplotlib
import matplotlib.pyplot as plt
import cv2 as cv
import argparse
matplotlib.use('TKAgg')
PATH = "images/adaptive/{}"
function_map = {
'mean': adaptive.threshold_mean,
'median': adaptive.threshold_median
}
parser = argparse.ArgumentParser()
parser.add_argument('--filepath', '-f',
type=str, help=parse.HELP_FILEPATH)
parser.add_argument('--block_size', '-w', default=(3, 3),
type=parse.tuple_type, help=parse.HELP_WINDOW)
parser.add_argument('--function', '-fc', default='mean',
type=str, help=parse.HELP_FUNCTION, choices=function_map.keys())
parser.add_argument('--resize_width', '-rsz', default=400,
type=int, help=parse.HELP_RSZ)
parser.add_argument('--save_filename', '-svf', default=None,
type=str, help=parse.HELP_SAVE)
if __name__ == '__main__':
args = parser.parse_args()
image = cv.imread(args.filepath, 0)
image = resize(image, args.resize_width)
adaptive_thresh = adaptive.adaptive_threshold(image, args.block_size, function_map[args.function])
filename = args.save_filename
if not args.save_filename:
filename = "{}_adaptive_block_{}_{}x{}.png"
filename = filename.format(
args.filepath.split("/")[-1].split(".")[0], args.function,
args.block_size[0], args.block_size[1]
)
cv.imwrite(PATH.format(filename), adaptive_thresh)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(image, cmap='gray')
ax[1].imshow(adaptive_thresh, cmap='gray')
plt.show()
|
YvesAugusto/project_vc
|
adaptive.py
|
adaptive.py
|
py
| 1,682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5125390370
|
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import time
import threading
import cv2
import numpy as np
from gaze_tracking import GazeTracking
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QImage, QPixmap, QCloseEvent
from PyQt5.QtWidgets import QApplication, QStackedWidget
from GUImain.GUIframe import MyApp
import keyboard
import keyboard_event as event
import database_func as db
end_sig = False
img_num = 0
esc = False
webcam = cv2.VideoCapture(0)
R_top = 0
L_top = 0
C_top = 0
R_bottom = 0
L_bottom = 0
C_bottom = 0
avg_top_right = 0
avg_top_left = 0
avg_bottom_right = 0
avg_bottom_left = 0
avg_top_center = 0
avg_bottom_center = 0
total_left_hor_gaze = 0
total_right_hor_gaze = 0
total_top_ver_gaze = 0
total_bottom_ver_gaze = 0
sectionA = 0
sectionB = 0
sectionC = 0
sectionD = 0
sectionE = 0
sectionF = 0
section = "None"
count = 1
test_count = 1
flag = 0
gaze = GazeTracking()
#GUI
app = QApplication(sys.argv)
gui = MyApp()
gui.Stack.setCurrentWidget(gui.stack1)
gui.currentStack = 1
gui.name_btn.clicked.connect(gui.change_display)
def Section(where):
global sectionA, sectionB, sectionC, sectionD, sectionE, sectionF
if where == "A":
sectionA += 1
return sectionA
elif where == "B":
sectionB += 1
return sectionB
elif where == "C":
sectionC += 1
return sectionC
elif where == "D":
sectionD += 1
return sectionD
elif where == "E":
sectionE += 1
return sectionE
elif where == "F":
sectionF += 1
return sectionF
def Thread_run():
print(section, ":", Section(section))
thread = threading.Timer(1, Thread_run)
thread.daemon = True
thread.start()
return thread
thread = Thread_run()
while True:
#GUI
if gui.quit_sig:
sys.exit()
if bool(gui.start_btn.isChecked()):
# We get a new frame from the webcam
_, frame = webcam.read()
new_frame = np.zeros((500, 500, 3), np.uint8)
gaze.refresh(frame)
frame, loc1, loc2 = gaze.annotated_frame()
text = ""
'''
#draw face guide line
red_color = (0, 0, 255)
guide_x1 = 150
guide_y1 = 100
guide_w = 300
guide_h = 300
face_line = cv2.rectangle(frame, (guide_x1, guide_y1), (guide_x1 + guide_w, guide_y1 + guide_h), red_color, 3)
'''
#GUI
#if bool(gui.start_btn.isChecked()):
if test_count < 50:
cv2.circle(frame, (25, 25), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_left_hor_gaze += gaze.horizontal_ratio()
total_top_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio1:", gaze.horizontal_ratio())
print("ver ratio1:", gaze.vertical_ratio())
elif 50 <= test_count < 100:
cv2.circle(frame, (610, 25), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_right_hor_gaze += gaze.horizontal_ratio()
total_top_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio2:", gaze.horizontal_ratio())
print("ver ratio2:", gaze.vertical_ratio())
elif 100 <= test_count < 150:
cv2.circle(frame, (25, 450), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_left_hor_gaze += gaze.horizontal_ratio()
total_bottom_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio3:", gaze.horizontal_ratio())
print("ver ratio3:", gaze.vertical_ratio())
elif 150 <= test_count < 200:
cv2.circle(frame, (610, 450), 25, (0, 0, 255), -1)
if gaze.horizontal_ratio() != None and gaze.vertical_ratio() != None:
total_right_hor_gaze += gaze.horizontal_ratio()
total_bottom_ver_gaze += gaze.vertical_ratio()
test_count += 1
print("hor ratio4:", gaze.horizontal_ratio())
print("ver ratio4:", gaze.vertical_ratio())
gaze_time = int(time.time())
save_loc1 = loc1
save_loc2 = loc2
else:
if flag == 0:
avg_left_hor_gaze = total_left_hor_gaze / 100
avg_right_hor_gaze = total_right_hor_gaze / 100
avg_top_ver_gaze = total_top_ver_gaze / 100
avg_bottom_ver_gaze = total_bottom_ver_gaze / 100
print(avg_left_hor_gaze, avg_right_hor_gaze, avg_top_ver_gaze, avg_bottom_ver_gaze)
flag = 1
if gaze.is_blinking():
text = "Blinking"
if gaze.is_top_right(avg_right_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 200, 227)
text = "Looking top right"
section = "A"
elif gaze.is_top_left(avg_left_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 0, 255)
text = "Looking top left"
section = "B"
elif gaze.is_bottom_right(avg_right_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (255, 0, 170)
text = "Looking bottom right"
section = "C"
elif gaze.is_bottom_left(avg_left_hor_gaze, avg_top_ver_gaze):
new_frame[:] = (0, 255, 0)
text = "Looking bottom left"
section = "D"
elif gaze.is_top_center(avg_top_ver_gaze, avg_right_hor_gaze, avg_left_hor_gaze):
new_frame[:] = (0, 104, 250)
text = "Looking top center"
section = "E"
elif gaze.is_bottom_center(avg_top_ver_gaze, avg_right_hor_gaze, avg_left_hor_gaze):
new_frame[:] = (255, 0, 0)
text = "Looking bottom center"
section = "F"
cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.rectangle(frame, save_loc1, save_loc2, (0, 0, 255), 2)
if test_count < 200:
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
#cv2.imshow("New Frame", new_frame)
cv2.imshow("Frame", frame)
else:
cv2.destroyAllWindows()
#database
if keyboard.is_pressed('down') or keyboard.is_pressed('up'):
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
end_sig = event.tracking_con(gaze_info)
sectionA = 0
sectionB = 0
sectionC = 0
sectionD = 0
sectionE = 0
sectionF = 0
gaze_time = time.time()
elif keyboard.is_pressed('esc'):
print('esc press')
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
esc = event.tracking_end(gaze_info)
# GUI
gui.start = True
qformat = QImage.Format_Indexed8
if len(frame.shape) == 3:
if frame.shape[2] == 4: # RGBA
qformat = QImage.Format_RGBA8888
else: # RGB
qformat = QImage.Format_RGB888
out_image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], qformat)
out_image = out_image.rgbSwapped()
gui.face_label.setAlignment(QtCore.Qt.AlignCenter)
gui.face_label.setPixmap(QPixmap.fromImage(out_image))
elif gui.start:
if not end_sig:
if not esc:
gaze_time = int(time.time()) - gaze_time
img_num = img_num + 1
gaze_info = [gui.name, img_num, sectionA, sectionB, sectionC, sectionD, sectionE, sectionF, gaze_time]
end_sig = event.tracking_end(gaze_info)
thread.cancel()
cv2.destroyAllWindows()
gui.Stack.setCurrentWidget(gui.stack3)
gui.currentStack = 3
info = db.select_user_info(gui.name)
gaze_num = 0
        for _ in info:  # don't reuse the name `gaze`, which is the GazeTracking object used above
            gaze_num = gaze_num + 1
num = 1
loop = 1000
end_sig = True
if loop == 1000:
img_path = "data/"+gui.name+"_"+str(num)+".png"
print(img_path)
graph = QPixmap(img_path)
graph = graph.scaledToWidth(800)
gui.graph_label.setPixmap(graph)
num = num + 1
if num > gaze_num:
num = 1
loop = 0
loop = loop + 1
if cv2.waitKey(1) == 27:
break
total_gaze = R_top + L_top + C_top + R_bottom + L_bottom + C_bottom
# print("Top Gaze ratio : ", round(R_top/total_gaze, 2), round(L_top/total_gaze,2), round(C_top/total_gaze,2))
# print("Bottom Gaze ratio: ", round(R_bottom/total_gaze,2), round(L_bottom/total_gaze,2), round(C_bottom/total_gaze,2))
cv2.destroyAllWindows()
|
jinho17/eye_tracking_project
|
eye_tracking/Eyetracking/Eyetracking0501.py
|
Eyetracking0501.py
|
py
| 10,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19707721662
|
class Solution:
    def maxSubarraySumCircular(self, nums):
        n = len(nums)
        d = nums + nums  # concatenate a copy; `d = nums; d += nums` would mutate the input in place
        ans = max(nums)
        for i in range(n):
            if d[i] <= 0:  # an optimal subarray never starts with a non-positive element
                continue
            tmp = d[i]
            if tmp > ans:
                ans = tmp
            for k in range(1, n):  # grow the window up to the full array length
                tmp += d[i + k]
                if tmp > ans:
                    ans = tmp
        return ans
s=Solution()
print(s.maxSubarraySumCircular([1,-2,3,-2]))
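# A hedged O(n) alternative using Kadane's algorithm (a sketch, not the author's
# method): the circular maximum is either an ordinary maximum subarray, or the
# total minus a minimum subarray; if every element is negative, fall back to the
# plain maximum so the empty "wrap" subarray is never chosen.
def max_subarray_sum_circular(nums):
    total = cur_max = cur_min = 0
    best_max = float('-inf')
    best_min = float('inf')
    for x in nums:
        total += x
        cur_max = max(cur_max + x, x)
        best_max = max(best_max, cur_max)
        cur_min = min(cur_min + x, x)
        best_min = min(best_min, cur_min)
    return best_max if best_max < 0 else max(best_max, total - best_min)

print(max_subarray_sum_circular([1, -2, 3, -2]))  # 3, matching the solution above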
|
admaxpanda/LeetCode
|
918. Maximum Sum Circular Subarray.py
|
918. Maximum Sum Circular Subarray.py
|
py
| 476 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30395906732
|
#https://www.codingame.com/training/easy/max-area
#MAX AREA
import math
from operator import itemgetter
n=int(input())
a=[int(i) for i in input().split()]
area=0
for i in range(max(a), min(a)-1, -1):  # min(a)-1 so the lowest water level is also tried
    b=[x for x in range(n) if a[x]>=i]
    #print(b)
    area=max(area,(max(b)-min(b))*i)
print(area)
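# A hedged O(n) alternative using the classic two-pointer technique (a sketch,
# not the submitted solution): always move the shorter side inward, since the
# area is limited by the shorter of the two heights.
def max_area(heights):
    lo, hi = 0, len(heights) - 1
    best = 0
    while lo < hi:
        best = max(best, (hi - lo) * min(heights[lo], heights[hi]))
        if heights[lo] < heights[hi]:
            lo += 1
        else:
            hi -= 1
    return best

print(max_area(a))  # same answer as the level-scan above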
|
AllanccWang/CodingGame
|
classic puzzle-easy/max-area.py
|
max-area.py
|
py
| 298 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22634603016
|
#using while loop
num=int(input("Enter the Number: "))
temp=num
fact=1
while temp > 1:  # `> 1` rather than `!= 1`, so an input of 0 or 1 terminates with fact = 1
    fact=fact*temp
    temp-=1
print(f"Factorial of a Number is {fact}")
#using Math Module
print("Factorial Using Math Module")
from math import factorial
n=int(input("Enter the Number: "))
print(f"Factorial of a Number is {factorial(n)}")
|
arironman/MSU-Python
|
ex-8/37.py
|
37.py
|
py
| 354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18231210208
|
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
import glob
import concurrent.futures
# Read in catalog data
catalog = pd.read_csv('observations.txt', delimiter=' ')
master_catalog = pd.read_csv('master_catalog_jan_2023.csv', delimiter=',')
print(master_catalog)
print('Starting loop...')
# Define a function to process a single field
def process_field(index):
global master_catalog, catalog, field_data, master_catalog_coords, catalog_coord
idx, d2d, _ = master_catalog_coords[index].match_to_catalog_sky(catalog_coord)
# Get the field ID and check if the corresponding file exists
field_id = catalog.loc[idx]['FieldID']
file_path = f'cleaned_half/c{field_id}p.ascd'
if not glob.glob(file_path):
print(f'Error: File not found for FieldID {field_id}')
return
# Read in file data
if field_id not in field_data:
field_data[field_id] = pd.read_csv(file_path, delimiter=' ')
file_data = field_data[field_id]
print('+=================+')
print('Processing : ', field_id, '(', index, ' / ', total, ')')
file_coords = SkyCoord(ra=file_data['RA'], dec=file_data['Dec'], unit=(u.hourangle, u.deg))
idx, _, _ = master_catalog_coords[index].match_to_catalog_sky(file_coords)
i_value = file_data.loc[idx, 'i']
g_value = file_data.loc[idx, 'g']
# Add i and g values to catalog
master_catalog.at[index, 'i'] = i_value
master_catalog.at[index, 'g'] = g_value
print('Coords Processed')
# Loop over unique FieldIDs
counter = 0
master_catalog_coords = SkyCoord(ra=master_catalog['RADEG'], dec=master_catalog['DECDEG'], unit=(u.deg, u.deg))
total = len(master_catalog_coords)
ra_deg = (catalog['RAh']) * 15 + (catalog['RAm']) * 0.25 + (catalog['RAs']) * 0.00416667
# note: this simple sum assumes non-negative declinations; for negative DEd the DEm/DEs terms would need the same sign
dec_deg = (catalog['DEd']) + ((catalog['DEm']) / 60) + ((catalog['DEs']) / 3600)
catalog_coord = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg)
field_data = {}
# Define the number of threads to use
num_threads = 8
# Process the fields using multiple threads
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = []
for index in range(total):
future = executor.submit(process_field, index)
futures.append(future)
concurrent.futures.wait(futures)
# Write out updated catalog
print('Outputting to csv')
master_catalog.to_csv('cleaned_half/master_updated.csv', index=False)
print('Done!')
|
WilliamOrringe/Indentifying-Candidate-Star-Clusters-in-M31
|
multi.py
|
multi.py
|
py
| 2,519 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38931021069
|
from typing import Callable
from PySide2.QtWidgets import QListWidget, QAbstractItemView, QAction, QPushButton, QListWidgetItem, QGridLayout
from PySide2.QtCore import QSize, QThread, Signal, Slot, Qt
import damaker
from damaker.pipeline import *
import damaker_gui
import damaker_gui.widgets as widgets
class PipelineWidget(QListWidget, widgets.ITabWidget):
name: str = "Pipeline"
icon: str = u":/flat-icons/icons/flat-icons/timeline.svg"
def __init__(self, parent=None, operations=[]):
super().__init__(parent)
self.setSpacing(3)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setVerticalScrollMode(QListWidget.ScrollMode.ScrollPerPixel)
self.setContextMenuPolicy(Qt.ActionsContextMenu)
act = QAction("Remove", self)
act.triggered.connect(self.removeOperation)
self.addAction(act)
for op in operations:
self.addOperation(op)
self.pipelineThread = PipelineRunnerThread()
def tabEnterFocus(self):
damaker_gui.MainWindow.Instance.operationList.connectPipeline(self)
def tabExitFocus(self):
damaker_gui.MainWindow.Instance.operationList.disconnectPipeline(self)
def runPipeline(self):
self.pipelineThread.setPipeline(self)
self.pipelineThread.stopped.connect(self.stopPipeline)
self.pipelineThread.start()
# self.pipelineThread.run()
def stopPipeline(self):
self.pipelineThread.terminate()
print("(Pipeline ended 🟡)")
def addOperation(self, op: Operation):
print(f"Operation '{op.name}' added ✔")
item = QListWidgetItem(op.name)
item.op = op.copy()
self.addItem(item)
# op_widget = widgets.OperationWidget(op=op, pipeline=self, layoutType=QGridLayout, batchMode=True)
# item.setSizeHint(QSize(op_widget.width(), op_widget.height()))
def addOpfromFunc(self, func: Callable):
self.addOperation(Operation(func, [], func.__name__))
def removeOperation(self):
self.takeItem(self.currentRow())
class PipelineRunnerThread(QThread):
stopped = Signal()
def __init__(self, pipeline: PipelineWidget=None):
super(PipelineRunnerThread, self).__init__()
self.pipeline = pipeline
def setPipeline(self, pipeline: PipelineWidget):
self.pipeline = pipeline
@Slot()
def run(self):
        if self.pipeline is None:
            return  # nothing to run
self.setPriority(QThread.HighPriority)
operations: list[widgets.OperationWidget] = []
for i in range(self.pipeline.count()):
# operations.append(self.pipeline.itemWidget(self.pipeline.item(i)))
operations.append(self.pipeline.item(i).op)
print("[Starting Pipeline 🚀]")
step = 1
for op in operations:
if not op.enabled:
continue
            print(f'-- [{step}] ➡ {op.name} --')
op.run()
step += 1
print("[Pipeline finished 🟢]")
self.stopped.emit()
|
subski/DAMAKER
|
damaker_gui/widgets/PipelineWidget.py
|
PipelineWidget.py
|
py
| 3,049 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18245658241
|
import pandas as pd
import dataprofiler as dp
import numpy as np
from pymongo import MongoClient
try:
    conn = MongoClient()
except Exception:
    print("Could not connect to Mongo DB")
db = conn.database
collection = db.my_gfg_collection
data = {
"calories": [420,380, 390,390, 80, 350],
"duration": [50,45,40,40,np.nan ,50]
}
target_df = pd.DataFrame(data)
profile = dp.Profiler(target_df)
report = profile.report(report_options={"output_format":"pretty"})
data_stats = report["data_stats"]
collection.insert_many(data_stats)  # data_stats is a list of per-column dicts, so insert_many rather than insert_one
column_list_df = []
ext_data = []
for n in data_stats:
column_list = list(n.keys())
for col_name in column_list:
if col_name not in column_list_df:
column_list_df.append(col_name)
else:
continue
ext_data.append(list(n.values()))
data = pd.DataFrame(ext_data,columns=column_list_df)
stats_df=pd.DataFrame.from_records(data.statistics.dropna().tolist())
sum_df = pd.concat([data, stats_df], axis=1)
data_stats = report["global_stats"]
print(data_stats)
print(sum_df)
|
arbecker620/DataQuality
|
DataQuality.py
|
DataQuality.py
|
py
| 1,082 |
python
|
en
|
code
| 2 |
github-code
|
6
|
12731828005
|
#Visualizing data with Matplotlib
#Matplotlib config
#import matplotlib as mpl
#mpl.rcParams['lines.linewidth'] = 2
#mpl.rcParams['lines.color'] = 'r'
#plt.rcParams['figure.figsize'] = (8,4)
#plt.gcf().set_size_inches(8,4)
#example 1
import numpy as np
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
hoy = date.today()
x = pd.period_range(hoy,periods=200,freq='d')
x = x.to_timestamp().to_pydatetime()
y = np.random.randn(200,3).cumsum(0)
#three plots in one figure
'''
plots = plt.plot(x,y)
plt.legend(plots,('foo','bar','mongo'),loc='best',
framealpha=0.25,prop={'size':'small','family':'monospace'})
plt.gcf().set_size_inches(8,4)
plt.title('Random Trends')
plt.xlabel('Date')
plt.ylabel('Cumulative sum')
plt.grid(True)
plt.figtext(0.995,0.01,'\u00a9 Acme designs 2020',ha='right',va='bottom')
plt.tight_layout()
plt.savefig('mpl_3lines_custom.svg')
'''
# a plot insert with figure.axes
'''
fig = plt.figure(figsize=(8,4))
#Main axes
ax = fig.add_axes([0.1,0.1,0.8,0.8])
ax.set_title('Main axes with insert child axes')
ax.plot(x,y[:,0])#selects the 1st col of numpy rand y-data
ax.set_xlabel('Date')
ax.set_ylabel('Cumulative Sum')
# inserted axes
ax = fig.add_axes([0.15,0.15,0.3,0.3])
ax.plot(x,y[:,1],color='g')
ax.set_xticks([]);#removes the xticks of subplot
plt.savefig('subplots.png')
'''
# another subplot
fig, axes = plt.subplots(nrows=3,ncols=1,sharex=True,sharey=True,figsize=(8,8))
labelled_data = zip(y.transpose(),('foo','bar','mongo'),('b','g','r'))
fig.suptitle('3 random trends',fontsize=16)
for i, ld in enumerate(labelled_data):
ax = axes[i]
ax.plot(x, ld[0], label=ld[1], color=ld[2])
    ax.set_ylabel('Cumulative sum')
ax.legend(loc='upper left',framealpha=0.5,prop={'size':'small'})
ax.grid(True)
axes[-1].set_xlabel('Date')
fig.text(0.995,0.01,'\u00a9 Acme designs 2020',ha='right',va='bottom')
fig.tight_layout()
print('Today is',hoy)
plt.savefig('3rand_subplots.png')
|
ndlopez/learn_python
|
source/plot_test.py
|
plot_test.py
|
py
| 1,965 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34385625845
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 10 17:20:34 2017
@author: admin
"""
from table import TABLE
from copy import deepcopy
import random, sub_structure
import numpy as np
from value_function import NODE
from vv import values
values[()]='d'
def divine(table,row):
temp=[]
new=[]
situation=deepcopy(table)
judge=situation[row]
for i in range(len(judge)):
if judge[i]==-1:
temp.append(i)
if temp==[]:
new=[judge]
else:
if temp[0]==0:
new.append(())
else:
new.append(judge[0:temp[0]])
if temp[len(temp)-1]+1==len(judge) :
new.append(())
else:
new.append(judge[(temp[len(temp)-1]+1):(len(judge))])
        for i in range(0, len(temp)-1):  # len(temp)-1, not -2: there are len(temp)-1 segments between consecutive -1 markers
            new.append(judge[(temp[i]+1):(temp[i+1])])
return new
def returnrowvalue(table):
dim=15
summary={}
summ=[]
tem=0
maxj=-100000
res=[]
temp=[]
    for row in range(dim):  # don't reuse `i` for both the row index and the window index
        divide = divine(table, row)
        for x in divide:
            if len(x) > 9:
                # scan every length-9 window of this segment and keep the best-scoring one
                # (indexing a shared `temp` list here would read stale windows from earlier segments)
                for j in range(len(x) - 8):
                    win = x[j:(j + 9)]
                    tem = vv[values[tuple(win)]]  # NOTE: assumes a score table `vv` provided alongside `values`; it is not imported in this file
                    if tem > maxj:
                        maxj = tem
                        res = win
                summ.append(values[tuple(res)])
            else:
                summ.append(values[tuple(x)])
newsum=set(summ)
for item in newsum:
summary[item]=summ.count(item)
return summary
|
youki-cao/Gomoku-AI
|
Code_AI_Gomoku/newmethod(1).py
|
newmethod(1).py
|
py
| 1,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45526650896
|
btc_positions = [{'symbol: BTC', 'margin_balance: 0.15', 'margin_position: 0.05', 'margin_frozen: 0.01'},
{'symbol: BTC2', 'margin_balance: 0.15', 'margin_position: 0.05', 'margin_frozen: 0.01'}]
def test(value):
"""
value: sets of strings
"""
    d = dict(s.split(': ') for s in value) # turn set of strings to dict (was `p`, an undefined name)
d = sorted(d.items(), key=lambda x: x[1], reverse=True) # reverse sort
l = '-' * 15 # dashed line
fin = f'{l}\n'
for v in d:
fin += '='.join(v)
fin += '\n'
fin += l
return fin
# iterate through btc_positions
for v in btc_positions:
text = test(v)
|
DeulinYakov/TelegramBotCollege
|
tests.py
|
tests.py
|
py
| 640 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6858043580
|
### Introduction to Pytorch Workflow timestamp to where it starts (link: https://www.youtube.com/watch?v=V_xro1bcAuA): 4:22:01
import torch
from torch import nn #nn contains all of pytorch's building blocks for neural networks; the pytorch documentation has a lot of building blocks for all sorts of layers
#you can combine layers in all the ways imaginable to make a neural network model do what you want it to do
import matplotlib.pyplot as plt
from pathlib import Path
"""
preparing and loading data (data can be almost anything in machine learning, like images, csv, videos, audio, text, or even dna)
machine learning is a game of 2 major parts: (that can be further subdivided into many other parts)
1. get data into a numerical representation (tensors)
2. build a model to learn patterns in that numerical representation
"""
# making data using a linear regression formula:
#creating known parameters: (in an actual dataset scraped from the internet, these won't be given, these are what the model is going to figure out)
weight = 0.7
bias = 0.3
#create:
start = 0
end = 1
step = 0.02
X = torch.arange(start, end, step).unsqueeze(dim=1) #x is usually used as a tensor, and we need the extra dimension for something later
y = weight * X + bias #the machine won't know this and will have to figure this out for itself, the y variable is the target
print(X[:10], y[:10], len(X), len(y))
## spliting data into training and test sets (one of the most important concepts in machine learning in general)
"""
visualizing the three datasets by comparing it to a school course:
training set: you can compare this to the course materials at a university that you would learn throughout the year, the model too would learn patterns from here
validation set: you can compare this to a practice exam, which would tune the model patterns/adjust the model's patterns (not always needed)
Test set: you can compare this to a final exam: it would see if the model is ready to be implemented/tests the model's performance on data it hasn't seen before
Goal: generalization (the ability for a machine learning model to perform well on data it hasn't seen before)
amount of data used for training set: ~60-80% (always needed)
amount of data used for validation set: 10-20% (not always needed)
amount of data used for test set: 10-20% (always needed)
"""
#create a train/test split/set (set and split mean the same thing in this case)
train_split = int(0.8 * len(X))
X_train, y_train = X[:train_split], y[:train_split] #gets all the data that's previous to that index
X_test, y_test = X[train_split:], y[train_split:] #gets all the data that is past that index
print(len(X_train), len(y_train), len(X_test), len(y_test)) #prints the amount of training features, training labels, testing features, testing labels
#NOTE: you can also use the sklearn/scikit module to split the training data in a more random way
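# A hedged sketch of that sklearn alternative (assumes scikit-learn is installed);
# left commented out so the deterministic 80/20 split above stays in effect:
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)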
## building a function to visualize the data
def plot_prediction(train_data = X_train,
                    train_labels = y_train,
                    test_data = X_test,
                    test_labels = y_test,
                    predictions = None):
    """
    Plots training data, test data, and compares predictions
    """
    plt.figure(figsize=(10, 7))
    #plot training data in blue
    plt.scatter(train_data, train_labels, c="blue", s=4, label="Training Data")
    #plot testing data in green
    plt.scatter(test_data, test_labels, c="green", s=4, label="Testing Data")
    if predictions is not None:  # `is not None` avoids an ambiguous elementwise tensor comparison
        #plot the predictions if they exist
        plt.scatter(test_data, predictions, c="red", s=4, label="Predictions")
    plt.legend(prop={"size": 14})
plt.show()
## building a model:
class LinearRegressionModel(nn.Module): # <- almost everything in pytorch inherits from nn, for more info: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
def __init__(self):
        super().__init__() #start with random parameters, then update them to fit the training data via gradient descent so they approach the true linear regression parameters
self.weight = nn.Parameter(torch.randn(1,
requires_grad=True, #gradient descent = true
dtype=torch.float))
self.bias = nn.Parameter(torch.randn(1,
requires_grad=True,
dtype=torch.float)) #we might also initialize a layer or a list of layers for our model to use
    # Forward method to define the computation in the model; x is the input
    def forward(self, x: torch.Tensor) -> torch.Tensor: #x is the input data (of torch.Tensor datatype), and this function is going to return a tensor datatype
        return self.weight * x + self.bias #this is the linear regression formula; forward() defines the operation that the module performs
### any subclass of nn.module needs to override the forward() method from model since it defines the computation of the model
"""
what the model does:
- Starts with random values (weights and biases)
- looks at training data and adjusts the random values to better represent/get closer to the ideal values (weight and bias values of our original formula)
How does it do it:
1. Gradient Descent
2. Back Propagation
also check out the pytorch cheatsheet by googling pytorch cheatsheet
Model building essentials:
- torch.nn: contains all of the building materials for computational graphs (a neural network can be considered a computational graph)
- torch.nn.Parameter(): what parameters our model should try and learn; often a pytorch layer from torch.nn will set these for us
- torch.nn.Module: the base class for all neural network modules; if you subclass it, you should override forward()
- torch.optim: this references the optimizers in pytorch; they help with gradient descent and contain various optimization algorithms
- torch.utils.data.Dataset: represents a map between the key (label) and sample (feature) pairs of your data, such as images and their associated labels
- torch.utils.data.DataLoader: creates a python iterable over a torch Dataset, allowing you to iterate over your data
- torchvision.transforms: for transforming pictures and vision data into tensors for models
- torchmetrics: ready-made evaluation metrics for pytorch models
- def forward(): all nn.Module subclasses require you to override this; as previously stated, this method defines what happens in the forward computation
"""
## Checking the contents of our model:
#to check the parameters of our model, we can use .parameters():
#sets the seed so the values won't vary and results will stay consistent; without this, the tensor values in the LinearRegressionModel would be random every time (which is normally what we want, but not for educational purposes)
torch.manual_seed(42)
#initialize model
model = LinearRegressionModel()
print(list(model.parameters()))
#list named parameters: (a parameter is something that the model sets itself, in case I forgot)
print(model.state_dict()) #the name comes from the self.weight and self.bias i think
## making predictions using torch.inference_mode()
#context manager; it's good to make this a habit since it turns off gradient tracking while we're doing predictions, which makes it a lot faster on larger data sets
#there's also torch.no_grad(), but inference_mode is preferred
with torch.inference_mode():
y_preds = model(X_test)
print(f"Predictions: {y_preds}\nTest Data: {y_test}")
plot_prediction(predictions=y_preds)
"""## Training the model (moving from unknown/random parameters closer to the actual accurate parameters, aka moving from a poor representation of the data to a better one)
The loss function tells us how wrong our model's predictions are
- note that a loss function can also be referred to as a cost function or a criterion in different areas
Things we need to train:
- Loss function - a function that measures how wrong our model's predictions are compared to the ideal outputs; the lower the better
- Optimizer - takes into account the loss of a model and adjusts the model's parameters (e.g. weight and bias) to improve the loss function
For pytorch specifically, we need:
- a training loop
- a testing loop
you can check out all the loss functions in the pytorch documentation: https://pytorch.org/docs/stable/nn.html#loss-functions
"""
#choosing and implimenting a loss function and a optimizer:
#using L1Loss/Mean Absolute Error (taking the absolute difference between all the expected value/ideal value and the actual value and returns its average)
#measures how wrong our data is
loss_fn = nn.L1Loss()
#setup an optimizer (using a Stoch(SGD) algorithm)
#an optimizer adjusts the parameters according to the loss function to reduce the loss
optimizer = torch.optim.SGD(model.parameters(), #the parameters that it's going to look at/optimize
                            lr=0.01) #learning rate: one of the most important hyperparameters we can set (regular parameters are set by the model itself)
#general idea of how optimizers work: it first increases the value in one direction, if the loss increases, then it increases in the other direction until the best value is achieved
"""
The learning rate (lr) is how much the optimizer adjusts the parameters to reduce the loss function/optimize the values, so the smaller the lr, the smaller the change in the parameters;
the larger the learning rate, the larger the change in the parameters. If the lr is too big, it might skip over the optimal value, but if it's too small, it'll take too
long to optimize
Q&A:
which loss function and optimizer should I use?
this depends on the context, with experience you'll get an idea of what works and what doesn't with your particular data set
ex. a regression problem would require something like a loss function of nn.L1Loss() and an optimizer like torch.optim.SGD()
but for classification problems like classifying whether or not a photo is of a dog or a cat, you'll likely want to use a loss function of nn.BCELoss() (binary cross entropy loss)
"""
## Building a training Loop (and a testing loop):
"""
steps:
0. looping through the data
1. forward pass (moving our data through the forward() method), also called forward propagation, moving in the opposite direction of a back propagation
2. calculate the loss: compare the forward pass predictions to the ground truth labels
3. optimizer zero grad
4. Back propagation (loss backwards?): data moves backwards through the network to calculate the gradients of each of the parameters of the model with respect to loss
5. optimizer step: use the optimizer to adjust the model's parameters to try to improve the loss
"""
#an epoch is one loop through the data, a hyper parameter because we set it ourselves
epochs = 200
#track different values and tracks model progress, used to plot model progress later on, useful for comparing with future experiments
epoch_count = []
loss_values = []
test_loss_values = []
print(model.state_dict())
for epoch in range(epochs):
    #set the model to training mode; training mode sets all parameters that require gradients to track gradients (requires_grad=True)
model.train()
#forward pass:
y_pred = model(X_train)
#loss function:
loss = loss_fn(y_pred, y_train) #predictions first then target
print(f"Loss: {loss}")
#optimizer zero_grad()
optimizer.zero_grad()
#4. back propagation on loss with respect to the parameters of the model
loss.backward()
#Optimizer, we want to step towards a gradient with a slope of 0 (slope of the loss function) or as low as possible, this is gradient descent and pytorch is doing this for you
#in torch autograd
optimizer.step() #by default how the optimizer changes will accumulate through the loop, so we have to zero them above (shown in step 3) for the next iteration of the loop
### testing
model.eval() #evaluation mode, turns off training, starts testing
#this turns off different stuff in the model that's not used for testing (essentially its like dropout/batch norm layers, read docs for more info)
with torch.inference_mode(): #turns off gradient tracking for inference and a couple of other stuff to make testing faster. torch.no_grad() does the same but slower
#1. foward pass:
test_pred = model(X_test)
#2. loss calculation:
test_loss = loss_fn(test_pred, y_test) #y_test is the test labels, calculates the testing loss value
if epoch % 10 == 0:
epoch_count.append(epoch)
loss_values.append(loss)
test_loss_values.append(test_loss)
print(f"Epoch: {epoch} | Loss: {loss} | Test loss: {test_loss}")
print(model.state_dict())
#matplotlib works with numpy, not working with the gpu because i don't have one so i can skip the "".cpu()"".numpy() part and just go right to .numpy
plt.plot(torch.tensor(epoch_count).numpy(), torch.tensor(loss_values).numpy(), label="Train loss")
plt.plot(torch.tensor(epoch_count).numpy(), torch.tensor(test_loss_values).numpy(), label="Test loss")
plt.title("Training and test loss curves")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.show()
#there is also learning rate scheduling, which is basically starting with big steps in the learning rate, then slowly lowering them, like reaching for the coin at the back of the couch
#the lowest point is the convergence; it's the point where the loss function is at its minimum
#the steps in the loop can be turned into functions (a sketch follows below); first build intuition for them
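# A hedged sketch of that refactor (not from the original tutorial), wrapping the
# loop steps above into reusable functions with the same model/loss_fn/optimizer:
def train_step(model, X, y, loss_fn, optimizer):
    model.train()
    y_pred = model(X)          # 1. forward pass
    loss = loss_fn(y_pred, y)  # 2. calculate the loss
    optimizer.zero_grad()      # 3. zero accumulated gradients
    loss.backward()            # 4. back propagation
    optimizer.step()           # 5. gradient descent step
    return loss

def test_step(model, X, y, loss_fn):
    model.eval()
    with torch.inference_mode():
        test_pred = model(X)
        return loss_fn(test_pred, y)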
with torch.inference_mode():
y_preds_new = model(X_test)
plot_prediction(predictions=y_preds_new)
## Saving models:
"""
there are 3 main methods you should know about when it comes to saving and loading: (https://pytorch.org/tutorials/beginner/saving_loading_models.html)
1. torch.save(): saves a serialized object to disk, uses the python pickle library's utility for serialization. Models, tensors, and dictionaries are all kinds of objects that
can be saved using this function, its recommended to save the state_dict, but you can also save the entire model
2. torch.load(): uses the pickle module to unpickle facilities to deserialize object files to memory, in the process also facilitates the device that the data is being loaded
into
3. torch.nn.Module.load_state_dict(): Loads a model's parameter dictionary using a deserialized state_dict, for more info, check out the website linked above
"""
#create model directory:
MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)
#create a model save path
MODEL_NAME = "01_pytorch_workflow_tutorial.pth" #the .pth is for saving a pytorch model
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME
#saving only the model's state_dict(): (the model's weights and biases and etc)
print(f"saving model to: {MODEL_SAVE_PATH}")
torch.save(obj=model.state_dict(),
f=MODEL_SAVE_PATH)
## Loading a model into a new instance of the model:
new_model = LinearRegressionModel()
#loading the state dict/loading the pre-trained values to replace the random values
new_model.load_state_dict(torch.load(f=MODEL_SAVE_PATH)) #loads all the state dictionaries like the weights and biases and etc
#making predictions using the loaded model:
new_model.eval()
with torch.inference_mode():
new_model_pred = new_model(X_test)
y_preds = model(X_test) #incase the y_preds value was changed
##compare the predictions/forward() calculations of both models, they should be the same since the values would be the same
print(new_model_pred == y_preds)
##continued in Workflow_ractice.py
##more info on loading and saving models on the pytorch docs: https://pytorch.org/tutorials/beginner/saving_loading_models.html
|
attackGoose/AI-Notebook-and-projects
|
pytorch/Learning stuff/2_Learning_pytorch_workflow.py
|
2_Learning_pytorch_workflow.py
|
py
| 15,967 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34357687621
|
import argparse, subprocess, socket, json, io, os, pipes, notify2  # pipes is used by run_with_return below
verbose = False
def get_metadata_id_from(meta): # meta is a user/application definable metapackage
data = call_application(meta) # application meta will create a timestamp entry of the relavent metadata, and then pass back the id number to log in the database
id = data.decode('utf-8')
return id
def user_notify(message): # message string, not message like the protocol
notify2.init('Assistant')
n = notify2.Notification("Notification",
message,
"notification-message-im" # Icon name
)
n.show()
def call_database(flags): # is this too specific to sql? it could be an important distinction for de/serial purpose
# the naming scheme needs to be formalized
COMMAND = os.getenv("ASST_COMMAND")
database = os.getenv("ASST_DATABASE") # the location of the database module
# this is just a stop gap for now. Everything needs to be cleaned up
    if COMMAND is None:
        COMMAND = "python3"
    if database is None:
        database = "sqldatabase.py"
command = [COMMAND,database]
for item in flags:
command.append(item)
notify(command)
process = subprocess.run(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE) # the returned value
    if process.returncode != 0: # `!=` rather than `is not`: identity comparison with ints is unreliable
notify("The following command returned a non-zero code: %s"%(command))
notify("RETURN CODE: %d"%(process.returncode))
notify("STDERR: %s"%(process.stderr))
data = process.stdout
return data # it is returned, since selflib is a part of the calling program, not called over command line.
def call_application(flags): # this can be put into call_database, but I don't need it to call env variables again. is this bad?
COMMAND = os.getenv("ASST_COMMAND")
database = os.getenv("ASST_DATABASE")
notify("COMMAND: %s"%(COMMAND))
notify("DATABASE: %s"%(database))
notify("call_application called. calling %s"%(flags))
command = [COMMAND]
for item in flags:
command.append(item)
process = subprocess.run(command,stdout=subprocess.PIPE)
data = process.stdout # Don't decode. This could allow for binary responses with graphics, etc
return data # this data is returned, since this is a library called within an application. should have a unxi sample program built in
def message_server(data): # isn't this exactly what I am looking to do w/ call_application?
# Send the information to the server
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(data + "\n", "utf-8"), ("localhost", 9999))
def notify(string):
    if verbose:
        print(string)
def loud_notify(tag, string):
    if verbose:
        print("---START "+tag+"---")
        print(string)
        print("---END "+tag+"---")
def serialize(message):
data = json.dumps(message)
return data
def deserialize(data):
message = json.loads(data)
return message
def read_config_file():
print("This feature is not yet setup")
# run a core associated component. deserialize so it can be networked.
def run_with_return(command): # commands structure is a list. ["python3","program.py","-s","someinput"] broken up into atoms, rather than a long string. It could just be "python3 program.py -s someinput"] though...
loud_notify("Running Component", command)
pipe = pipes.Template()
f = pipe.open('pipefile','w')
subprocess.run(command, stdout=f)
f.close()
f = pipe.open('pipefile','r')
string = f.read()
return(deserialize(string))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="main udp server and router")
parser.add_argument('-v', dest='verbose', help="print verbose output", action='store_true')
parser.add_argument('-d', dest='deserialize', help="deserialize string input")
parser.add_argument('-s', dest='serialize', help="deserialize string input")
args = parser.parse_args()
if args.verbose is True:
verbose = True
if args.deserialize is not None:
deserialize(args.deserialize)
if args.serialize is not None:
serialize(args.serialize)
|
Tadashi-Hikari/Sapphire-Assistant-Framework-Python
|
assistant/selflib.py
|
selflib.py
|
py
| 4,305 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74923077307
|
import numpy as np
import pyautogui
import time
import imutils
import cv2
import mediapipe as mp
from pynput.keyboard import Key, Controller
keyboard = Controller()
mp_hands = mp.solutions.hands
hands = mp_hands.Hands()
mp_draw = mp.solutions.drawing_utils
cap = cv2.VideoCapture(0)
finger_tips = [8, 12, 16, 20]
thumb_tip = 4
thumb_status = False
def draw_circle(tip, lm_list, state=False):
x, y = int(lm_list[tip].x * w), int(lm_list[tip].y * h)
if state == True:
cv2.circle(img, (x, y), 15, (255, 0, 0), cv2.FILLED)
elif state == False:
cv2.circle(img, (x, y), 15, (0, 255, 0), cv2.FILLED)
def finger_count(lm_list):
global finger_tips
finger_fold_status = []
for tip in finger_tips:
# getting the landmark tip position and drawing blue circle
# writing condition to check if finger is folded i.e checking if finger tip starting value is smaller than finger starting position which is inner landmark. for index finger
# if finger folded changing color to green
state = lm_list[tip - 3].y < lm_list[tip].y
draw_circle(tip, lm_list, state=state)
if state:
finger_fold_status.append(True)
else:
finger_fold_status.append(False)
return finger_fold_status
def screenshot():
image = pyautogui.screenshot("screenshot.png")
time.sleep(0.5)
def readimg(name):
img = cv2.imread(name)
cv2.imshow("screenshot", imutils.resize(img, width=600))
def trigger_sign(finger_status):
# check if the correct sign is done
# put ring finger down to take screenshot
if finger_status == [False, False, True, False]:
print("ss ") # take screenshot
screenshot()
readimg("screenshot.png")
while True:
ret, img = cap.read()
img = cv2.flip(img, 1)
h, w, c = img.shape
    results = hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # MediaPipe expects RGB; OpenCV frames are BGR
if results.multi_hand_landmarks:
for hand_landmark in results.multi_hand_landmarks:
# accessing the landmarks by their position
lm_list = []
for id, lm in enumerate(hand_landmark.landmark):
lm_list.append(lm)
# array to hold true or false if finger is folded
# checking if all fingers are folded
finger_fold_status = finger_count(lm_list)
trigger_sign(finger_fold_status)
print(finger_fold_status)
mp_draw.draw_landmarks(
img,
hand_landmark,
mp_hands.HAND_CONNECTIONS,
mp_draw.DrawingSpec((0, 0, 255), 2, 2),
mp_draw.DrawingSpec((0, 255, 0), 4, 2),
)
cv2.imshow("hand tracking", img)
key = cv2.waitKey(1)
if key == 32:
break
cv2.destroyAllWindows()
|
diganta121/PRO-C109
|
take_screenshot.py
|
take_screenshot.py
|
py
| 2,789 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36895861215
|
from __future__ import print_function
from builtins import chr
from builtins import zip
from builtins import map
from builtins import str
from builtins import filter
from builtins import range
from builtins import object
import getopt
import gzip
import locale
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import zlib
from functools import reduce
from glob import glob
import cfvtest
if hasattr(locale, 'getpreferredencoding'):
preferredencoding = locale.getpreferredencoding() or 'ascii'
else:
preferredencoding = 'ascii'
def is_encodable(s, enc=preferredencoding):
try:
s.encode(enc)
return True
except UnicodeError:
return False
fmt_info = {
# name:
# (hascrc, hassize, cancreate, available, istext, preferredencoding, iscoreutils)
'sha512':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha384':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha256':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha224':
(1, 0, 1, 1, 1, preferredencoding, 1),
'sha1':
(1, 0, 1, 1, 1, preferredencoding, 1),
'md5':
(1, 0, 1, 1, 1, preferredencoding, 1),
'bsdmd5':
(1, 0, 1, 1, 1, preferredencoding, 0),
'sfv':
(1, 0, 1, 1, 1, preferredencoding, 0),
'sfvmd5':
(1, 0, 1, 1, 1, preferredencoding, 0),
'csv':
(1, 1, 1, 1, 1, preferredencoding, 0),
'csv2':
(0, 1, 1, 1, 1, preferredencoding, 0),
'csv4':
(1, 1, 1, 1, 1, preferredencoding, 0),
'crc':
(1, 1, 1, 1, 1, preferredencoding, 0),
'par':
(1, 1, 0, 1, 0, 'utf-16-le', 0),
'par2':
(1, 1, 0, 1, 0, preferredencoding, 0),
'torrent':
(1, 1, 1, 1, 0, 'utf-8', 0),
}
def fmt_hascrc(f):
return fmt_info[f][0]
def fmt_hassize(f):
return fmt_info[f][1]
def fmt_cancreate(f):
return fmt_info[f][2]
def fmt_available(f):
return fmt_info[f][3]
def fmt_istext(f):
return fmt_info[f][4]
def fmt_preferredencoding(f):
return fmt_info[f][5]
def fmt_iscoreutils(f):
return fmt_info[f][6]
def allfmts():
return list(fmt_info.keys())
def allavailablefmts():
return list(filter(fmt_available, allfmts()))
def allcreatablefmts():
return list(filter(fmt_cancreate, allavailablefmts()))
def coreutilsfmts():
return list(filter(fmt_iscoreutils, allfmts()))
class rcurry(object):
def __init__(self, func, *args, **kw):
self.curry_func = func
self.curry_args = args
self.curry_kw = kw
def __call__(self, *_args, **_kwargs):
kw = self.curry_kw.copy()
kw.update(_kwargs)
return self.curry_func(*(_args + self.curry_args), **kw)
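# Illustrative usage (not from the original file): rcurry appends its bound
# arguments to the right of the call-time arguments, so
#   sub10 = rcurry(operator.sub, 10)
#   sub10(3)   # operator.sub(3, 10) == -7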
def pathfind(p, path=os.environ.get('PATH', os.defpath).split(os.pathsep)):
for d in path:
if os.path.exists(os.path.join(d, p)):
return 1
def pathjoin_and_mkdir(*components):
"""Join components of a filename together and create directory to contain the file, if needed.
"""
result = os.path.join(*components)
path = os.path.split(result)[0]
if not os.path.exists(path):
os.makedirs(path)
return result
def readfile(fn, textmode=False):
if textmode:
mode = 't'
else:
mode = 'b'
with open(fn, 'r' + mode) as f:
d = f.read()
return d
def writefile(fn, data):
with open(fn, 'wb') as f:
if data:
f.write(data)
def writefile_and_reopen(fn, data):
"""Write data to file, close, and then reopen readonly, and return the fd.
This is for the benefit of windows, where you need to close and reopen the
file as readonly in order for it to be openable simultaneously.
"""
writefile(fn, data)
f = open(fn, 'rb')
return f
class stats(object):
ok = 0
failed = 0
def logr(text):
logfile.write(text)
def log(text):
logr(text + '\n')
def test_log_start(cmd, kw):
log('*** testing ' + cmd + (kw and ' ' + str(kw) or ''))
def test_log_finish(cmd, s, r, output, kw):
if r:
stats.failed += 1
print('\n>>> failed test:', cmd, (kw and ' ' + str(kw) or ''))
if output is not None:
print(output)
result = 'FAILED'
if not isinstance(r, int) or r != 1:
result += ' (%s)' % r
else:
stats.ok += 1
sys.stdout.write('.')
sys.stdout.flush()
result = 'OK'
result_str = '%s (%s)' % (result, s)
log(result_str)
if r:
print(result_str)
traceback_str = '\n'.join(traceback.format_stack())
log(traceback_str)
print(traceback_str)
if run_exit_early:
sys.exit(1)
log('')
def test_log_results(cmd, s, o, r, kw):
"""
cmd=command being tested (info only)
s=return status
o=output
r=result (false=ok, anything else=fail (anything other than 1 will be printed))
"""
test_log_start(cmd, kw)
log(o)
test_log_finish(cmd, s, r, o, kw)
def test_external(cmd, test):
# TODO: replace this with subprocess
from subprocess import getstatusoutput
s, o = getstatusoutput(cmd)
r = test(s, o)
test_log_results(cmd, s, o, r, None)
def test_generic(cmd, test, **kw):
# s, o = cfvtest.runcfv(cmd)
s, o = cfvtest.runcfv(*(cmd,), **kw)
r = test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd, s, o, r, kw)
class cst_err(Exception):
pass
def cfv_stdin_test(cmd, file):
s1 = s2 = None
o1 = o2 = ''
r = 0
try:
s1, o1 = cfvtest.runcfv(cmd + ' ' + file)
if s1:
raise cst_err(2)
s2, o2 = cfvtest.runcfv(cmd + ' -', stdin=file)
if s2:
raise cst_err(3)
x = re.search(r'^([^\r\n]*)' + re.escape(file) + r'(.*)$[\r\n]{0,2}^-: (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o1, re.M | re.DOTALL)
if not x:
raise cst_err(4)
x2 = re.search(r'^' + re.escape(x.group(1)) + r'[\t ]*' + re.escape(x.group(2)) + r'$[\r\n]{0,2}^-: (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o2, re.M)
if not x2:
raise cst_err(5)
except cst_err as er:
r = er
test_log_results('stdin/out of ' + cmd + ' with file ' + file, (s1, s2), o1 + '\n' + o2, r, None)
def cfv_stdin_progress_test(t, file):
s1 = s2 = None
o1 = o2 = c1 = c2 = ''
r = 0
dir = tempfile.mkdtemp()
try:
try:
cf1 = os.path.join(dir, 'cf1.' + t)
cf2 = os.path.join(dir, 'cf2.' + t)
s1, o1 = cfvtest.runcfv('%s --progress=yes -C -t %s -f %s %s' % (cfvcmd, t, cf1, file))
if s1:
raise cst_err(2)
s2, o2 = cfvtest.runcfv('%s --progress=yes -C -t %s -f %s -' % (cfvcmd, t, cf2), stdin=file)
if s2:
raise cst_err(3)
if t != 'csv2': # csv2 has only filesize, hence checksum never happens, so no progress
x = re.match(re.escape(file) + r' : (\.{20}[-\b.#\\|/]*)[ \r\n]+' + '\x1b\\[K' + re.escape(cf1) + r': (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o1, re.M | re.DOTALL)
if not x:
raise cst_err(4)
x2 = re.match(r' : (\.[-\b.#/|\\]*)[\t ]*[ \r\n]+' + '\x1b\\[K' + re.escape(cf2) + r': (\d+) files, (\d+) OK. [\d.]+ seconds, [\d.]+K(/s)?$', o2, re.M)
if not x2:
raise cst_err(5)
if t == 'crc':
c1 = readfile(cf1, textmode=True).replace(file, ' ' * len(file))
else:
c1 = readfile(cf1, textmode=True).replace(file, '')
c2 = readfile(cf2, textmode=True)
c1 = remove_varying_comments(t, c1)
c2 = remove_varying_comments(t, c2)
if c1 != c2:
raise cst_err(6)
except cst_err as er:
r = er
test_log_results('progress=yes stdin/out of ' + t + ' with file ' + file, (s1, s2), o1 + '\n' + o2 + '\n--\n' + c1 + '\n' + c2, r, None)
finally:
shutil.rmtree(dir)
def rx_test(pat, str):
if re.search(pat, str):
return 0
return 1
def status_test(s, o, expected=0):
if s == expected:
return 0
return 1
rx_Begin = r'^(?:.* )?(\d+) files, (\d+) OK'
rx_unv = r', (\d+) unverified'
rx_notfound = r', (\d+) not found'
rx_ferror = r', (\d+) file errors'
rx_bad = r', (\d+) bad(crc|size)'
rx_badcrc = r', (\d+) badcrc'
rx_badsize = r', (\d+) badsize'
rx_cferror = r', (\d+) chksum file errors'
rx_misnamed = r', (\d+) misnamed'
rx_End = r'(, \d+ differing cases)?(, \d+ quoted filenames)?. [\d.]+ seconds, [\d.]+K(/s)?$'
rxo_TestingFrom = re.compile(r'^testing from .* \((.+?)\b.*\)[\n\r]*$', re.M)
def optionalize(s):
return '(?:%s)?' % s
rx_StatusLine = rx_Begin + ''.join(map(optionalize, [rx_badcrc, rx_badsize, rx_notfound, rx_ferror, rx_unv, rx_cferror, rx_misnamed])) + rx_End
class OneOf(object):
def __init__(self, *possibilities):
self.possible = possibilities
def __eq__(self, a):
return a in self.possible
def __repr__(self):
return 'OneOf' + repr(self.possible)
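# Example: OneOf(3, 4) == 3 -> True, OneOf(3, 4) == 5 -> False; used below when
# a count legitimately depends on os.listdir ordering.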
def intize(s):
return s and int(s) or 0
def icomp(foo):
exp, act = foo
if exp == -1:
return False
return exp != act
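# icomp treats an expected value of -1 as "don't care", e.g.
#   icomp((-1, 7)) -> False (ignored)    icomp((2, 7)) -> True (mismatch)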
def tail(s):
# the last line might not be what we want, since stdout and stderr can get mixed up in some cases.
lines = s.splitlines()
lines.reverse()
for line in lines:
if re.search(rx_StatusLine, line):
return line
return ''
re_sfv_comment = re.compile('^; Generated by .* on .*$', re.M | re.I)
re_crc_comment = re.compile('^Generated at: .*$', re.M | re.I)
def remove_varying_comments(t, text):
if t in ('sfv', 'sfvmd5'):
text = re_sfv_comment.sub('', text, 1)
elif t == 'crc':
text = re_crc_comment.sub('', text, 1)
return text
def cfv_test(s, o, op=operator.gt, opval=0):
x = re.search(rx_Begin + rx_End, tail(o))
if s == 0 and x and x.group(1) == x.group(2) and op(int(x.group(1)), opval):
return 0
return 1
def cfv_substatus_test(s, o, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0):
expected_status = (badcrc and 2) | (badsize and 4) | (notfound and 8) | (ferror and 16) | (unv and 32) | (cferror and 64)
if s & expected_status == expected_status and not s & 1:
return 0
return 'bad status expected %s got %s' % (expected_status, s)
def cfv_status_test(s, o, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0):
expected_status = (badcrc and 2) | (badsize and 4) | (notfound and 8) | (ferror and 16) | (unv and 32) | (cferror and 64)
if s == expected_status:
return 0
return 'bad status expected %s got %s' % (expected_status, s)
def cfv_all_test(s, o, files=-2, ok=0, unv=0, notfound=0, badcrc=0, badsize=0, cferror=0, ferror=0, misnamed=0):
x = re.search(rx_StatusLine, tail(o))
if x:
if files == -2:
files = reduce(operator.add, [ok, badcrc, badsize, notfound, ferror])
expected = [files, ok, badcrc, badsize, notfound, ferror, unv, cferror, misnamed]
actual = list(map(intize, x.groups()[:9]))
if not list(filter(icomp, zip(expected, actual))):
sresult = cfv_status_test(s, o, unv=unv, notfound=notfound, badcrc=badcrc, badsize=badsize, cferror=cferror, ferror=ferror)
if sresult:
return sresult
return 0
return 'expected %s got %s' % (expected, actual)
return 'status line not found in output'
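# Typical pattern below: test_generic(cmd, rcurry(cfv_all_test, ok=4)) asserts
# that the status line reports 4 files, all OK (rcurry is assumed to bind the
# given keyword arguments onto cfv_all_test before the final (s, o) call).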
def cfv_unv_test(s, o, unv=1):
x = re.search(rx_Begin + rx_unv + rx_End, tail(o))
if s != 0 and x and x.group(1) == x.group(2) and int(x.group(1)) > 0:
if unv and int(x.group(3)) != unv:
return 1
return 0
return 1
def cfv_unvonly_test(s, o, unv=1):
x = re.search(rx_Begin + rx_unv + rx_End, tail(o))
if s != 0 and x and int(x.group(3)) == unv:
return 0
return 1
def cfv_notfound_test(s, o, unv=1):
x = re.search(rx_Begin + rx_notfound + rx_End, tail(o))
if s != 0 and x and int(x.group(2)) == 0 and int(x.group(1)) > 0:
if int(x.group(3)) != unv:
return 1
return 0
return 1
def cfv_cferror_test(s, o, bad=1):
x = re.search(rx_Begin + rx_cferror + rx_End, tail(o))
if s != 0 and x and int(x.group(3)) > 0:
if bad > 0 and int(x.group(3)) != bad:
return 1
return 0
return 1
def cfv_bad_test(s, o, bad=-1):
x = re.search(rx_Begin + rx_bad + rx_End, tail(o))
if s != 0 and x and int(x.group(1)) > 0 and int(x.group(3)) > 0:
if bad > 0 and int(x.group(3)) != bad:
return 1
return 0
return 1
def cfv_typerestrict_test(s, o, t):
matches = rxo_TestingFrom.findall(o)
if not matches:
return 1
for match in matches:
if match != t:
return 1
return 0
def cfv_listdata_test(s, o):
if s == 0 and re.search('^data1\0data2\0data3\0data4\0$', o, re.I):
return 0
return 1
def joincurpath(f):
return os.path.join(os.getcwd(), f)
def cfv_listdata_abs_test(s, o):
if s == 0 and re.search('^' + re.escape('\0'.join(map(joincurpath, ['data1', 'data2', 'data3', 'data4']))) + '\0$', o, re.I):
return 0
return 1
def cfv_listdata_unv_test(s, o):
if s == 32 and re.search('^testfix.csv\0unchecked.dat\0$', o, re.I):
return 0
return 1
def cfv_listdata_bad_test(s, o):
if s & 6 and not s & ~6 and re.search('^(d2.)?test4.foo\0test.ext.end\0test2.foo\0test3\0$', o, re.I):
return 0
return 1
def cfv_version_test(s, o):
x = re.search(r'cfv v([\d.]+(?:\.dev\d+)?) -', o)
with open(os.path.join(cfvtest.testpath, os.pardir, 'Changelog'), 'rt') as f:
x3 = re.search(r' v([\d.]+(?:\.dev\d+)?):', f.readline())
if x:
log('cfv: ' + x.group(1))
if x3:
log('Changelog: ' + x3.group(1))
# if os.path.isdir(os.path.join(os.pardir, 'debian')):
# with open(os.path.join(os.pardir, 'debian', 'changelog'), 'rt') as f:
# x4 = re.search(r'cfv \(([\d.]+)-\d+\) ', f.readline())
# if x4:
# log('deb changelog: ' + x4.group(1))
# if not x or not x4 or x4.group(1) != x.group(1):
# return 1
if x and x3 and x.group(1) == x3.group(1):
return 0
return 1
def cfv_cftypehelp_test(s, o, expected):
if s != expected:
return 1
for tname in allfmts() + ['auto']:
if o.count(tname) < 1:
return 'type %s not found in output' % tname
return 0
def cfv_nooutput_test(s, o, expected=0):
if s != expected:
return 1
if o:
return 'output: %s' % (repr(o),)
return 0
def T_test(f, extra=None):
cmd = cfvcmd
if extra:
cmd += ' ' + extra
test_generic(cmd + ' -T -f test' + f, cfv_test)
test_generic(cmd + ' -i -T -f test' + f, cfv_test) # all tests should work with -i
test_generic(cmd + ' -m -T -f test' + f, cfv_test) # all tests should work with -m
test_generic(cmd + ' -T --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=n-r --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=n-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=a-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=2-a --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=y-r --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=y-a --list0=ok -f test' + f, cfv_listdata_abs_test, stderr='/dev/null')
test_generic(cmd + ' -T --showpaths=1-a --list0=ok -f test' + f, cfv_listdata_abs_test, stderr='/dev/null')
# ensure all verbose stuff goes to stderr:
test_generic(cmd + ' -v -T --list0=ok -f test' + f, cfv_listdata_test, stderr='/dev/null')
test_generic(cmd + ' -v -T --list0=unverified -f test' + f + ' unchecked.dat testfix.csv data1', cfv_listdata_unv_test, stderr='/dev/null')
# test progress stuff.
def progress_test(s, o):
if cfv_test(s, o):
return 1
if o.find('.' * 10) < 0:
return 2
return 0
def noprogress_test(s, o):
if cfv_test(s, o):
return 1
if o.find('.' * 10) >= 0:
return 2
return 0
if f.endswith('.csv2'): # csv2 has only filesize, hence checksum never happens, so no progress
test_generic(cmd + ' -T --progress=yes -f test' + f, noprogress_test)
else:
# test handling of COLUMNS env var #TODO: should actually check that the value is being respected...
os.environ['COLUMNS'] = '40'
try:
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
os.environ['COLUMNS'] = 'foobar'
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
finally:
del os.environ['COLUMNS']
test_generic(cmd + ' -T --progress=yes -f test' + f, progress_test)
test_generic(cmd + ' -T --progress=auto -f test' + f, noprogress_test)
test_generic(cmd + ' -T --progress=no -f test' + f, noprogress_test)
def gzC_test(f, extra=None, verify=None, t=None, d=None):
cmd = cfvcmd
if not t:
t = f
tmpd = tempfile.mkdtemp()
try:
f2 = os.path.join(tmpd, 'test.C.' + f + '.tmp.gz')
f = os.path.join(tmpd, 'test.C.' + f + '.gz')
if extra:
cmd += ' ' + extra
test_generic('%s -q -C -t %s -zz -f - %s' % (cmd, t, d), status_test, stdout=f2)
test_generic('%s -C -f %s %s' % (cmd, f, d), cfv_test)
try:
with gzip.open(f, 'rt') as ifd1:
if1 = ifd1.read()
except (IOError, zlib.error) as e:
if1 = '%s: %s' % (f, e)
try:
with gzip.open(f2, 'rt') as ifd2:
if2 = ifd2.read()
except (IOError, zlib.error) as e:
if2 = '%s: %s' % (f2, e)
if1 = remove_varying_comments(t, if1)
if2 = remove_varying_comments(t, if2)
r = if1 != if2
if r:
o = 'FILE1 %s:\n%s\nFILE2 %s:\n%s\n' % (f, if1, f2, if2)
else:
o = ''
test_log_results('zcompare %s %s' % (f, f2), r, o, r, None)
test_generic('%s -T -f %s' % (cmd, f), cfv_test)
test_generic('%s -zz -T -f -' % cmd, cfv_test, stdin=f)
if verify:
verify(f)
finally:
shutil.rmtree(tmpd)
def C_test(f, extra=None, verify=None, t=None, d='data?'):
gzC_test(f, extra=extra, t=t, d=d)
cmd = cfvcmd
if not t:
t = f
cfv_stdin_test(cmd + ' -t' + f + ' -C -f-', 'data4')
cfv_stdin_progress_test(f, 'data4')
tmpd = tempfile.mkdtemp()
try:
f = os.path.join(tmpd, 'test.C.' + f)
fgz = os.path.join(tmpd, f + '.gz')
if extra:
cmd += ' ' + extra
test_generic('%s -C -f %s %s' % (cmd, f, d), cfv_test)
test_generic('%s -T -f %s' % (cmd, f), cfv_test)
test_generic('%s -T -f -' % cmd, cfv_test, stdin=f)
with gzip.open(fgz, mode='wb') as of:
with open(f, 'rb') as in_file:
of.write(in_file.read())
test_generic('%s -zz -t%s -T -f -' % (cmd, t), cfv_test, stdin=fgz)
if verify:
verify(f)
finally:
shutil.rmtree(tmpd)
tmpd = tempfile.mkdtemp()
try:
test_generic('%s -p %s -C -f %s' % (cmd, tmpd, f), rcurry(cfv_test, operator.eq, 0))
finally:
os.rmdir(tmpd)
def C_test_encoding(enc):
d = tempfile.mkdtemp()
try:
with open(os.path.join(d, 'aoeu'), 'wt') as f2:
f2.write('a')
with open(os.path.join(d, 'kakexe'), 'wt') as f2:
f2.write('ba')
with open(os.path.join(d, 'foo bar.baz'), 'wt') as f2:
f2.write('baz')
test_generic(cfvcmd + ' --encoding=%s -v -C -p %s -t %s' % (enc, d, t), rcurry(cfv_all_test, ok=3))
test_generic(cfvcmd + ' --encoding=%s -v -T -p %s' % (enc, d,), rcurry(cfv_all_test, ok=3))
finally:
shutil.rmtree(d)
C_test_encoding('cp500')
C_test_encoding('utf-16be')
C_test_encoding('utf-16')
def create_funkynames(t, d, chrfunc, deep):
    num = 0
    for i in range(1, 256):
        n = chrfunc(i)
if n in (os.sep, os.altsep):
continue
if fmt_istext(t) and len(('a' + n + 'a').splitlines()) > 1: # if n is a line separator (note that in unicode, this is more than just \r and \n)
continue
if t == 'torrent' and n in ('/', '\\'):
continue # 'ValueError: path \ disallowed for security reasons'
# if t == 'torrent' and n in ('~',): n = 'foo'+n; #same
# if n == os.curdir: n = 'foo'+n # can't create a file of name '.', but 'foo.' is ok.
# if t in ('sfv','sfvmd5') and n==';': n = 'foo'+n; # ';' is comment character in sfv files, filename cannot start with it.
if t == 'crc' and n.isspace():
n += 'foo' # crc format can't handle trailing whitespace in filenames
n = '%02x' % i + n
try:
if deep:
os.mkdir(os.path.join(d, n))
try:
f = open(os.path.join(d, n, n), 'wb')
except Exception:
# if making the dir succeeded but making the file fails, remove the dir so it won't confuse the tests which count the number of items in the top dir.
os.rmdir(os.path.join(d, n))
raise
else:
f = open(os.path.join(d, n), 'wb')
# important that all the funky files be two bytes long,
# since that is the torrent piece size needed in order for
# the undecodable filenames without raw test to work.
# (If the piece size doesn't match the file size, then some
# files that it can find will still be marked bad since it
# can't find the rest of the piece.)
f.write(b'%02x' % i)
f.close()
except (EnvironmentError, UnicodeError):
pass # stupid filesystem doesn't allow the character we wanted, oh well.
else:
num += 1
return num
def C_funkynames_test(t):
def fschr(i):
return os.fsdecode(b'%c' % i)
def is_fmtencodable(s, enc=fmt_preferredencoding(t)):
return is_encodable(s, enc)
def is_fmtokfn(s):
if fmt_istext(t):
return len(('a' + s + 'a').splitlines()) == 1
return True
for deep in (0, 1):
d = tempfile.mkdtemp()
try:
num = create_funkynames(t, d, chr, deep=deep)
# numencodable = len(filter(lambda fn: os.path.exists(os.path.join(d,fn)), os.listdir(d)))
numencodable = len(list(filter(is_fmtencodable, os.listdir(d))))
# cfv -C, unencodable filenames on disk, ferror on unencodable filename and ignore it
numunencodable = num - numencodable
cfn = os.path.join(d, 'funky%s.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d, t, cfn), rcurry(cfv_all_test, files=num, ok=numencodable, ferror=numunencodable))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=numencodable, ok=numencodable))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=numencodable, ok=numencodable, unv=numunencodable))
os.unlink(cfn)
# cfv -C, unencodable filenames on disk, with --encoding=<something else> (eg, utf8), should work.
cfn = os.path.join(d, 'funky%s.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s --encoding=utf-8 -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d, t, cfn), rcurry(cfv_all_test, files=num, ok=num))
test_generic(cfvcmd + ' -v --encoding=utf-8 -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=num, ok=num))
test_generic(cfvcmd + ' -v --encoding=utf-8 -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=num, ok=num, unv=0))
finally:
shutil.rmtree(d)
d3 = tempfile.mkdtemp()
try:
cnum = create_funkynames(t, d3, fschr, deep=deep)
ulist = os.listdir(d3)
numundecodable = 0 # listdir always returns filenames of type str if we use a path of type str (and this is what we do)
okcnum = len(ulist) - numundecodable
dcfn = os.path.join(d3, 'funky3%s.%s' % (deep and 'deep' or '', t))
# cfv -C, undecodable filenames on disk, with --encoding=raw just put everything in like before
test_generic(cfvcmd + '%s --encoding=raw -v --piece_size_pow2=1 -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum))
# cfv -T, undecodable filenames on disk and in CF (same names), with --encoding=raw, read CF as raw strings and be happy
test_generic(cfvcmd + ' --encoding=raw -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum))
test_generic(cfvcmd + ' --encoding=raw -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, files=cnum, ok=cnum, unv=0))
# cfv -T, undecodable filenames on disk and in CF (same names), without raw, cferrors
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1, unv=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable,unv=numundecodable))
test_generic(cfvcmd + ' -v -m -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable))
test_generic(cfvcmd + ' -v -m -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_substatus_test, cferror=1, unv=1)) # rcurry(cfv_all_test,ok=okcnum,cferror=numundecodable,unv=numundecodable))
# TODO: needs "deep" -s
if not deep:
renamelist = []
numrenamed = 0
for fn in os.listdir(d3):
if os.path.join(d3, fn) == dcfn:
continue
newfn = 'ren%3s' % numrenamed
renamelist.append((fn, newfn))
os.rename(os.path.join(d3, fn), os.path.join(d3, newfn))
if deep:
os.rename(os.path.join(d3, newfn, fn), os.path.join(d3, newfn, newfn))
numrenamed += 1
# cfv -T, correct filenames on disk, undecodable filenames in CF: check with -s, with --encoding=raw, read CF as raw strings and be happy
if t != 'torrent':
test_generic(cfvcmd + ' --encoding=raw -v -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=cnum, misnamed=numrenamed))
if fmt_hassize(t):
test_generic(cfvcmd + ' --encoding=raw -v -m -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=cnum, misnamed=numrenamed))
cnum += 1
# okcnum += 1
ulist = os.listdir(d3)
okcnum = len(list(filter(is_fmtencodable, ulist)))
numerr = len(ulist) - okcnum
dcfn = os.path.join(d3, 'funky3%s2.%s' % (deep and 'deep' or '', t))
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, ok=okcnum, ferror=numerr))
for fn, newfn in renamelist:
if deep:
os.rename(os.path.join(d3, newfn, newfn), os.path.join(d3, newfn, fn))
os.rename(os.path.join(d3, newfn), os.path.join(d3, fn))
# cfv -T, undecodable filenames on disk, correct filenames in chksum file. want to check with -s, fix with -sn
if fmt_hassize(t):
test_generic(cfvcmd + ' -v -m -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
if t != 'torrent': # needs -s support on torrents
test_generic(cfvcmd + ' -v -s -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
if fmt_hascrc(t):
test_generic(cfvcmd + ' -v -s -n -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, misnamed=numrenamed))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum))
finally:
shutil.rmtree(d3)
d3 = tempfile.mkdtemp()
try:
cnum = create_funkynames(t, d3, fschr, deep=deep)
ulist = os.listdir(d3)
okcnum = len(list(filter(is_fmtokfn, list(filter(is_fmtencodable, ulist)))))
numerr = len(ulist) - okcnum
dcfn = os.path.join(d3, 'funky3%s3.%s' % (deep and 'deep' or '', t))
# cfv -C, undecodable(and/or unencodable) filenames on disk: without raw, ferror on undecodable filename and ignore it
test_generic(cfvcmd + '%s -v -C -p %s -t %s -f %s' % (deep and ' -rr' or '', d3, t, dcfn), rcurry(cfv_all_test, files=cnum, ok=okcnum, ferror=numerr))
test_generic(cfvcmd + ' -v -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum))
test_generic(cfvcmd + ' -v -u -T -p %s -f %s' % (d3, dcfn), rcurry(cfv_all_test, ok=okcnum, unv=numerr))
finally:
shutil.rmtree(d3)
def ren_test(f, extra=None, verify=None, t=None):
join = os.path.join
dir = tempfile.mkdtemp()
try:
dir2 = join(dir, 'd2')
basecmd = cfvcmd + ' -r -p ' + dir
if extra:
basecmd += ' ' + extra
cmd = basecmd + ' --renameformat="%(name)s-%(count)i%(ext)s"'
os.mkdir(dir2)
fls = [join(dir, 'test.ext.end'),
join(dir, 'test2.foo'),
join(dir, 'test3'),
join(dir2, 'test4.foo')]
flsf = [join(dir, 'test.ext-%i.end'),
join(dir, 'test2-%i.foo'),
join(dir, 'test3-%i'),
join(dir2, 'test4-%i.foo')]
flsf_1 = [join(dir, 'test.ext.end-%i'),
join(dir, 'test2.foo-%i'),
join(dir2, 'test4.foo-%i')]
flsf_2 = [join(dir, 'test3-%i')]
def flsw(t):
for fl in fls:
with open(fl, 'wb') as f2:
f2.write(t)
def flscmp(t, n, fls):
for fl in fls:
fn = n is not None and fl % n or fl
try:
with open(fn, 'rb') as f2:
d = f2.read()
r = d != t
o = repr(d)
except IOError as e:
r = 1
o = str(e)
test_log_results('cmp %s for %s' % (fn, t.decode('ascii')), r, o, r, None)
flsw(b'hello')
test_generic('%s -C -t %s' % (cmd, f), cfv_test)
flsw(b'1')
test_generic(basecmd + ' --showpaths=0 -v -T --list0=bad', cfv_listdata_bad_test, stderr='/dev/null')
test_generic(basecmd + ' --showpaths=0 -q -T --list0=bad', cfv_listdata_bad_test)
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'11')
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'123')
test_generic('%s -Tn' % cmd, cfv_bad_test)
flsw(b'63')
test_generic(cmd + ' --renameformat="%(fullname)s" -Tn', cfv_bad_test) # test for formats without count too
flsw(b'hello')
test_generic('%s -Tn' % cmd, cfv_test)
flscmp(b'1', 0, flsf)
flscmp(b'11', 1, flsf)
flscmp(b'123', 2, flsf)
flscmp(b'63', 1, flsf_1)
flscmp(b'63', 3, flsf_2)
flscmp(b'hello', None, fls)
finally:
shutil.rmtree(dir)
def search_test(t, test_nocrc=0, extra=None):
cfn = os.path.join(os.getcwd(), 'test.' + t)
hassize = fmt_hassize(t)
if test_nocrc:
hascrc = 0
cmd = cfvcmd + ' -m'
else:
hascrc = fmt_hascrc(t)
cmd = cfvcmd
if extra:
cmd += ' ' + extra
if not hascrc and not hassize:
# if using -m and type doesn't have size, make sure -s doesn't do anything silly
d = tempfile.mkdtemp()
try:
for n, n2 in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4, unv=4))
finally:
shutil.rmtree(d)
# then return, since all the following tests would be impossible.
return
d = tempfile.mkdtemp()
try:
def dont_find_same_file_twice_test(s, o):
if not (o.count('fOoO3') == 1 and o.count('fOoO4') == 1):
return str((o.count('fOoO3'), o.count('fOoO4')))
return cfv_all_test(s, o, ok=4, misnamed=4)
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
for n, n2 in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), dont_find_same_file_twice_test)
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=4))
test_generic(cmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4))
finally:
shutil.rmtree(d)
# the following tests two things:
# 1) that it will copy/link to a file that is already OK rather than just renaming it again
# 2) that it doesn't use the old cached value of a file's checksum before it got renamed out of the way.
d = tempfile.mkdtemp()
try:
misnamed1 = misnamed2 = 4
if hassize and hascrc:
experrs = {'badcrc': 1, 'badsize': 2}
elif hassize:
experrs = {'badsize': 2, 'ok': 1}
misnamed1 = 3
misnamed2 = OneOf(3, 4) # this depends on what order os.listdir finds stuff. (could be 3 or 4)
else: # if hascrc:
experrs = {'badcrc': 3}
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
for n, n2 in zip([1, 3, 4], [4, 2, 1]):
shutil.copyfile('data%s' % n, os.path.join(d, 'data%s' % n2))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=1, **experrs))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=misnamed1))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=1, **experrs))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4, misnamed=misnamed2))
test_generic(cmd + ' -v -u -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=4))
finally:
shutil.rmtree(d)
# test whether ferrors during searching are ignored
if hasattr(os, 'symlink'):
d = tempfile.mkdtemp()
try:
for n, n2 in zip([4], [2]):
shutil.copyfile('data%s' % n, os.path.join(d, 'foo%s' % n2))
for n in string.ascii_lowercase:
os.symlink('noexist', os.path.join(d, n))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, notfound=4))
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3))
test_generic(cmd + ' -v -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=1, notfound=3))
finally:
shutil.rmtree(d)
# test if an error while renaming a misnamed file is properly handled
d = tempfile.mkdtemp()
ffoo = fdata4 = None
try:
with open('data4', 'rb') as f:
ffoo = writefile_and_reopen(os.path.join(d, 'foo'), f.read())
# note that we leave the file open. This is because windows
# allows renaming of files in a readonly dir, but doesn't allow
# renaming of open files. So if we do both the test will work
# on both nix and win.
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
try:
os.rename(os.path.join(d, 'foo'), os.path.join(d, 'foo2'))
print('rename of open file in read-only dir worked? skipping this test.')
except EnvironmentError:
# if the rename failed, then we're good to go for these tests..
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=1, notfound=3))
os.chmod(d, stat.S_IRWXU)
fdata4 = writefile_and_reopen(os.path.join(d, 'data4'), '')
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
test_generic(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=2, notfound=3))
finally:
os.chmod(d, stat.S_IRWXU)
if ffoo:
ffoo.close()
if fdata4:
fdata4.close()
shutil.rmtree(d)
# test if misnamed stuff and/or renaming stuff doesn't screw up the unverified file checking
d = tempfile.mkdtemp()
try:
shutil.copyfile('data4', os.path.join(d, 'foo'))
test_generic(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0))
test_generic(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0))
open(os.path.join(d, 'data1'), 'wb').close()
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
test_generic(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=0, **experrs))
test_generic(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=1, **experrs))
finally:
shutil.rmtree(d)
if fmt_cancreate(t):
# test deep handling
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'deep.' + t)
os.mkdir(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr'))
shutil.copyfile('data1', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
test_generic(cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn), rcurry(cfv_all_test, files=1, ok=1))
os.rename(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'), os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'Foo1'))
shutil.copyfile('data4', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
test_generic(cmd + ' -v -s -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
shutil.rmtree(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'AoEu.aOeU'))
os.mkdir(os.path.join(d, 'AoEu.aOeU', 'BOo.fArR'))
shutil.copyfile('data4', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'dAtA1'))
shutil.copyfile('data1', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'Foo1'))
test_generic(cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
test_generic(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=0, **experrs))
test_generic(cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1, misnamed=1))
test_generic(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), rcurry(cfv_all_test, files=1, ok=1))
finally:
shutil.rmtree(d)
if fmt_cancreate(t) and hassize:
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'foo.' + t)
os.mkdir(os.path.join(d, 'aoeu'))
dirsize = os.path.getsize(os.path.join(d, 'aoeu'))
with open(os.path.join(d, 'idth'), 'wb') as f:
f.write(b'a' * dirsize)
test_generic(cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn), rcurry(cfv_all_test, files=1, ok=1))
os.remove(os.path.join(d, 'idth'))
os.rename(os.path.join(d, 'aoeu'), os.path.join(d, 'idth'))
def dont_find_dir_test(s, o):
if not o.count('idth') == 1:
return str((o.count('idth'),))
return cfv_all_test(s, o, ok=0, notfound=1)
test_generic(cmd + ' -v -m -T -p %s -f %s' % (d, dcfn), dont_find_dir_test) # test not finding non-file things in normal mode
test_generic(cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn), dont_find_dir_test) # test not finding non-file things in search mode
finally:
shutil.rmtree(d)
def quoted_search_test():
d = tempfile.mkdtemp()
try:
join = os.path.join
with open(join(d, 'foo.sfv'), 'w') as f:
f.write(r""""data1" B2A9E441
"/data4" FA323C6D
"aa1/data1" B2A9E441
"c:/aa1/data4" FA323C6D
"aa3/data3" 841ADFA2
"\aa3\data4" FA323C6D
"c:\aa4\bb4\data1" B2A9E441
"aa4/bb4/data4" FA323C6D""")
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'foo4'))
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'aa1', 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa1', 'foo4'))
shutil.copyfile('data3', pathjoin_and_mkdir(d, 'aa3', 'foo3'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa3', 'foo4'))
shutil.copyfile('data1', pathjoin_and_mkdir(d, 'aa4', 'bb4', 'foo1'))
shutil.copyfile('data4', pathjoin_and_mkdir(d, 'aa4', 'bb4', 'foo4'))
test_generic(cfvcmd + r' -v --unquote=yes --strippaths=0 --fixpaths \\/ -s -T -p ' + d, rcurry(cfv_all_test, ok=8, misnamed=8))
finally:
shutil.rmtree(d)
def symlink_test():
dir = tempfile.mkdtemp()
dir1 = 'd1'
dir2 = 'd2'
try:
os.mkdir(os.path.join(dir, dir1))
os.mkdir(os.path.join(dir, dir2))
if hasattr(os, 'symlink'):
os.symlink(os.path.join(os.pardir, dir2), os.path.join(dir, dir1, 'l2'))
os.symlink(os.path.join(os.pardir, dir1), os.path.join(dir, dir2, 'l1'))
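            # the two links above form a directory cycle (d1 -> d2 -> d1);
            # -l/-L must terminate rather than recurse forever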
test_generic(cfvcmd + ' -l -r -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -L -r -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -l -r -C -p ' + dir, rcurry(cfv_test, operator.eq, 0))
test_generic(cfvcmd + ' -L -r -C -p ' + dir, rcurry(cfv_test, operator.eq, 0))
open(os.path.join(dir, dir1, 'foo'), 'wb').close()
open(os.path.join(dir, dir2, 'bar'), 'wb').close()
def r_unv_test(s, o):
if cfv_unvonly_test(s, o, 2):
return 1
if o.count('not verified') != 1:
return 1
return 0
test_generic(cfvcmd + ' -l -r -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -L -r -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -l -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -L -u -p ' + dir, r_unv_test)
def r_unv_verbose_test(s, o):
if cfv_unvonly_test(s, o, 2):
return 1
if o.count('not verified') != 2:
return 1
return 0
test_generic(cfvcmd + ' -l -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -L -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -l -r -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -L -r -uu -p ' + dir, r_unv_verbose_test)
finally:
shutil.rmtree(dir)
def deep_unverified_test():
dir = tempfile.mkdtemp()
try:
join = os.path.join
a = 'a'
a_C = join(a, 'C')
B = 'B'
B_ushallow = join(B, 'ushallow')
B_ushallow_d = join(B_ushallow, 'd')
u = 'u'
u_u2 = join(u, 'u2')
e = 'e'
e_es = join(e, 'es')
e2 = 'e2'
e2_e2s = join(e2, 'e2s')
e2_e2u = join(e2, 'e2u')
for d in a, a_C, B, B_ushallow, B_ushallow_d, u, u_u2, e, e_es, e2, e2_e2s, e2_e2u:
os.mkdir(join(dir, d))
datafns = ('DATa1', 'UnV1',
join(a, 'dAta2'), join(a, 'Unv2'), join(a_C, 'dATa4'), join(a_C, 'unV4'),
join(B, 'daTA3'), join(B, 'uNv3'),
join(B_ushallow, 'uNvs'), join(B_ushallow_d, 'unvP'), join(B_ushallow_d, 'datA5'),
join(u, 'uNVu'), join(u, 'UnvY'), join(u_u2, 'UNVX'),
join(e2_e2s, 'DaTaE'), join(e2_e2u, 'unVe2'),)
lower_datafns = list(map(lambda s: s.lower(), datafns))
for fn in datafns:
open(join(dir, fn), 'wb').close()
with open(join(dir, 'deep.md5'), 'wt') as f:
s = ('d41d8cd98f00b204e9800998ecf8427e *%s\n' * 6) % (
os.path.join('b', 'DaTa3'),
os.path.join('B', 'ushAllOw', 'D', 'daTa5'),
os.path.join('a', 'c', 'DatA4'),
os.path.join('A', 'dATA2'),
os.path.join('E2', 'e2S', 'DAtae'),
'daTA1')
f.write(s)
def r_test(s, o):
if cfv_test(s, o, operator.eq, 6):
return 1
if o.count('not verified') != 0:
return 1
return 0
def r_unv_test(s, o):
if cfv_unvonly_test(s, o, 10):
return 1
if o.count('not verified') != 8:
return 1
if o.find(os.path.join('e', '*')) >= 0:
return 1
if o.find(os.path.join('e2', '*')) >= 0:
return 1
return 0
def r_unv_verbose_test(s, o):
if cfv_unvonly_test(s, o, 10):
return 1
if o.count('not verified') != 10:
return 1
if o.find('*') >= 0:
return 1
return 0
test_generic(cfvcmd + ' -i -U -p ' + dir, r_test)
test_generic(cfvcmd + ' -i -u -p ' + dir, r_unv_test)
test_generic(cfvcmd + ' -i -uu -p ' + dir, r_unv_verbose_test)
test_generic(cfvcmd + ' -i -U -p ' + dir + ' ' + ' '.join(lower_datafns), r_test)
test_generic(cfvcmd + ' -i -u -p ' + dir + ' ' + ' '.join(lower_datafns), r_unv_verbose_test)
test_generic(cfvcmd + ' -i -uu -p ' + dir + ' ' + ' '.join(lower_datafns), r_unv_verbose_test)
finally:
shutil.rmtree(dir)
def test_encoding_detection():
datad = tempfile.mkdtemp()
d = tempfile.mkdtemp()
try:
datafns = ['data1', 'data3', 'data4']
destfns = [
'\u0061', # LATIN SMALL LETTER A
'\u00c4', # LATIN CAPITAL LETTER A WITH DIAERESIS
'\u03a0', # GREEK CAPITAL LETTER PI
'\u0470', # CYRILLIC CAPITAL LETTER PSI
'\u2605', # BLACK STAR
'\u3052', # HIRAGANA LETTER GE
'\u6708', # CJK UNIFIED IDEOGRAPH-6708
]
BOM = '\uFEFF'
utfencodings = ['utf-8', 'utf-16le', 'utf-16be', 'utf-32le', 'utf-32be', ]
fnerrs = fnok = 0
for i, destfn in enumerate(destfns):
srcfn = datafns[i % len(datafns)]
try:
shutil.copyfile(srcfn, os.path.join(datad, destfn))
except (EnvironmentError, UnicodeError):
fnerrs += 1
else:
fnok += 1
for t in allcreatablefmts():
if fmt_istext(t):
utf8cfn = os.path.join(d, 'utf8nobom.' + t)
test_generic(cfvcmd + ' -C --encoding=utf-8 -p %s -t %s -f %s' % (datad, t, utf8cfn), rcurry(cfv_all_test, ok=fnok))
chksumdata = readfile(utf8cfn).decode('utf-8')
for enc in utfencodings:
bommedcfn = os.path.join(d, enc + '.' + t)
try:
writefile(bommedcfn, (BOM + chksumdata).encode(enc))
except LookupError:
pass
else:
test_generic(cfvcmd + ' -T -p %s -t %s -f %s' % (datad, t, bommedcfn), rcurry(cfv_all_test, ok=fnok))
test_generic(cfvcmd + ' -T -p %s -f %s' % (datad, bommedcfn), rcurry(cfv_all_test, ok=fnok))
finally:
shutil.rmtree(d)
shutil.rmtree(datad)
def test_encoding2():
"""Non-trivial (actual non-ascii characters) encoding test.
These tests will probably always fail unless you use a unicode locale and python 2.3+.
"""
d = tempfile.mkdtemp()
d2 = tempfile.mkdtemp()
try:
cfn = os.path.join(d, '\u3070\u304B.torrent')
shutil.copyfile('testencoding2.torrent.foo', cfn)
datafns = [
('data1', '\u2605'),
('data2', '\u2606'),
('data3', '\u262E'),
('data4', '\u2600'),
]
fnerrs = fnok = 0
for srcfn, destfn in datafns:
try:
shutil.copyfile(srcfn, os.path.join(d2, destfn))
except (EnvironmentError, UnicodeError):
fnerrs += 1
else:
fnok += 1
test_generic(cfvcmd + ' -q -T -p ' + d, rcurry(cfv_status_test, notfound=fnok, ferror=fnerrs))
test_generic(cfvcmd + ' -v -T -p ' + d, rcurry(cfv_all_test, ok=0, notfound=fnok, ferror=fnerrs))
bakad = os.path.join(d, '\u3070\u304B')
os.mkdir(bakad)
for srcfn, destfn in datafns:
try:
shutil.copyfile(srcfn, os.path.join(bakad, destfn))
except (EnvironmentError, UnicodeError):
pass
test_generic(cfvcmd + ' -q -m -T -p ' + d, rcurry(cfv_status_test, ferror=fnerrs))
test_generic(cfvcmd + ' -v -m -T -p ' + d, rcurry(cfv_all_test, ok=fnok, ferror=fnerrs))
test_generic(cfvcmd + ' -v -m -u -T -p ' + d, rcurry(cfv_all_test, ok=fnok, ferror=fnerrs, unv=0))
if not fnerrs:
# if some of the files can't be found, checking of remaining files will fail due to missing pieces
test_generic(cfvcmd + ' -q -T -p ' + d, rcurry(cfv_status_test))
test_generic(cfvcmd + ' -v -T -p ' + d, rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + ' -v -u -T -p ' + d, rcurry(cfv_all_test, ok=4, unv=0))
raw_fnok = 0
files_fnok = files_fnerrs = 0
raw_files_fnok = raw_files_fnerrs = 0
dirn = list(filter(lambda s: not s.endswith('torrent'), os.listdir(d)))[0]
try:
files = [os.path.join(dirn, s) for s in os.listdir(os.path.join(d, dirn))]
except EnvironmentError:
files = []
else:
for fn in files:
flag_ok_raw = flag_ok_files = False
for srcfn, destfn in datafns:
if os.path.join('\u3070\u304B', destfn) == fn:
raw_fnok += 1
flag_ok_raw = True
try:
                    with open(os.path.join(d, fn), 'rb'):
                        pass  # only checking that the name is openable
except (EnvironmentError, UnicodeError):
files_fnerrs += 1
else:
files_fnok += 1
flag_ok_files = True
if flag_ok_files and flag_ok_raw:
raw_files_fnok += 1
else:
raw_files_fnerrs += 1
raw_fnerrs = len(datafns) - raw_fnok
# print(len(files), files)
# print('raw', raw_fnok, raw_fnerrs)
# print('files', files_fnok, files_fnerrs)
# print('raw_files', raw_files_fnok, raw_files_fnerrs)
if files:
test_generic(cfvcmd + ' -v -m -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=files_fnok, notfound=files_fnerrs))
if files_fnok == len(datafns):
test_generic(cfvcmd + ' -v -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=files_fnok, notfound=files_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -v -m -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=raw_files_fnok))
if raw_files_fnok == len(datafns):
test_generic(cfvcmd + ' --encoding=raw -v -T -p ' + d + ' ' + ' '.join(files), rcurry(cfv_all_test, ok=raw_files_fnok))
test_generic(cfvcmd + ' --encoding=raw -m -v -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, notfound=raw_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -m -v -u -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, unv=fnok - raw_fnok, notfound=raw_fnerrs))
if raw_fnok == len(datafns):
test_generic(cfvcmd + ' --encoding=raw -v -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, notfound=raw_fnerrs))
test_generic(cfvcmd + ' --encoding=raw -v -u -T -p ' + d, rcurry(cfv_all_test, ok=raw_fnok, unv=fnok - raw_fnok, notfound=raw_fnerrs))
except Exception:
test_log_results('test_encoding2', 'foobar', ''.join(traceback.format_exception(*sys.exc_info())), 'foobar', {}) # yuck. I really should switch this crap all to unittest ...
# finally:
shutil.rmtree(d2)
shutil.rmtree(d)
def largefile2GB_test():
# hope you have sparse file support ;)
fn = os.path.join('bigfile2', 'bigfile')
    try:
        with open(fn, 'wb') as f:
            f.write(b'hi')
            f.seek(2 ** 30)
            f.write(b'foo')
            f.seek(2 ** 31)
            f.write(b'bar')
test_generic(cfvcmd + ' -v -T -p %s' % 'bigfile2', rcurry(cfv_all_test, ok=6))
finally:
os.unlink(fn)
def largefile4GB_test():
# hope you have sparse file support ;)
fn = os.path.join('bigfile', 'bigfile')
    try:
        with open(fn, 'wb') as f:
            f.write(b'hi')
            f.seek(2 ** 30)
            f.write(b'foo')
            f.seek(2 ** 31)
            f.write(b'bar')
            f.seek(2 ** 32)
            f.write(b'baz')
test_generic(cfvcmd + ' -v -T -p %s' % 'bigfile', rcurry(cfv_all_test, ok=10))
finally:
os.unlink(fn)
def manyfiles_test(t):
try:
max_open = os.sysconf('SC_OPEN_MAX')
except (AttributeError, ValueError, OSError):
max_open = 1024
if not run_long_tests and max_open > 4096:
print('max open files is big (%i)' % max_open, end=' ')
max_open = 4096
print('clipping to %i. Use --long to try the real value' % max_open)
num = max_open + 1
d = tempfile.mkdtemp()
try:
for i in range(0, num):
n = '%04i' % i
with open(os.path.join(d, n), 'wt') as f:
f.write(n)
cfn = os.path.join(d, 'manyfiles.' + t)
test_generic(cfvcmd + ' -C -p %s -t %s -f %s' % (d, t, cfn), rcurry(cfv_all_test, ok=num))
test_generic(cfvcmd + ' -T -p %s -f %s' % (d, cfn), rcurry(cfv_all_test, ok=num))
finally:
shutil.rmtree(d)
def specialfile_test(cfpath):
try:
import threading
except ImportError:
return
d = tempfile.mkdtemp()
cfn = os.path.split(cfpath)[1]
try:
fpath = os.path.join(d, 'foo.bar')
try:
os.mkfifo(fpath)
except (AttributeError, EnvironmentError):
return
shutil.copyfile(cfpath, os.path.join(d, cfn))
def pusher(fpath):
with open(fpath, 'wb') as f:
f.write(b'a' * 0x4000)
f.flush()
time.sleep(0.1)
f.write(b'b' * 0x4000)
f.flush()
time.sleep(0.1)
f.write(b'c' * 0x4000)
t = threading.Thread(target=pusher, args=(fpath,))
t.start()
s, o = cfvtest.runcfv('%s --progress=yes -T -p %s -f %s' % (cfvcmd, d, cfn))
t.join()
r = 0
if s:
r = 1
elif o.count('#') > 1:
r = 'count(#) = %s' % (o.count('#'))
elif o.count('..'):
r = 3
test_log_results('specialfile_test(%s)' % cfn, s, o, r, None)
finally:
shutil.rmtree(d)
def unrecognized_cf_test():
def cfv_unrectype(s, o):
r = cfv_all_test(s, o, cferror=1)
if r:
return r
if not o.count('type'):
return "'type' not found in output"
if o.count('encoding'):
return "'encoding' found in output"
return 0
def cfv_unrecenc(s, o):
r = cfv_all_test(s, o, cferror=1)
if r:
return r
if not o.count('type'):
return "'type' not found in output"
if not o.count('encoding'):
return "'encoding' not found in output"
return 0
# data1 is not a valid checksum file, but it is valid latin1, so it should only generate an unrecognized type error
test_generic(cfvcmd + ' -T --encoding=latin1 -f data1', cfv_unrectype)
# data1 is not a valid checksum file, nor is it valid utf-16 (no bom, odd number of bytes), so it should generate an unrecognized type or encoding error
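    # (for example, b'abc'.decode('utf-16') raises UnicodeDecodeError: truncated data)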
test_generic(cfvcmd + ' -T --encoding=utf-16 -f data1', cfv_unrecenc)
def private_torrent_test():
cmd = cfvcmd
tmpd = tempfile.mkdtemp()
try:
needle = b'7:privatei1'
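        # the needle is the start of a bencoded "private" key set to 1:
        # '7:private' is the 7-byte string key and 'i1' opens the integer
        # value, which bencode terminates with 'e' ('7:privatei1e' in full)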
f = os.path.join(tmpd, 'test.torrent')
test_generic('%s -C -f %s data1' % (cmd, f), cfv_test)
data = readfile(f)
test_log_results('should not contain private flag', 0, repr(data), needle in data, None)
f = os.path.join(tmpd, 'test2.torrent')
test_generic('%s --private_torrent -C -f %s data1' % (cmd, f), cfv_test)
data = readfile(f)
test_log_results('should contain private flag', 0, repr(data), needle not in data, None)
finally:
shutil.rmtree(tmpd)
def all_unittest_tests():
if not run_internal:
return 0
test_log_start('all_unittests_suite', None)
from unittest import TextTestRunner
suite = cfvtest.all_unittests_suite()
runner = TextTestRunner(stream=logfile, descriptions=1, verbosity=2)
result = runner.run(suite)
if not result.wasSuccessful():
r = '%i failures, %i errors' % tuple(map(len, (result.failures, result.errors)))
else:
r = 0
test_log_finish('all_unittests_suite', not result.wasSuccessful(), r, None, None)
return len(result.failures) + len(result.errors)
run_internal = 1
run_long_tests = 0
run_unittests_only = 0
run_exit_early = 0
def show_help_and_exit(err=None):
if err:
print('error:', err)
print()
print('usage: test.py [-i|-e] [--long] [--unit] [--exit-early] [cfv]')
print(' -i run tests internally')
    print(' -e launch separate cfv process for each test')
print(' --long include tests that may use large amounts of CPU or disk')
print(' --unit run only unittests, no integration tests')
print(' --exit-early exit after first error')
print(' --help show this help')
print()
print('default [cfv] is:', cfvtest.cfvfn)
print('default run mode is:', run_internal and 'internal' or 'external')
sys.exit(1)
try:
optlist, args = getopt.getopt(sys.argv[1:], 'ie', ['long', 'help', 'unit', 'exit-early'])
except getopt.error as e:
show_help_and_exit(e)
if len(args) > 1:
show_help_and_exit('too many arguments')
for o, a in optlist:
if o == '--help':
show_help_and_exit()
elif o == '--long':
run_long_tests = 1
elif o == '--unit':
run_unittests_only = 1
elif o == '--exit-early':
run_exit_early = 1
elif o == '-i':
run_internal = 1
elif o == '-e':
run_internal = 0
else:
show_help_and_exit('bad opt %r' % o)
cfvtest.setcfv(fn=args and args[0] or None, internal=run_internal)
if run_unittests_only:
logfile = sys.stdout
all_unittest_tests()
sys.exit()
# set everything to default in case user has different in config file
cfvcmd = '-ZNVRMUI --unquote=no --fixpaths="" --strippaths=0 --showpaths=auto-relative --progress=no --announceurl=url --noprivate_torrent'
logfile = open(os.path.join(tempfile.gettempdir(), 'cfv_%s_test-%s.log' % (cfvtest.ver_cfv, time.strftime('%Y%m%dT%H%M%S'))), 'wt')
def all_tests():
stats.ok = stats.failed = 0
symlink_test()
deep_unverified_test()
for fmt in coreutilsfmts():
ren_test(fmt)
ren_test('md5', extra='-rr')
ren_test('bsdmd5')
ren_test('sfv')
ren_test('sfvmd5')
ren_test('csv')
ren_test('csv2')
ren_test('csv4')
ren_test('crc')
ren_test('torrent')
for t in allavailablefmts():
if t != 'torrent':
search_test(t)
search_test(t, test_nocrc=1)
# search_test('torrent',test_nocrc=1,extra='--strip=1')
quoted_search_test()
for fmt in coreutilsfmts():
T_test('.' + fmt)
T_test('.md5.gz')
T_test('comments.md5')
T_test('.bsdmd5')
# test par spec 1.0 files:
T_test('.par')
T_test('.p01')
# test par spec 0.9 files:
T_test('v09.par')
T_test('v09.p01')
T_test('.par2')
T_test('.vol0+1.par2')
T_test('.csv')
T_test('.sfv')
T_test('noheader.sfv')
T_test('.sfvmd5')
T_test('.csv2')
T_test('.csv4')
T_test('.crc')
T_test('nosize.crc')
T_test('nodims.crc')
T_test('nosizenodimsnodesc.crc')
for fmt in coreutilsfmts():
T_test('crlf.' + fmt)
T_test('crlf.bsdmd5')
T_test('crlf.csv')
T_test('crlf.csv2')
T_test('crlf.csv4')
T_test('crlf.sfv')
T_test('noheadercrlf.sfv')
T_test('crlf.crc')
for fmt in coreutilsfmts():
T_test('crcrlf.' + fmt)
T_test('crcrlf.bsdmd5')
T_test('crcrlf.csv')
T_test('crcrlf.csv2')
T_test('crcrlf.csv4')
T_test('crcrlf.sfv')
T_test('noheadercrcrlf.sfv')
T_test('crcrlf.crc')
for strip in (0, 1):
T_test('.torrent', extra='--strip=%s' % strip)
T_test('smallpiece.torrent', extra='--strip=%s' % strip)
T_test('encoding.torrent', extra='--strip=%s' % strip)
def cfv_torrentcommentencoding_test(s, o):
r = cfv_all_test(s, o, ok=1)
if r:
return r
tcount = o.count('Test_Comment-Text.')
if tcount != 1:
return 'encoded text count: %s' % tcount
return 0
test_generic(cfvcmd + ' -T -v -f testencodingcomment.torrent', cfv_torrentcommentencoding_test)
test_encoding2()
test_encoding_detection()
unrecognized_cf_test()
# test handling of directory args in recursive testmode. (Disabled since this isn't implemented, and I'm not sure if it should be. It would change the meaning of cfv *)
# test_generic(cfvcmd + ' -r a', cfv_test)
# test_generic(cfvcmd + ' -ri a', cfv_test)
# test_generic(cfvcmd + ' -ri A', cfv_test)
# test_generic(cfvcmd + ' -rm a', cfv_test)
# test_generic(cfvcmd + ' -rim a', cfv_test)
# test_generic(cfvcmd + ' -r a/C', cfv_test)
# test_generic(cfvcmd + ' -ri A/c', cfv_test)
# test handling of testfile args in recursive testmode
test_generic(cfvcmd + ' -r -p a ' + os.path.join('C', 'foo.bar'), cfv_test)
test_generic(cfvcmd + ' -ri -p a ' + os.path.join('c', 'fOo.BaR'), cfv_test)
test_generic(cfvcmd + ' -r -u -p a ' + os.path.join('C', 'foo.bar'), cfv_test)
test_generic(cfvcmd + ' -ri -u -p a ' + os.path.join('c', 'fOo.BaR'), cfv_test)
def cfv_notfound_or_bad_test(path):
if os.path.exists(path):
return cfv_bad_test
else:
return cfv_notfound_test
test_generic(cfvcmd + ' --strippaths=0 -T -f teststrip0.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=1 -T -f teststrip1.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=2 -T -f teststrip2.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=all -T -f teststrip-1.csv4', cfv_test)
test_generic(cfvcmd + ' --strippaths=none -T -f teststrip-none.csv4', cfv_notfound_or_bad_test('/data1'))
test_generic(cfvcmd + r' --strippaths=0 --fixpaths \\/ -T -f testdrivestrip.md5', rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + r' --strippaths=0 --unquote=yes --fixpaths \\/ -T -f testdrivestripquoted.md5', rcurry(cfv_all_test, ok=4))
test_generic(cfvcmd + r' --strippaths=0 --unquote=yes --fixpaths \\/ -T -f testdrivestripquoted.md5 data1 data3 data4', rcurry(cfv_all_test, ok=3))
test_generic(cfvcmd + ' -i -T -f testcase.csv', cfv_test)
test_generic(cfvcmd + ' -T --unquote=yes -f testquoted.sfv', cfv_test)
test_generic(cfvcmd + ' -i --unquote=yes -T -f testquotedcase.sfv', cfv_test)
test_generic(cfvcmd + ' -i --unquote=yes -T -f testquotedcase.sfv DaTa1 ' + os.path.join('a', 'C', 'Foo.bar'), rcurry(cfv_all_test, ok=2))
test_generic(cfvcmd + ' -i -T -f testquoted.csv4', cfv_test)
test_generic(cfvcmd + r' --fixpaths \\/ -T -f testfix.csv', cfv_test)
test_generic(cfvcmd + r' --fixpaths \\/ -T -f testfix.csv4', cfv_test)
test_generic(cfvcmd + r' -i --fixpaths \\/ -T -f testfix.csv4', cfv_test)
C_test('bsdmd5', '-t bsdmd5') # ,verify=lambda f: test_generic('md5 -c ' + f, status_test)) #bsd md5 seems to have no way to check, only create
for fmt in coreutilsfmts():
if pathfind(fmt + 'sum'): # don't report pointless errors on systems that don't have e.g. sha1sum
def coreutils_verify(f):
test_external(fmt + 'sum -c ' + f, status_test)
else:
print('skipping %s verify using external tool %ssum, as it is not installed.' % (fmt, fmt))
coreutils_verify = None
C_test(fmt, verify=coreutils_verify)
C_test('csv')
if pathfind('cksfv'): # don't report pointless errors on systems that don't have cksfv
def sfvverify(f):
test_external('cksfv -f ' + f, status_test)
else:
print('skipping sfv verify using external tool cksfv, as it is not installed.')
sfvverify = None
C_test('sfv', verify=sfvverify)
C_test('sfvmd5', '-t sfvmd5')
C_test('csv2', '-t csv2')
C_test('csv4', '-t csv4')
C_test('crc')
private_torrent_test()
# test_generic('../cfv -V -T -f test.md5', cfv_test)
# test_generic('../cfv -V -tcsv -T -f test.md5', cfv_test)
for t in allavailablefmts():
if fmt_istext(t):
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, cferror=1))
else:
if t == 'par':
try:
                    open('data1'.encode('utf-16le').decode('utf-16be'), 'rb').close()
except UnicodeError:
nf = 0
err = 4
except Exception:
nf = 4
err = 0
test_generic(cfvcmd + ' --encoding=utf-16be -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, cferror=4))
test_generic(cfvcmd + ' --encoding=cp500 -i -T -f test.' + t, rcurry(cfv_all_test, cferror=4))
else:
try:
                    open(b'data1'.decode('cp500'), 'rb').close()
except UnicodeError:
nf = 0
err = 4
except Exception:
nf = 4
err = 0
test_generic(cfvcmd + ' --encoding=cp500 -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
test_generic(cfvcmd + ' --encoding=cp500 -i -T -f test.' + t, rcurry(cfv_all_test, notfound=nf, ferror=err))
if fmt_cancreate(t):
C_funkynames_test(t)
manyfiles_test(t)
for fn in glob(os.path.join('fifotest', 'fifo.*')):
specialfile_test(fn)
test_generic(cfvcmd + ' -m -v -T -t sfv', lambda s, o: cfv_typerestrict_test(s, o, 'sfv'))
test_generic(cfvcmd + ' -m -v -T -t sfvmd5', lambda s, o: cfv_typerestrict_test(s, o, 'sfvmd5'))
test_generic(cfvcmd + ' -m -v -T -t bsdmd5', lambda s, o: cfv_typerestrict_test(s, o, 'bsdmd5'))
for fmt in coreutilsfmts():
test_generic(cfvcmd + ' -m -v -T -t ' + fmt, lambda s, o: cfv_typerestrict_test(s, o, fmt))
test_generic(cfvcmd + ' -m -v -T -t csv', lambda s, o: cfv_typerestrict_test(s, o, 'csv'))
test_generic(cfvcmd + ' -m -v -T -t par', lambda s, o: cfv_typerestrict_test(s, o, 'par'))
test_generic(cfvcmd + ' -m -v -T -t par2', lambda s, o: cfv_typerestrict_test(s, o, 'par2'))
test_generic(cfvcmd + ' -u -t md5 -f test.md5 data* unchecked.dat test.md5', cfv_unv_test)
test_generic(cfvcmd + ' -u -f test.md5 data* unchecked.dat', cfv_unv_test)
test_generic(cfvcmd + ' -u -f test.md5 data* unchecked.dat test.md5', cfv_unv_test)
test_generic(cfvcmd + r' -i -tcsv --fixpaths \\/ -Tu', lambda s, o: cfv_unv_test(s, o, None))
test_generic(cfvcmd + ' -T -t md5 -f non_existant_file', cfv_cferror_test)
test_generic(cfvcmd + ' -T -f ' + os.path.join('corrupt', 'missingfiledesc.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -f ' + os.path.join('corrupt', 'missingmain.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -m -f ' + os.path.join('corrupt', 'missingfiledesc.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -m -f ' + os.path.join('corrupt', 'missingmain.par2'), cfv_cferror_test)
test_generic(cfvcmd + ' -T -f foo.torrent', cfv_test)
test_generic(cfvcmd + ' -T --strip=none -p foo -f ../foo.torrent', rcurry(cfv_all_test, notfound=7))
for strip in (0, 1):
test_generic(cfvcmd + ' -T --strippaths=%s -p foo -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=7))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=4, badcrc=3))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=0, badcrc=2))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err1 -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=6, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2err1 -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=2))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2badsize -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=5, badsize=1, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2badsize -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=1, badcrc=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2missing -f %s' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=4, badcrc=2, notfound=1))
test_generic(cfvcmd + ' -T --strippaths=%s -p foo2missing -f %s foo1 foo4' % (strip, os.path.join(os.pardir, 'foo.torrent')), rcurry(cfv_all_test, ok=0, badcrc=2))
d = tempfile.mkdtemp()
try:
open(os.path.join(d, 'foo'), 'wb').close()
cmd = cfvcmd.replace(' --announceurl=url', '')
test_generic(cmd + ' -C -p %s -f foo.torrent' % d, rcurry(cfv_all_test, files=1, cferror=1))
test_log_results('non-creation of empty torrent on missing announceurl?', '', repr(os.listdir(d)), len(os.listdir(d)) > 1, {})
finally:
shutil.rmtree(d)
if run_long_tests:
largefile2GB_test()
largefile4GB_test()
test_generic(cfvcmd + ' -t aoeu', rcurry(cfv_cftypehelp_test, 1), stdout='/dev/null')
test_generic(cfvcmd + ' -t aoeu', rcurry(cfv_nooutput_test, 1), stderr='/dev/null')
test_generic(cfvcmd + ' -t help', rcurry(cfv_cftypehelp_test, 0), stderr='/dev/null')
test_generic(cfvcmd + ' -t help', rcurry(cfv_nooutput_test, 0), stdout='/dev/null')
test_generic(cfvcmd + ' -h', cfv_nooutput_test, stdout='/dev/null')
test_generic(cfvcmd + ' -h', cfv_version_test, stderr='/dev/null')
donestr = '\n>>> tests finished: ok: %i failed: %i' % (stats.ok, stats.failed)
log(donestr)
print(donestr)
return stats.failed
def copytree(src, dst, ignore=None):
if ignore is None:
ignore = []
for name in os.listdir(src):
if name in ignore:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.islink(srcname):
continue
elif os.path.isfile(srcname):
shutil.copy(srcname, dstname)
elif os.path.isdir(srcname):
os.mkdir(dstname)
copytree(srcname, dstname, ignore)
else:
print('huh?', srcname)
# copy the testdata into a temp dir in order to avoid .svn dirs breaking some tests
tmpdatapath = tempfile.mkdtemp()
try:
copytree(cfvtest.datapath, tmpdatapath, ignore=['.svn'])
os.chdir(tmpdatapath) # do this after the setcfv, since the user may have specified a relative path
failed = 0
print('>>> testing...')
failed += all_unittest_tests()
failed += all_tests()
if cfvtest.ver_mmap:
print('>>> testing without mmap...')
cfvtest.setenv('CFV_NOMMAP', 'x')
assert not cfvtest.ver_mmap
failed += all_tests()
sys.exit(failed)
finally:
shutil.rmtree(tmpdatapath)
|
cfv-project/cfv
|
test/test.py
|
test.py
|
py
| 74606 |
python
|
en
|
code
| 46 |
github-code
|
6
|
19432133852
|
import sqlite3
import os
with open('latitude.dat', 'r') as lats:
    lat = lats.read().splitlines()
with open('longitude.dat', 'r') as lons:
    lon = lons.read().splitlines()
with open('dates.dat', 'r') as dates:
    tmp = [i[1:-1] for i in dates.read().splitlines()]  # strip surrounding quotes
base = os.path.abspath(os.path.join('.', os.pardir))
conn = sqlite3.connect(base+'/firstsite/finder/static/finder/log.sqlite3')
cc = conn.cursor()
cc.execute('''CREATE TABLE IF NOT EXISTS log
(ID INTEGER PRIMARY KEY, IP TEXT, puerto TEXT, latitud TEXT, longitud TEXT, tiempo TEXT)''')
conn.commit()
for la, lo, ti in zip(lat, lon, tmp):
    # schema order is (IP, puerto, ...): the IP must come before the port
    sent_data = ('192.168.1.1', '9000', la, lo, ti)
    cc.execute('''INSERT INTO log VALUES(NULL,?,?,?,?,?)''', sent_data)
conn.commit()
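# Optional sanity check (illustrative addition, not part of the original
# import script): read back a few rows and close the connection.
for row in cc.execute('SELECT * FROM log LIMIT 3'):
    print(row)
conn.close()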
|
juliansibaja84/GPStracking
|
lib/data_parser.py
|
data_parser.py
|
py
| 784 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42663275089
|
import os
import numpy as np
from src.deid import plot_grid
if __name__=='__main__':
ctvol_directory = '/scratch/rlb61/2019-10-BigData-DEID/'
#First select a random set of 3,000 CT volumes
all_cts = os.listdir(ctvol_directory)
assert len(all_cts)==36316
assert len([x for x in all_cts if '.npz' in x])==36316
random_3k = np.random.choice(all_cts,size=3000,replace=False).tolist()
assert len(random_3k)==len(set(random_3k))
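    # Note (suggestion, not in the original run): call np.random.seed(...)
    # before np.random.choice if the same 3,000-volume subset must be
    # reproducible across runs.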
#Visualize these CTs
grid_plot_dir = '/home/rlb61/data/img-hiermodel2/results/results_2021/2021-04-13-Random-3000-Grid-and-MIP-Plots-of-RADChestCT'
if not os.path.exists(grid_plot_dir):
os.mkdir(grid_plot_dir)
#Make visualizations
print('Making visualizations')
for idx, ctvol_filename in enumerate(random_3k):
        print(ctvol_filename, round(idx * 100 / 3000, 2), 'percent')
plot_grid.make_grid_plot(ctvol_filename=ctvol_filename,
ctvol_directory=ctvol_directory,
grid_plot_dir=grid_plot_dir)
|
rachellea/explainable-ct-ai
|
runs/2021-04/2021-04-13-random-3000-grid-and-mip-plots-of-radchestct.py
|
2021-04-13-random-3000-grid-and-mip-plots-of-radchestct.py
|
py
| 1078 |
python
|
en
|
code
| 3 |
github-code
|
6
|
43219992037
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import gtk
class MyApp(object):
def __init__(self):
win = gtk.Window()
        box = gtk.HBox()  # horizontal box
        win.add(box)
        treeview = gtk.TreeView()
        column = gtk.TreeViewColumn("Column 1")
        treeview.append_column(column)
        box.pack_start(treeview)
        bt_quit = gtk.Button('Quit')
        bt_quit.connect('clicked', self.on_bt_quit_clicked)
        box.pack_start(bt_quit)
win.show_all()
def run(self):
gtk.main()
def on_bt_quit_clicked(self, *args):
gtk.main_quit()
MyApp().run()
|
texttest/storytext-selftest
|
pygtk/error_handling/tree_views/no_model/target_ui.py
|
target_ui.py
|
py
| 595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43347091848
|
import tikzplotlib
from tensorboard.data.experimental import ExperimentFromDev
import re
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pickle
def group_by_repetition(scalars):
runs = scalars.run.unique()
# each run has name job*_A --> no repetition
# or job*_Arepetition.*C --> repetition id indicated by second star
# job[0-9]+_([a-zA-Z0-9\._\/]*)repetition\.([0-9]+)([a-zA-Z0-9\._\/]*)
# job[0-9]+_([a-zA-Z0-9\._\/]*)
groups = defaultdict(list)
for run in runs:
match_repetition = re.match("[0-9\-\/]*job[0-9]+_([a-zA-Z0-9\._\/\-]*)repetition\.([0-9]+)([a-zA-Z0-9\._\/]*)", run)
match_no_repetition = re.match("[0-9\-\/]*job[0-9]+_([a-zA-Z0-9\._\/]*)", run)
if match_repetition:
A = match_repetition.group(1)
C = match_repetition.group(3)
groups[(A, C)].append(run)
elif match_no_repetition:
A = match_no_repetition.group(1)
groups[A].append(run)
else:
print("job name could not be match with a regex: {} , skipping".format(run))
print("Found {} groups:".format(len(groups)))
for x in groups:
if isinstance(x, tuple):
print("job*_{}repetition.*{}".format(x[0], x[1]))
elif isinstance(x, str):
print("job*_{}".format(x))
print("\n")
renamed_groups = defaultdict(list)
try:
with open("../tmp/default_short_names.pkl", 'rb') as f:
default_names = pickle.load(f)
except FileNotFoundError as e:
default_names = {}
for x in groups:
if x in default_names:
def_name = default_names[x]
suffix = "default = {}".format(def_name)
        else:
            def_name = ""  # avoid a NameError below when no default is stored and the input is empty
            suffix = ""
if isinstance(x, tuple):
name = input("Please give a short name to job*_{}repetition.*{}\n{}\n".format(x[0], x[1], suffix)) or def_name
elif isinstance(x, str):
name = input("Please give a short name to job*_{}\n{}\n".format(x, suffix)) or def_name
if name != "del":
renamed_groups[name] += groups[x]
default_names[x] = name
with open("../tmp/default_short_names.pkl", 'wb') as f:
pickle.dump(default_names, f)
return renamed_groups
def get_mean_std(data):
all_lengths = sorted([d.step.values[-1] for d in data])
all_starts = sorted([d.step.values[0] for d in data])
max_start = max(all_starts)
min_length = min(all_lengths)
max_length = max(all_lengths)
std_limit = all_lengths[-2]
x = np.arange(max_start, max_length)
data = [np.interp(x[x <= d.step.values[-1]], d.step, d["value"]) for d in data]
sum_arr = np.zeros(max_length - max_start)
count_arr = np.zeros(max_length - max_start, dtype=np.int32)
for d in data:
sum_arr[:len(d)] += d
count_arr[:len(d)] += 1
mean = sum_arr / count_arr
sum_arr = np.zeros(max_length - max_start)
count_arr = np.zeros(max_length - max_start, dtype=np.int32)
for d in data:
sum_arr[:len(d)] += (d - mean[:len(d)]) ** 2
count_arr[:len(d)] += 1
std = np.sqrt(sum_arr / count_arr)
return x, mean, std
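# Worked example for get_mean_std (a hedged reading of the code above): with
# run A covering steps 0..9 and run B covering steps 0..5, max_start = 0 and
# max_length = 9, so x = arange(0, 9). Run B only contributes to
# sum_arr/count_arr for x <= 5; past that point the mean and std come from
# run A alone, with count_arr tracking how many runs are still alive at each
# step.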
def plot_by_tag(fig, scalars, groups, tag, ylim=None):
ax = fig.add_subplot(111)
for name, runs in groups.items(): # for each group
data = [scalars[scalars.run.eq(run) & scalars.tag.eq(tag)] for run in runs]
x, mean, std = get_mean_std(data)
line, = ax.plot(x, mean, label=name)
ax.fill_between(x, mean - std, mean + std, color=line.get_color(), alpha=0.1)
scalar_name = tag.split("/")[-1].replace('_', ' ')
n_repetitions = set(len(runs) for runs in groups.values())
if len(n_repetitions) == 1:
suffix = " ({} repetitions)".format(n_repetitions.pop())
else:
suffix = ""
fig.suptitle(scalar_name + suffix)
ax.legend()
ax.set_xlabel("episodes")
ax.set_ylabel(scalar_name)
if ylim is not None:
ax.set_ylim(ylim)
def aggregate_runs(experiment_id, path):
exp = ExperimentFromDev(experiment_id)
scalars = exp.get_scalars()
groups = group_by_repetition(scalars)
available_groups_string = ""
for i, key in enumerate(groups):
available_groups_string += "{: 2d} \t {}\n".format(i, key)
fig = plt.figure(dpi=300)
done = False
while not done:
which = list(map(int, input("Which groups should be plotted? available are:\n" + available_groups_string).split(',')))
groups_to_plot = {key: value for i, (key, value) in enumerate(groups.items()) if i in which}
for tag, ylim in [
("evaluation_success_rate_percent_wrt_ep", (0, 105)),
("evaluation_success_rate_percent_wrt_tr", (0, 105)),
("exploration_success_rate_percent_wrt_ep", (0, 105)),
("exploration_success_rate_percent_wrt_tr", (0, 105)),
("evaluation_delta_distance_to_goal_wrt_ep", (0, 2.0)),
("evaluation_delta_distance_to_goal_wrt_tr", (0, 2.0)),
("exploration_delta_distance_to_goal_wrt_ep", (0, 2.0)),
("exploration_delta_distance_to_goal_wrt_tr", (0, 2.0)),
("evaluation_time_to_solve_wrt_ep", (0, 25)),
("evaluation_time_to_solve_wrt_tr", (0, 25)),
]:
plot_by_tag(fig, scalars, groups_to_plot, "collection/{}".format(tag), ylim=ylim)
fig.savefig(path + "/{}_{}_{}.png".format(tag, "_".join(map(str, which)), experiment_id))
# tikzplotlib.save(path + "/{}_{}_{}.tex".format(tag, "_".join(map(str, which)), experiment_id))
        fig.clf()
        done = input("make another plot? (yes/no)") == "no"
plt.close(fig)
if __name__ == '__main__':
import sys
experiment_id = sys.argv[1]
aggregate_runs(experiment_id, '/tmp')
|
charleswilmot/coppelia_sim_inverse_model
|
src/aggregate_runs.py
|
aggregate_runs.py
|
py
| 5,903 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4730565575
|
__author__ = 'Dih0r'
import AVT
#---------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------T1U2Exchange-------------------------------------------------------
#---------------------------------------------------------------------------------------------------------------------
# ALGORITHM 3
# works well!
def T1U2Exchange(user1,
user2,
epsilon,
beta,
ITEM_VALUES_TEST):
W1_I2 = user1.wishList & user2.unneededItemList
W2_I1 = user2.wishList & user1.unneededItemList
gainUser1 = 0.0
gainUser2 = 0.0
maxSwapU1_U2 = ()
maxSwapU2_U1 = ()
if len(W1_I2) != 0 and len(W2_I1) != 0:
#print("\n\n--------------------------------AVT TABLES FOR " + str(user1.id) + ", " + str(user2.id) + "----------------")
AVT1 = AVT.AVT(epsilon, beta, W1_I2, ITEM_VALUES_TEST)
AVT2 = AVT.AVT(epsilon, beta, W2_I1, ITEM_VALUES_TEST)
#print("--------------------------------END TABLES FOR " + str(user1.id) + ", " + str(user2.id) + "----------------\n\n")
for key1, avo1 in AVT1.table.items():
for key2, avo2 in AVT2.table.items():
if beta <= avo1.ub / avo2.lb and \
1.0/beta >= avo1.ub / avo2.lb and \
beta <= avo2.ub / avo1.lb and \
1.0/beta >= avo2.ub / avo1.lb:
# the tuples represent (user1_ID, user2_ID, receive_set_of_user1, receive_set_of_user2)
#eligibleSwapsForUser1.add((user1.id, user2.id, avo1.ubi, avo2.lbi))
if avo1.lb > gainUser1:
maxSwapU1_U2 = (user1.id, user2.id, avo1.ubi, avo2.lbi)
gainUser1 = avo1.lb
#eligibleSwapsForUser2.add((user1.id, user2.id, avo1.lbi, avo2.ubi))
if avo2.lb > gainUser2:
maxSwapU2_U1 = (user2.id, user1.id, avo2.ubi, avo1.lbi)
gainUser2 = avo2.lb
# (max recommendation tuple for user1 to swap with user 2, max gain for user1 when swapping with user2, max recommendation for user2 to swap with user 1, ...)
return (maxSwapU1_U2, gainUser1, maxSwapU2_U1, gainUser2)
|
mvladarean/swaprec
|
T1U2Exchange.py
|
T1U2Exchange.py
|
py
| 2,036 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71756196989
|
#!/usr/bin/env python
# File created on 07 May 2013
from __future__ import division
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, ApocaQIIME"
__credits__ = ["Yoshiki Vazquez Baeza"]
__license__ = "GPL"
__version__ = "1.6.0-dev"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "[email protected]"
__status__ = "Development"
from qiime.util import qiime_system_call, get_qiime_temp_dir
from qiime.format import format_mapping_file
from qiime.parse import parse_mapping_file
from qiime.util import parse_command_line_parameters, make_option
script_info = {}
script_info['brief_description'] = "Merge columns in a metadata mapping file"
script_info['script_description'] = "Use at your own risk"
script_info['script_usage'] = [("","","")]
script_info['output_description']= ""
script_info['required_options'] = [
make_option('-m', '--mapping_fp', type="existing_filepath", help='the input'
' filepath of the metadata mapping file.'),
make_option('-c', '--columns_to_merge', help='Columns separated by two '
'ampersands (&&) that will get merged', action='append', default=None)]
script_info['optional_options'] = [
make_option('-o', '--output_fp', type="new_filepath", help="the name of the"
" mapping file with the merged columns [default: %default]", default=
"merged_columns_mapping_file.txt")]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
columns_to_merge = opts.columns_to_merge
mapping_fp = opts.mapping_fp
output_fp = opts.output_fp
try:
data, headers, comments = parse_mapping_file(open(mapping_fp, 'U'))
except:
option_parser.error('Bro, that doesn\'t look like a mapping file')
for merging in columns_to_merge:
retrieve = lambda x: headers.index(x)
        indices = list(map(retrieve, merging.split('&&')))  # list() so the indices survive repeated iteration on Python 3
headers.append(''.join([headers[element] for element in indices]))
for line in data:
line.append(''.join([line[element] for element in indices]))
# this should never happen
assert len(headers) == len(data[0]), "Something went horribly wrong, "+\
"that's what you get for using non-unit-tested software"
lines = format_mapping_file(headers, data, comments)
fd = open(output_fp, 'w')
fd.writelines(lines)
fd.close()
if __name__ == "__main__":
main()
|
ElDeveloper/apocaqiime
|
scripts/merge_columns_in_mapping_file.py
|
merge_columns_in_mapping_file.py
|
py
| 2,420 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39722795672
|
from logger.logger import Logger
class Branch:
logger = None
master = 'master'
support = 'support'
develop = 'develop'
mainBranch = ''
developBranch = ''
def __init__(self, customer, config, version = ''):
self.logger = Logger.getInstance()
branchPrefix = customer.lower()
self.master = config.get('master', self.master)
self.develop = config.get('develop', self.develop)
self.support = config.get('support', self.support)
if version == '':
if branchPrefix == '':
self.mainBranch = self.master
self.developBranch = self.develop
else:
self.mainBranch = '/'.join([branchPrefix, self.master])
self.developBranch = '/'.join([branchPrefix, self.develop])
else:
if branchPrefix == '':
self.mainBranch = self.support + '_' + version
self.developBranch = self.develop + '_' + version
else:
self.mainBranch = '/'.join([branchPrefix, self.support + '_' + version])
self.developBranch = '/'.join([branchPrefix, self.develop + '_' + version])
self.logger.log('Init branch with: ' + str(self.develop) + ' | ' + str(self.master))
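# Illustrative results (a hedged sketch, assuming the Logger singleton is
# configured; the config defaults apply when the dict is empty):
#   Branch('', {})            -> mainBranch 'master',           developBranch 'develop'
#   Branch('Acme', {})        -> mainBranch 'acme/master',      developBranch 'acme/develop'
#   Branch('Acme', {}, '1.2') -> mainBranch 'acme/support_1.2', developBranch 'acme/develop_1.2'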
|
Dominik93/version-management
|
git/branch.py
|
branch.py
|
py
| 1,088 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20147878245
|
"""
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
"""
class Solution:
def reverse(self, x: int) -> int:
s = (x > 0) - (x < 0)
r = int(str(x*s)[::-1])
return r*s*(r < 2**31)
def main():
i1 = -8463847412
i2 = -123
i3 = 120
s = Solution()
print(s.reverse(i1))
print(s.reverse(i2))
print(s.reverse(i3))
if __name__ == '__main__':
main()
|
pansinyoung/pythod-leet
|
7_Reverse_Integer.py
|
7_Reverse_Integer.py
|
py
| 524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71049150269
|
from functools import reduce
import time
def add(x, y):
return x + y
# anonymous function
f = lambda x, y: x + y
# ternary expression
x, y = 2,3
r = x if x > y else y
# map
list_x = [1, 2, 3, 4]
m = map(lambda x: x*x, list_x)
print(list(m)) # [1, 4, 9, 16]
# map with multiple iterables
list_y = [2, 3, 4, 5]
m2 = map(lambda x, y: x*x + y , list_x, list_y)
print(list(m2)) # [3, 7, 13, 21]
# reduce: cumulative computation
r = reduce(lambda x, y: x+y, list_x)
print(r) # 10
r1 = reduce(lambda x, y: x+y, list_x, 10)
print(r1) # 20, with 10 as the initial value
# filter
list_x = [0, 1, 0, 2, 0, 1]
f = filter(lambda x: True if x==1 else False, list_x) # keep only the 1s
print(list(f)) # [1, 1]
# decorator (annotation-like syntax)
def print_current_time(func):
    def wrapper(*args, **kw): # variadic arguments
        print('func start')
        func(*args, **kw)
        print('func end')
    return wrapper
@print_current_time
def pf(name):
print('this is a func ', name)
pf('pf')
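# A hedged extension of the decorator example above (not from the original
# tutorial): functools.wraps keeps the wrapped function's metadata, and
# returning func's result lets decorated functions still produce values.
from functools import wraps
def passthrough(func):
    @wraps(func)
    def wrapper(*args, **kw):
        return func(*args, **kw)  # forward the return value
    return wrapper
@passthrough
def add2(x, y):
    return x + y
print(add2(1, 2)) # 3
print(add2.__name__) # 'add2', preserved by functools.wraps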
|
xxg3053/learn-python
|
lang/high.py
|
high.py
|
py
| 942 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32150085277
|
# Upload BOJ Gold-5 Stack problem #2504, "Value of Parentheses"
# exp = input()
# result = ""
# stack = []
# operator = []
# beforeValue = ""
# op = {
# ")":"(",
# "]":"[",
# "(": 2,
# "[": 3,
# }
# for value in exp:
# if value in ["(","["]:
# stack.append(value)
# if beforeValue:
# if beforeValue in ["(", "["]:
# operator.append("*")
# else:
# operator.append("+")
# operator.append("(")
# result += str(op[value])
# else:
# if stack[-1] == op[value]:
# stack.pop()
# while operator[-1] != "(":
# result += operator.pop()
# operator.pop()
# else:
# break
# beforeValue = value
# if stack:
# print(0)
# else:
# num_stack = []
# for value in result:
# if value == '*':
# num1 = num_stack.pop()
# num2 = num_stack.pop()
# num_stack.append(num1 * num2)
# elif value == '+':
# num1 = num_stack.pop()
# num2 = num_stack.pop()
# num_stack.append(num1 + num2)
# else:
# num_stack.append(int(value))
# print(num_stack[0])
#
bracket = input()
stack = []
answer = 0
tmp = 1
for i in range(len(bracket)):
if bracket[i] == "(":
stack.append(bracket[i])
tmp *= 2
elif bracket[i] == "[":
stack.append(bracket[i])
tmp *= 3
elif bracket[i] == ")":
if not stack or stack[-1] == "[":
answer = 0
break
if bracket[i-1] == "(":
answer += tmp
stack.pop()
tmp //= 2
else:
if not stack or stack[-1] == "(":
answer = 0
break
if bracket[i-1] == "[":
answer += tmp
stack.pop()
tmp //= 3
if stack:
print(0)
else:
print(answer)
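# Sanity check (hand-traced): for the input "(()[[]])([])" the loop adds
# 4 + 18 + 6 = 28, i.e. 2*(2 + 3*3) + 2*3.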
|
HS980924/Algorithm
|
src/7.Stack/B#2504_괄호의값.py
|
B#2504_괄호의값.py
|
py
| 2,009 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19109140061
|
from django.shortcuts import render, redirect
from time import strftime
def index(request):
data = {
"date": strftime("%B %d, %Y"), # automatically adds localtime() as parameter
"time": strftime("%I:%M %p")
}
return render(request,'myapp/index.html', data)
|
klandon94/django_intro
|
time_display/apps/myapp/views.py
|
views.py
|
py
| 286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12454720392
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# ChromeDriver configuration
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless') # Run in headless mode (no browser window)
# Start the ChromeDriver
driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
# Open the Google site
driver.get('https://www.google.com.br')
# Close the browser
driver.quit()
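# Note (an assumption, not from the original author): Selenium 4 removed the
# positional executable-path argument, so the equivalent call there would be:
#   from selenium.webdriver.chrome.service import Service
#   driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()),
#                             options=chrome_options)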
|
hericmr/OlhoVivo
|
sms.py
|
sms.py
|
py
| 481 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6814769507
|
### Exercise 2.1 | Math riddle
# The program draws two numbers from the range 0 to 99 (see below).
# It shows the two numbers and asks what their sum is (without revealing it). The user has to guess (well, compute in their head) the result.
# The program asks for the result repeatedly, until the user gives the correct answer.
from random import randint
x = randint(0,99) # draw number x from the range <0,99>
y = randint(0,99) # draw number y from the range <0,99>
wynik = x + y
licznik = 1
print(f'Wylosowane liczby to {x} i {y}')
odp = int(input('Podaj wynik działania: '))
while odp != wynik:
print('To nie jest prawidłowa odpowiedź')
licznik += 1
odp = int(input('Podaj wynik działania: '))
print(f'Gratulacje! Zgadłeś za {licznik} razem')
|
dawidradziwoniuk/python_zadania
|
zadanie_2.1.py
|
zadanie_2.1.py
|
py
| 769 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
42408411037
|
# Telegram bot for currency conversion: @valuta_course_bot
import telebot
from config import keys, TOKEN
from extensions import APIException, CurrencyConverter
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def function_start(message: telebot.types.Message):
bot.send_message(message.chat.id, f'Добро пожаловать,\t {message.chat.username}!')
bot.send_message(message.chat.id, 'Это бот для конвертации валют.')
bot.send_message(message.chat.id, 'Для начала работы воспользуйтесь подсказками бота /help')
@bot.message_handler(commands=['help'])
def function_help(message: telebot.types.Message):
text = 'Для конвертации введите 3 параметра через пробел:\n' \
'<Ваша валюта>\n<В какую валюту хотите перевести>\n' \
'<Количество переводимой валюты>\n' \
'(пример: рубль евро 1000)\n' \
'Обратите внимание - название валюты пишется\nв именительном падеже единственного числа!\n' \
'Посмотреть список доступных валют: /values'
bot.send_message(message.chat.id, text)
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
text = 'Доступные валюты:'
for key in keys.keys():
text = '\n'.join((text, key))
bot.reply_to(message, text)
@bot.message_handler(content_types=['text'])
def get_price(message: telebot.types.Message):
try:
values = message.text.split(' ')
if len(values) != 3:
raise APIException('Введите три параметра через пробел или\nвоспользуйтесь подсказками бота /help')
val_origin, val_base, amount = values
val_origin = val_origin.lower()
val_base = val_base.lower()
total_base = CurrencyConverter.get_price(val_origin, val_base, amount)
except APIException as e:
bot.reply_to(message, f'Ошибка пользователя.\n{e}')
except Exception as e:
bot.reply_to(message, f'Не удалось обработать команду\n{e}')
else:
text = f'{amount} {val_origin} = {round((total_base * float(amount)), 2)} {val_base}'
bot.send_message(message.chat.id, text)
bot.polling(none_stop=True)
|
TamaraRiga/Control-project-18.6
|
app.py
|
app.py
|
py
| 2,545 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
30477916720
|
# Lowest Common Ancestor in Binary Tree
def lca(root, n1, n2):
if root is None:
return None
if root.data == n1 or root.data == n2:
return root
leftAns = lca(root.left, n1, n2)
rightAns = lca(root.right, n1, n2)
if leftAns is not None and rightAns is not None:
return root
elif leftAns is not None and rightAns is None:
return leftAns
elif leftAns is None and rightAns is not None:
return rightAns
else:
return None
# T.C = O(N)
# S.C = O(height)
# Maximum difference between node and its ancestor
# Maximum path sum from any node
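# A minimal runnable sketch (not part of the original solution). Node is a
# hypothetical stand-in for the exercise's tree class; lca() only touches
# .data, .left and .right.
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

if __name__ == "__main__":
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    print(lca(root, 4, 5).data)  # 2
    print(lca(root, 4, 3).data)  # 1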
|
prabhat-gp/GFG
|
Binary Trees/Love Babbar/21_lca_tree.py
|
21_lca_tree.py
|
py
| 629 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32150077207
|
# By pushing the numbers 1 to n onto a stack and popping them off, we can
# produce a sequence.
# Assume the push order must strictly follow increasing order.
# Given an arbitrary sequence, we can determine whether it can be produced
# using a stack and, if so, in which order the push and pop operations must
# be performed.
# Write a program that computes this.
import sys
Num = []
stack = []
result = []
N = int(input())
for i in range(N):
Num.append(int(sys.stdin.readline()))
def numeric():
cnt = 1
for x in Num:
while(cnt <= x):
result.append('+')
stack.append(cnt)
cnt += 1
if x == stack[-1]:
stack.pop()
result.append('-')
else:
return "NO"
return ('\n'.join(result))
print(numeric())
|
HS980924/Algorithm
|
src/7.Stack/B#1874.py
|
B#1874.py
|
py
| 955 |
python
|
ko
|
code
| 2 |
github-code
|
6
|
40333382628
|
from loguru import logger
from gpiozero import Button
from fabiotobox.camera import Camera
from fabiotobox.diaporama import Diaporama
from fabiotobox.photohandler import PhotoHandler
from fabiotobox.tumblr import Tumblr
from enum import IntEnum
import pendulum
import time
SCREENSAVER_DELAY = 1
class PhotoFormat(IntEnum):
PHOTOBOX = 0
POLAROID = 1
class Mode(IntEnum):
PHOTOBOX = 0
DIAPORAMA = 1
class Fabiotobox:
def __init__(
self,
camera: Camera,
photo_handler: PhotoHandler,
diaporama: Diaporama,
tumblr: Tumblr,
shoot_button_port: int,
effect_button_port: int = None,
format_button_port: int = None,
event_title: str = "Test",
):
self.shoot_button = Button(shoot_button_port)
if effect_button_port:
self.effect_button = Button(effect_button_port)
if format_button_port:
self.format_button = Button(format_button_port)
self.camera = camera
self.photo_handler = photo_handler
self.diaporama = diaporama
self.tumblr = tumblr
self.photo_format = PhotoFormat.POLAROID
self.event_title = event_title
self.mode = Mode.PHOTOBOX
self.diaporama_countdown = pendulum.now("Europe/Paris")
def run(self):
self.shoot_button.when_held = self.camera.end
self.camera.start_preview()
self.reset_diaporama_countdown()
while True:
if self.is_diaporama_countdown_reached():
self.mode = Mode.DIAPORAMA
if self.mode is Mode.PHOTOBOX:
self.run_photobox()
else:
self.run_diaporama()
def run_photobox(self):
if self.shoot_button.is_pressed:
logger.debug("Button pressed for a photo")
photo = self.shoot_photo()
self.camera.display_image(photo)
time.sleep(3)
self.camera.undisplay_image()
if self.tumblr is not None:
logger.info("Sending {} to tumblr".format(photo))
self.tumblr.post_photo(photo, self.event_title, [])
self.reset_diaporama_countdown()
def run_diaporama(self):
if self.shoot_button.is_pressed:
logger.debug("Button pressed for exiting diaporama")
self.mode = Mode.PHOTOBOX
self.camera.undisplay_image()
self.reset_diaporama_countdown()
            time.sleep(1) # prevent the event from also being caught by the photobox
else:
if self.is_diaporama_countdown_reached():
logger.info("dirs : {}".format(len(self.diaporama.dirs)))
new_picture = self.diaporama.pick_photo()
                # Picture can be None: if so, just reset the countdown
if new_picture is not None:
self.camera.display_image(new_picture)
self.reset_diaporama_countdown()
def shoot_photo(self) -> str:
if self.photo_format == PhotoFormat.POLAROID:
pictures = self.camera.shoot(1)
photo = self.photo_handler.make_polaroid(pictures[0])
else:
pictures = self.camera.shoot(3)
photo = self.photo_handler.make_photostrip(pictures)
return photo
def reset_diaporama_countdown(self):
self.diaporama_countdown = pendulum.now("Europe/Paris").add(
minutes=SCREENSAVER_DELAY
)
def is_diaporama_countdown_reached(self) -> bool:
return self.diaporama_countdown < pendulum.now("Europe/Paris")
|
fabiolab/photobox
|
fabiotobox/fabiotobox.py
|
fabiotobox.py
|
py
| 3,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71802647227
|
import pefile
import sys
import os
import json
def locate_data_sections(pe):
data_sections = []
for section in pe.sections:
if section.Name == b'.text\x00\x00\x00':
data_sections.append({
'name': section.Name,
'virtual_address': hex(section.VirtualAddress),
'virtual_size': hex(section.Misc_VirtualSize),
'size': section.SizeOfRawData,
})
return data_sections
directory = sys.argv[1]
output = {}
try:
entries = os.listdir(directory)
for entry in entries:
pe = pefile.PE(os.path.join(directory, entry))
output[entry] = locate_data_sections(pe)
except:
pe = pefile.PE(sys.argv[1])
output[sys.argv[1]] = locate_data_sections(pe)
print(output)
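# Usage sketch (inferred from the sys.argv handling above):
#   python exe_analysis.py <directory>   # analyse every PE file in the directory
#   python exe_analysis.py <file>        # falls back to a single file on error
# Note: despite its name, locate_data_sections() currently collects only each
# binary's .text section.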
|
luiz-cesar/CDadosSeg
|
T2/Parte2/exe_analysis.py
|
exe_analysis.py
|
py
| 715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36767766109
|
'''
Author : knight_byte
File : A_Die_Roll.py
Created on : 2021-04-14 09:25:32
'''
from fractions import Fraction
def main():
y, w = map(int, input().split())
d = 6-max(y, w)+1
print(Fraction(d, 6) if d != 6 else "1/1")
if __name__ == '__main__':
main()
|
arbkm22/Codeforces-Problemset-Solution
|
Python/A_Die_Roll.py
|
A_Die_Roll.py
|
py
| 284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
716738610
|
import pytest
import json
from sovrin_client.test.cli.constants import INVALID_SYNTAX
from sovrin_client.test.cli.helper import createUuidIdentifier, addNym
attrib_name = 'dateOfBirth'
ATTRIBUTE_ADDED = 'Attribute added for nym {valid_dest}'
RETURNED_DATA = ['Found attribute', attrib_name, 'dayOfMonth', 'year', 'month']
ATTR_NOT_FOUND = 'Attr not found'
@pytest.fixture(scope="module")
def send_attrib(be, do, poolNodesStarted, trusteeCli):
valid_identifier = createUuidIdentifier()
invalid_identifier = createUuidIdentifier()
addNym(be, do, trusteeCli, idr=valid_identifier)
parameters = {
'attrib_name': attrib_name,
'valid_dest': valid_identifier,
'invalid_dest': invalid_identifier,
'raw': json.dumps({
attrib_name: {
'dayOfMonth': 23,
'year': 1984,
'month': 5
}
})
}
be(trusteeCli)
do('send ATTRIB dest={valid_dest} raw={raw}',
mapper=parameters, expect=ATTRIBUTE_ADDED, within=2)
return parameters
def test_send_get_attr_succeeds_for_existing_uuid_dest(
be, do, poolNodesStarted, trusteeCli, send_attrib):
be(trusteeCli)
do('send GET_ATTR dest={valid_dest} raw={attrib_name}',
mapper=send_attrib, expect=RETURNED_DATA, within=2)
def test_send_get_attr_fails_for_invalid_uuid_dest(
be, do, poolNodesStarted, trusteeCli, send_attrib):
do('send GET_ATTR dest={invalid_dest} raw={attrib_name}',
mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
def test_send_get_attr_fails_for_nonexistent_uuid_dest(
be, do, poolNodesStarted, trusteeCli, send_attrib):
with pytest.raises(AssertionError) as excinfo:
do('send GET_ATTR dest=this_is_not_valid raw={attrib_name}',
mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
assert(INVALID_SYNTAX in str(excinfo.value))
def test_send_get_attr_fails_for_invalid_attrib(
be, do, poolNodesStarted, trusteeCli, send_attrib):
do('send GET_ATTR dest={valid_dest} raw=badname',
mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
def test_send_get_attr_fails_with_missing_dest(
be, do, poolNodesStarted, trusteeCli, send_attrib):
with pytest.raises(AssertionError) as excinfo:
do('send GET_ATTR raw={attrib_name}',
mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
assert(INVALID_SYNTAX in str(excinfo.value))
def test_send_get_attr_fails_with_missing_attrib(
be, do, poolNodesStarted, trusteeCli, send_attrib):
with pytest.raises(AssertionError) as excinfo:
do('send GET_ATTR dest={valid_dest}',
mapper=send_attrib, expect=ATTR_NOT_FOUND, within=2)
assert(INVALID_SYNTAX in str(excinfo.value))
|
hyperledger-archives/indy-client
|
sovrin_client/test/cli/test_send_get_attr.py
|
test_send_get_attr.py
|
py
| 2,791 |
python
|
en
|
code
| 18 |
github-code
|
6
|
47509541
|
from typing import *
import math
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxAncestorDiff(self, root: Optional[TreeNode]) -> int:
ans = 0
memo = {}
def minmax(node):
if node in memo:
return memo[node]
if node is None:
return (float("inf"), -1)
mi_l, ma_l = minmax(node.left)
mi_r, ma_r = minmax(node.right)
val = node.val
mi = min([mi_l, mi_r])
ma = max([ma_l, ma_r])
nonlocal ans
if math.isfinite(mi):
v = abs(val-mi)
if v > ans:
ans = v
if ma != -1:
v = abs(val-ma)
if v > ans:
ans = v
mi = min([mi_l, mi_r, val])
ma = max([ma_l, ma_r, val])
memo[node] = (mi, ma)
return (mi, ma)
minmax(root)
return ans
if __name__ == "__main__":
s = Solution()
root = TreeNode(0)
root.left = TreeNode(1)
root.right = TreeNode(2)
assert s.maxAncestorDiff(root)
|
code-cp/leetcode
|
solutions/1026/main.py
|
main.py
|
py
| 1,395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25008864271
|
from osv import fields, osv
class res_partner_address(osv.osv):
_inherit = 'res.partner.address'
def _complete_name_get_fnc(self, cr, uid, ids, prop, unknow_none, unknow_dict):
table = self.name_get(cr, uid, ids, {'contact_display':'contact'})
return dict(table)
_columns = {
'complete_address': fields.function(_complete_name_get_fnc, method=True, type="char", size=512, string='Complete Name'),
}
res_partner_address()
|
factorlibre/openerp-extra-6.1
|
sale_supplier_direct_delivery/partner.py
|
partner.py
|
py
| 476 |
python
|
en
|
code
| 9 |
github-code
|
6
|
9064742045
|
from utils import readEdgeList, split_between_last_char
import numpy as np
import time
from ResultWritter import ResultWritter
import os
class KTupleFeatureGenerator:
def __init__(self, path, k = 5, sample_times = 100, thread_num = 40):
self.path = path
self.k = k
self.sample_times = sample_times
self.thread_num = thread_num
def generate_k_tuple_feature(self, path):
os.system('./run ' + path + " " + str(self.k) + " " + str(self.sample_times) + " " + str(self.thread_num))
def generate_k_tuple_feature_old(self, path):
for i in range(3, self.k + 1):
os.system('./runold ' + path + " " + str(i) + " " + str(self.sample_times) + " " + str(self.thread_num))
def generateDataFeature(self):
print(self.path)
# self.generate_k_tuple_feature(self.path)
prefix, _ = split_between_last_char(self.path, '.')
# prefix += suffix
print(prefix)
if os.path.exists(prefix):
filenames = os.listdir(prefix)
filenames = [(prefix + "/" + name) for name in filenames]
fileNames = []
for name in filenames:
if name.split('.')[-1] == "edges":
print(name)
self.generate_k_tuple_feature_old(name)
if __name__ == '__main__':
path = "../data/artist_edges.edges"
ktuple = KTupleFeatureGenerator(path = path)
ktuple.generate_k_tuple_feature_old(ktuple.path)
ktuple.generateDataFeature()
|
anonydeepgraphlet/DeepGraphlet
|
src/k_tuple_feature_generator.py
|
k_tuple_feature_generator.py
|
py
| 1,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40145168774
|
#!/usr/bin/env python
from lxml import html
import wikipedia
with open('movies.txt','r') as f:
movies = f.read().strip().split('\n')
m = movies[0]
page_html = wikipedia.page(m).html()  # renamed: don't shadow the imported lxml 'html' module
tree = html.fromstring(page_html)
director = tree.xpath('//')  # XPath expression left incomplete in the original source
|
luster/is-pepsi-okay
|
script/scrape.py
|
scrape.py
|
py
| 245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39477906211
|
st1 = {1, 2, 3, 4}
st2 = {5, 4, 6, 7, 8}
z = st1.union(st2)
z2 = st1.intersection(st2)
z3 = st1 - st2  # set difference
z4 = st1 ^ st2  # symmetric difference
print(z)
print(z2)
print(z3)
print(z4)
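# A hedged extension of the examples above, in the same spirit:
print(st1.issubset(st2))   # False - not every element of st1 is in st2
print(st1.isdisjoint(st2)) # False - the sets share the element 4
print(st1 <= z)            # True - st1 is a subset of the union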
|
satyamsingh-stack/Python-Programming
|
set.py
|
set.py
|
py
| 186 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40504216527
|
import os
import subprocess
import tempfile
import nbformat
import pytest
IGNORE_NOTEBOOKS: list[str] = [
"12_ResDMD.ipynb",
"12_koopman_mpc.ipynb",
"koopman_mpc.ipynb",
]
def _notebook_run(path):
"""Execute a notebook via nbconvert and collect output. Returns the parsed notebook object
and the execution errors.
Source:
https://blog.thedataincubator.com/2016/06/testing-jupyter-notebooks/
"""
dirname, _ = os.path.split(path)
os.chdir(dirname)
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = [
"jupyter",
"nbconvert",
"--to",
"notebook",
"--execute",
"--ExecutePreprocessor.timeout=400",
"--output",
fout.name,
path,
]
subprocess.check_call(args)
fout.seek(0)
nb = nbformat.read(fout, nbformat.current_nbformat)
errors = [
output
for cell in nb.cells
if "outputs" in cell
for output in cell["outputs"]
if output.output_type == "error"
]
return nb, errors
def _find_all_notebooks_to_run():
from datafold import __path__ as datafold_path
assert len(datafold_path) == 1
import pathlib
datafold_path = pathlib.Path(datafold_path[0])
tutorials_path = datafold_path.parent / "tutorials"
assert tutorials_path.is_dir()
example_notebooks = []
for ipynb_filepath in tutorials_path.rglob("*.ipynb"):
if (
".ipynb_checkpoints" not in str(ipynb_filepath)
and ipynb_filepath.name not in IGNORE_NOTEBOOKS
):
assert ipynb_filepath.is_file()
example_notebooks.append(ipynb_filepath)
return example_notebooks
@pytest.mark.parametrize("nb_path", _find_all_notebooks_to_run())
def test_notebooks(nb_path):
_, errors = _notebook_run(nb_path)
assert not errors
|
datafold-dev/datafold
|
tutorials/tests/test_notebooks.py
|
test_notebooks.py
|
py
| 1,932 |
python
|
en
|
code
| 13 |
github-code
|
6
|
23731980444
|
from collections import abc
from typing import Any, Callable, Dict, List, Optional, Union
def remove_nulls(data: Union[List, Dict],
value_filter: Optional[Callable[[Any], bool]] = None) -> Union[List, Dict]:
""" Given a list or dict, returns an object of the same structure without filtered values.
By default, key-value pairs where the value is 'None' are removed. The `value_filter` param
must be a function which takes values from the provided dict/list structure, and returns a
truthy value if the key-value pair is to be removed, and a falsey value otherwise.
Args:
data (Union[List, Dict]): List or dict containing data
value_filter (Optional[Callable[[Any], bool]], optional): Lambda function to use to filter out values (e.g. "lambda x: x in (None, 'NULL', 'null')"). Defaults to None.
Raises:
TypeError: Raise TypeError if an unsupported data type is encountered
Returns:
Union[List, Dict]: Returns a filtered version of the list or dictionary passed to the function call
Taken and modified from https://stackoverflow.com/questions/67806380/recursive-remove-all-keys-that-have-null-as-value
"""
collection_types = (list, tuple) # avoid including 'str' here
mapping_types = (abc.Mapping,)
all_supported_types = (*mapping_types, *collection_types)
if value_filter is None:
value_filter = lambda x: x is None
if isinstance(data, collection_types):
data = [d for d in data if not value_filter(d)] # Remove Nones at root level of list
return [remove_nulls(x, value_filter) if isinstance(x, all_supported_types) else x for x in data]
elif isinstance(data, mapping_types):
clean_val = lambda x: remove_nulls(x, value_filter) if isinstance(x, all_supported_types) else x
return {k: clean_val(v) for k, v in data.items() if not value_filter(v)}
raise TypeError(f"Unsupported type '{type(data)}': {data!r}")
# data = {
# "field_1":None,
# "field_2":"b",
# "field_3":{"z":"z","y":"y","x":None},
# "field_4":[{"z":"z","y":"y","x":None}, None, {"z":"z","y":None, "x":{"a":None,"b":"b"}}]
# }
# print(remove_nulls(data))
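# A minimal runnable sketch (assumed usage; mirrors the commented example
# above, with a custom value_filter that also strips 'NULL' strings):
if __name__ == "__main__":
    data = {
        "field_1": None,
        "field_2": "b",
        "field_3": {"z": "z", "y": "NULL", "x": None},
        "field_4": [{"z": "z", "x": None}, None, "NULL"],
    }
    print(remove_nulls(data, value_filter=lambda x: x in (None, "NULL")))
    # -> {'field_2': 'b', 'field_3': {'z': 'z'}, 'field_4': [{'z': 'z'}]}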
|
rylativity/python-utils
|
dataprep.py
|
dataprep.py
|
py
| 2,193 |
python
|
en
|
code
| 0 |
github-code
|
6
|
910883870
|
import numpy as np, h5py as h5
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.part.test.common import get_proatomdb_cp2k
from horton.test.common import tmpdir
def test_db_basics():
padb = ProAtomDB.from_refatoms(numbers=[8, 1], max_cation=1, max_anion=1)
assert padb.get_numbers() == [1, 8]
assert padb.get_charges(8) == [1, 0, -1]
assert padb.get_charges(1) == [0, -1]
r1 = padb.get_record(8, -1)
assert r1.number == 8
assert r1.charge == -1
assert abs(r1.energy - -72.587) < 1e-3
assert r1.ipot_energy == padb.get_record(8, 0).energy - r1.energy
assert r1.population == 9
assert r1.pseudo_number == 8
assert r1.pseudo_population == 9
assert r1.safe
assert r1.rgrid.size == 59
r2 = padb.get_record(8, -1)
r3 = padb.get_record(8, 0)
assert r1 == r2
assert r1 != r3
assert padb.get_rgrid(8) is r1.rgrid
assert padb.get_record(8, +1).ipot_energy is None
assert padb.get_record(8, -1).ipot_energy == padb.get_record(8, 0).energy - padb.get_record(8, -1).energy
assert padb.get_record(1, 0).ipot_energy == -padb.get_record(1, 0).energy
def test_db_basics_pseudo():
padb = get_proatomdb_cp2k()
assert padb.get_numbers() == [8, 14]
assert padb.get_charges(8) == [2, 1, 0, -1, -2]
assert padb.get_charges(8, safe=True) == [2, 1, 0, -1]
assert padb.get_charges(14) == [0]
assert not padb.get_record(8, -2).safe
assert padb.get_rgrid(8) is padb.get_record(8, -2).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, -1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 0).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 1).rgrid
assert padb.get_rgrid(8) is padb.get_record(8, 2).rgrid
r1 = padb.get_record(8, -1)
assert r1.safe
assert abs(r1.energy - -15.866511882272) < 1e-8
assert abs(r1.ipot_energy - (padb.get_record(8, 0).energy - r1.energy)) < 1e-5
r2 = padb.get_record(8, -2)
assert not r2.safe
assert abs(r2.energy - -15.464982778766) < 1e-8
assert abs(r2.ipot_energy - (r1.energy - r2.energy)) < 1e-5
assert padb.get_record(8, +2).ipot_energy is None
def test_record_basics_pseudo():
fn_out = context.get_fn('test/atom_si.cp2k.out')
mol = IOData.from_file(fn_out)
r = ProAtomRecord.from_iodata(mol)
assert r.number == 14
assert r.charge == 0
assert abs(r.energy - -3.761587698067) < 1e-10
assert r.ipot_energy is None
assert r.population == 14
assert r.pseudo_number == 4
assert r.pseudo_population == 4
assert r.safe
def compare_padbs(padb1, padb2):
assert padb1.size == padb2.size
for number in padb1.get_numbers():
for charge in padb1.get_charges(number):
r1 = padb1.get_record(number, charge)
r2 = padb2.get_record(number, charge)
assert r1 == r2
def test_io_group():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
assert padb1.size == 5
keys = sorted(padb1._map.keys())
assert keys == [(1, -1), (1, 0), (6, -1), (6, 0), (6, +1)]
with h5.File('horton.dpart.test.test_proatomdb.test_io_group', "w", driver='core', backing_store=False) as f:
padb1.to_file(f)
padb2 = ProAtomDB.from_file(f)
compare_padbs(padb1, padb2)
def test_io_filename():
padb1 = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=0)
keys = sorted(padb1._map.keys())
assert keys == [(1, 0), (6, 0), (6, 1)]
with tmpdir('horton.dpart.test.test_proatomdb.test_io_filename') as dn:
filename = '%s/test.h5' % dn
padb1.to_file(filename)
padb2 = ProAtomDB.from_file(filename)
compare_padbs(padb1, padb2)
def test_compute_radii():
rgrid = RadialGrid(ExpRTransform(1e-3, 1e1, 100))
padb = ProAtomDB.from_refatoms([1, 6], 0, 0, (rgrid, 110))
record = padb.get_record(6, 0)
indexes, radii = record.compute_radii([2.0, 5.9, 5.999])
assert (indexes == [68, 89, 100]).all()
assert abs(radii - np.array([0.522305, 3.595831, 10.0])).max() < 1e-5
def test_moments():
padb = get_proatomdb_cp2k()
record0 = padb.get_record(8, 0)
record1 = padb.get_record(8, 1)
m0 = record0.get_moment(3)
m1 = record1.get_moment(3)
assert m0 > m1
assert abs(m0-21.84) < 1e-2
assert abs(m1-12.17) < 1e-2
def check_spline_record(spline, record):
assert abs(spline.y - record.rho).max() < 1e-10
assert abs(spline.dx - record.deriv).max() < 1e-10
def check_spline_pop(spline, pop):
rtf = spline.rtransform
check_pop = 4*np.pi*dot_multi(
rtf.get_deriv(),
rtf.get_radii()**2,
spline.y,
)
assert abs(pop - check_pop) < 1e-2
def check_spline_mono_decr(spline):
t = np.arange(0, spline.rtransform.npoint, 0.1)
x = spline.rtransform.radius(t)
y = spline(x)
i = (abs(y) < 1e-10).nonzero()[0][0]
y = y[:i]
assert ((y[1:] - y[:-1])/y[:-1]).min() < 1e-9
def test_get_spline():
padb = ProAtomDB.from_refatoms(numbers=[1, 6], max_cation=1, max_anion=1)
spline = padb.get_spline(6)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(6, 0))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(6, -1))
check_spline_mono_decr(spline)
spline = padb.get_spline(6, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
check_spline_mono_decr(spline)
spline = padb.get_spline(1, {0:0.5})
check_spline_pop(spline, 0.5)
check_spline_mono_decr(spline)
def test_get_spline_pseudo():
padb = get_proatomdb_cp2k()
spline = padb.get_spline(8)
check_spline_pop(spline, 6.0)
check_spline_record(spline, padb.get_record(8, 0))
spline = padb.get_spline(8, -1)
check_spline_pop(spline, 7.0)
check_spline_record(spline, padb.get_record(8, -1))
spline = padb.get_spline(8, {0:0.5, -1:0.5})
check_spline_pop(spline, 6.5)
spline = padb.get_spline(14)
check_spline_pop(spline, 4.0)
check_spline_record(spline, padb.get_record(14, 0))
def test_compact():
padb = get_proatomdb_cp2k()
padb.compact(0.1)
assert padb.get_rgrid(8).size < 100
assert padb.get_rgrid(14).size < 100
def test_normalize():
padb = get_proatomdb_cp2k()
padb.compact(0.1)
padb.normalize()
for number in padb.get_numbers():
rgrid = padb.get_rgrid(number)
for charge in padb.get_charges(number):
r = padb.get_record(number, charge)
nel = rgrid.integrate(r.rho)
nel_integer = r.pseudo_number - charge
assert abs(nel - nel_integer) < 1e-10
def test_empty_proatom():
padb = get_proatomdb_cp2k()
assert (padb.get_rho(8, {}) == 0.0).all()
def test_io_atdens():
padb = ProAtomDB.from_file(context.get_fn('test/pro.atdens'))
assert padb.get_numbers() == [16]
assert padb.get_charges(16) == [3, 2]
r = padb.get_record(16, 3)
assert abs(r.rho[0] - 0.2628105459E+04) < 1e-5
assert abs(r.rho[-1] - 0.1998952826E-16) < 1e-5
s = padb.get_spline(16, 3)
assert abs(s(np.array([0.0])) - 2661.68659449) < 1e-5
radii = r.rgrid.rtransform.get_radii()
assert radii[0] == 0.5216488380E-03
assert abs(radii[-1] - 20) < 1e-14
assert abs(radii[1] - 0.5442350204E-03) < 1e-8
assert abs(r.rgrid.integrate(r.rho) - 13) < 1e-3
# check the basics of the get_rho method (charge)
rho1 = padb.get_rho(16, 3)
rho2, deriv = padb.get_rho(16, 3, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
# check the basics of the get_rho method (dict)
rho1 = padb.get_rho(16, {3:1})
rho2, deriv = padb.get_rho(16, {3:1}, do_deriv=True)
assert (rho1 == rho2).all()
assert deriv is None
|
theochem/horton
|
horton/part/test/test_proatomdb.py
|
test_proatomdb.py
|
py
| 7,845 |
python
|
en
|
code
| 83 |
github-code
|
6
|
73954150588
|
import cv2
import sys
from emot import emo_det
cascPath = "webcam.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
font = cv2.FONT_HERSHEY_SIMPLEX
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces and display the dominant emotion and score(in %)
for (x, y, w, h) in faces:
dom_emotion,dom_score = emo_det(frame)
cv2.putText(frame, dom_emotion+" "+str(dom_score*100)+'%', (x, y), font, 1, (0, 255, 0), 2)
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
allansuresh/face-emo-detect
|
det.py
|
det.py
|
py
| 1,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75282246908
|
import hashlib
import re
import sys
import time
re_three_same = re.compile(r'(.)\1\1')
pi = 'yjdafjpo'
i = 0
p = 2
counting_map = {}
found_indexes = []
max_found_indexes = 80
def get_hash(plain_text, p):
if p == 1:
return hashlib.md5(plain_text.encode('utf-8')).hexdigest()
elif p == 2:
h = plain_text
for x in range(2017):
h = hashlib.md5(h.encode('utf-8')).hexdigest()
return h
while len(found_indexes) < max_found_indexes:
plain_text = '{}{}'.format(pi, i)
h = get_hash(plain_text, p)
# check still valid
remove_keys = []
for k in counting_map:
if len(found_indexes) < max_found_indexes:
chars = ''.join([ counting_map[k]['char'] for x in range(5)])
if chars in h:
counting_map[k]['inner_count'] += 1
if counting_map[k]['inner_count'] == 1: # thought they would edit this count for part 2
# ok key
found_indexes.append(counting_map[k]['start_i'])
remove_keys.append(k)
print(i, found_indexes)
if i - counting_map[k]['start_i'] == 1000:
remove_keys.append(k)
for key in remove_keys:
del counting_map[key]
    # adding new ones (first run of three identical characters in the hash)
    m = re.findall(re_three_same, h)
if m:
counting_map[i] = {
'start_i': i,
'inner_count': 0,
'char': m[0],
}
i += 1
print(sorted(found_indexes)[63])
|
elitan/adventofcode
|
2016/14/main.py
|
main.py
|
py
| 1,289 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17891204899
|
from typing import List
from game.GameState import GameState, Move
from policy.Policy import EstimatingPolicy
from policy.exceptions import *
import random
import numpy.random
import torch
import torch.cuda
class ModelBasedPolicy(EstimatingPolicy):
def __init__(self, model, feature_extractor, h, w, exploration=0, cuda = False):
self.model = model
self.h = h
self.w = w
self.exploration = exploration
self.feature_extractor = feature_extractor
self.name = model.name
self.cuda = cuda
def get_next_state_values(self, game_state: GameState):
available_moves, next_states = self.get_next_states(game_state)
with torch.no_grad():
features_for_all_states = self.feature_extractor.get_features(next_states).float()
if self.cuda:
features_for_all_states = features_for_all_states.cuda()
v: torch.Tensor = self.model.forward(features_for_all_states)
return available_moves, next_states, v
def get_next_states(self, game_state):
available_moves: List[Move] = list(game_state.get_all_moves())
if not available_moves:
raise NoValidMovesException(game_state.to_move, 'No move for {}'.format(game_state.to_move))
next_states = [game_state.get_copy_with_move(move) for move in available_moves]
return available_moves, next_states
def get_best_option(self, game_state: GameState):
available_moves, next_states, v = self.get_next_state_values(game_state)
self.pos_checked = len(next_states)
if self.exploration:
if random.random() < self.exploration:
v.squeeze_(1)
v = v.numpy()
v += abs(v.min())
v /= v.sum()
i = numpy.random.choice(range(len(available_moves)), p=v)
return v[i], available_moves[i]
        # we minimize the net's predicted position quality for the player to move (the opponent) in the next state.
best_move_value, best_move_index = v.min(0)
# print(best_move_value)
return best_move_value, available_moves[int(best_move_index)]
|
nkorobkov/virus-game
|
policy/ModelBasedPolicy.py
|
ModelBasedPolicy.py
|
py
| 2,183 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8385089331
|
from __future__ import absolute_import
import os
import sys
import re
import xml.dom.minidom
import random
from sumolib.files.additional import write_additional_minidom
try:
from typing import Any, List, Tuple, Union
except ImportError:
# there are python2 versions on MacOS coming without typing
pass
"""
Creates a vehicle type distribution with a number of representative car-following parameter sets.
"""
class _FixDistribution(object):
def __init__(self, params, isNumeric=True):
if isNumeric:
self._params = tuple([float(p) for p in params])
else:
self._params = params
self._limits = (0, None)
self._isNumeric = isNumeric
self._maxSampleAttempts = 10
def setMaxSamplingAttempts(self, n):
if n is not None:
self._maxSampleAttempts = n
def setLimits(self, limits):
self._limits = limits
def sampleValue(self):
if self._isNumeric:
value = None
nrSampleAttempts = 0
# Sample until value falls into limits
while nrSampleAttempts < self._maxSampleAttempts \
and (value is None or (self._limits[1] is not None and value > self._limits[1]) or
(self._limits[0] is not None and value < self._limits[0])):
value = self._sampleValue()
nrSampleAttempts += 1
# Eventually apply fallback cutting value to limits
if self._limits[0] is not None and value < self._limits[0]:
value = self._limits[0]
elif self._limits[1] is not None and value > self._limits[1]:
value = self._limits[1]
else:
value = self._sampleValue()
return value
def sampleValueString(self, decimalPlaces):
if self._isNumeric:
decimalPattern = "%." + str(decimalPlaces) + "f"
return decimalPattern % self.sampleValue()
return self.sampleValue()
def _sampleValue(self):
return self._params[0]
class _NormalDistribution(_FixDistribution):
def __init__(self, mu, sd):
_FixDistribution.__init__(self, (mu, sd))
def _sampleValue(self):
return random.normalvariate(self._params[0], self._params[1])
class _LogNormalDistribution(_FixDistribution):
def __init__(self, mu, sd):
_FixDistribution.__init__(self, (mu, sd))
def _sampleValue(self):
return random.lognormvariate(self._params[0], self._params[1])
class _NormalCappedDistribution(_FixDistribution):
def __init__(self, mu, sd, min, max):
_FixDistribution.__init__(self, (mu, sd, min, max))
if mu < min or mu > max:
raise Exception("mean %s is outside cutoff bounds [%s, %s]" % (mu, min, max))
def _sampleValue(self):
while True:
cand = random.normalvariate(self._params[0], self._params[1])
if cand >= self._params[2] and cand <= self._params[3]:
return cand
class _UniformDistribution(_FixDistribution):
def __init__(self, a, b):
_FixDistribution.__init__(self, (a, b))
def _sampleValue(self):
return random.uniform(self._params[0], self._params[1])
class _GammaDistribution(_FixDistribution):
def __init__(self, alpha, beta):
_FixDistribution.__init__(self, (alpha, 1.0 / beta))
def _sampleValue(self):
return random.gammavariate(self._params[0], self._params[1])
_DIST_DICT = {
'normal': _NormalDistribution,
'lognormal': _LogNormalDistribution,
'normalCapped': _NormalCappedDistribution,
'uniform': _UniformDistribution,
'gamma': _GammaDistribution
}
class VehAttribute:
def __init__(self, name, is_param=False, distribution=None, distribution_params=None,
bounds=None, attribute_value=None):
# type: (str, bool, str, Union[dict, Any], tuple, str) -> None
"""
This emmulates one line of example config.txt in
https://sumo.dlr.de/docs/Tools/Misc.html#createvehtypedistributionpy
Either distribution or attribute_value should be populated
Args:
name (str): the name of the attribute. Examples: "tau", "sigma", "length"
is_param (bool, optional): is the attribute a parameter that should be added as a child element.
            distribution (str, optional): the name of the distribution to use
distribution_params (Union[dict, Any], optional): the parameters corresponding to the distribution
bounds (tuple, optional): the bounds of the distribution.
attribute_value (str, optional): if no distribution is given, the fixed value for the attribute
"""
self.is_param = is_param
self.name = name
self.distribution = distribution
self.distribution_params = distribution_params
self.bounds = bounds
self.attribute_value = attribute_value
if self.attribute_value and self.distribution:
raise Exception("Only one of distribution or attribute value should be defined, not both")
self.d_obj = self._dist_helper(distribution, distribution_params, bounds)
def _dist_helper(self, distribution, dist_params, dist_bounds):
# type: (...) -> Union[None, _FixDistribution]
if distribution:
try:
d = _DIST_DICT[distribution](**dist_params)
d.setLimits(dist_bounds) if dist_bounds else d.setLimits(
(0, None))
except KeyError:
raise KeyError("The distribution %s is not known. Please select one of: \n%s " %
(distribution, "\n".join(_DIST_DICT.keys())))
else:
isNumeric = False if self.name == "emissionClass" else len(
re.findall(r'^(-?[0-9]+(\.[0-9]+)?)$', self.attribute_value)) > 0
d = _FixDistribution((self.attribute_value, ), isNumeric)
return d
def add_sampling_attempts(self, attempts):
if self.d_obj:
self.d_obj.setMaxSamplingAttempts(attempts)
class CreateVehTypeDistribution:
def __init__(self, seed=None, size=100, name='vehDist', resampling=100, decimal_places=3):
# type: (int, int, str, int, int) -> None
"""
Creates a VehicleType Distribution.
See https://sumo.dlr.de/docs/Definition_of_Vehicles,_Vehicle_Types,_and_Routes.html#vehicle_type_distributions
Args:
seed (int, optional): random seed.
size (int, optional): number of vTypes in the distribution.
name (str, optional): alphanumerical ID used for the created vehicle type distribution.
resampling (int, optional): number of attempts to resample a value until it lies in the specified bounds.
decimal_places (int, optional): number of decimal places.
"""
if seed:
random.seed(seed)
self.size = size
self.name = name
self.resampling = resampling
self.decimal_places = decimal_places
self.attributes = [] # type: List[VehAttribute]
def add_attribute(self, attribute):
# type: (Union[VehAttribute, dict]) -> None
"""
Add an instance of the attribute class to the Parameters. Pass the sampling attempts "global" parameter
Args:
attribute (VehAttribute or dict): An instance of VehAttribute or
a dictionary of parameters to be passed to the VehAttribute constructor
"""
attribute = attribute if isinstance(attribute, VehAttribute) else VehAttribute(**attribute)
attribute.add_sampling_attempts(self.resampling)
self.attributes.append(attribute)
def create_veh_dist(self, xml_dom):
# type: (xml.dom.minidom.Document) -> xml.dom.minidom.Element
# create the vehicleDist node
vtype_dist_node = xml_dom.createElement("vTypeDistribution")
vtype_dist_node.setAttribute("id", self.name)
# create the vehicle types
for i in range(self.size):
veh_type_node = xml_dom.createElement("vType")
veh_type_node.setAttribute("id", self.name + str(i))
self._generate_vehType(xml_dom, veh_type_node)
vtype_dist_node.appendChild(veh_type_node)
return vtype_dist_node
def to_xml(self, file_path):
# type: (str) -> None
xml_dom, existing_file = self._check_existing(file_path)
vtype_dist_node = self.create_veh_dist(xml_dom)
if existing_file:
self._handle_existing(xml_dom, vtype_dist_node)
with open(file_path, 'w') as f:
dom_string = xml_dom.toprettyxml()
# super annoying but this makes re-writing the xml a little bit prettier
f.write(os.linesep.join([s for s in dom_string.splitlines() if s.strip()]))
else:
write_additional_minidom(xml_dom, vtype_dist_node, file_path=file_path)
sys.stdout.write("Output written to %s" % file_path)
def _handle_existing(self, xml_dom, veh_dist_node):
# type: (xml.dom.minidom.Document, xml.dom.minidom.Element) -> None
existingDistNodes = xml_dom.getElementsByTagName("vTypeDistribution")
replaceNode = None
for existingDistNode in existingDistNodes:
if existingDistNode.hasAttribute("id") and existingDistNode.getAttribute("id") == self.name:
replaceNode = existingDistNode
break
if replaceNode is not None:
replaceNode.parentNode.replaceChild(veh_dist_node, replaceNode)
else:
xml_dom.documentElement.appendChild(veh_dist_node)
def _generate_vehType(self, xml_dom, veh_type_node):
# type: (xml.dom.minidom.Document, xml.dom.minidom.Element) -> xml.dom.minidom.Node
for attr in self.attributes:
if attr.is_param:
param_node = xml_dom.createElement("param")
param_node.setAttribute("key", attr.name)
param_node.setAttribute(
"value", attr.d_obj.sampleValueString(self.decimal_places))
veh_type_node.appendChild(param_node)
else:
veh_type_node.setAttribute(
attr.name, attr.d_obj.sampleValueString(self.decimal_places))
@staticmethod
def _check_existing(file_path):
# type: (str) -> Tuple[xml.dom.minidom.Document, bool]
if os.path.exists(file_path):
try:
return xml.dom.minidom.parse(file_path), True
except Exception as e:
raise Exception("Cannot parse existing %s. Error: %s" %
(file_path, str(e)))
else:
return xml.dom.minidom.Document(), False
def save_myself(self, file_path):
# type: (str) -> None
"""
This function saves the class to a json format. Used for logging simulation inputs
Args:
file_path (str): path to save json to
"""
import json
with open(file_path, "w") as f:
f.write(
json.dumps(
self,
default=lambda o: {
key: param for key, param in o.__dict__.items() if '_' not in key[0]},
sort_keys=True,
indent=4,
)
)
class CreateMultiVehTypeDistributions(CreateVehTypeDistribution):
def __init__(self):
# type: () -> None
self.distributions = [] # type: List[CreateVehTypeDistribution]
def register_veh_type_distribution(self, veh_type_dist, veh_attributes):
# type: (Union[dict, CreateVehTypeDistribution], List[Union[dict, VehAttribute]]) -> None
veh_type_dist = veh_type_dist if isinstance(
veh_type_dist, CreateVehTypeDistribution) else CreateVehTypeDistribution(**veh_type_dist)
for attr in veh_attributes:
veh_type_dist.add_attribute(attr if isinstance(attr, VehAttribute) else VehAttribute(**attr))
self.distributions.append(veh_type_dist)
def write_xml(self, file_path):
# type: (str) -> None
"""
This function will overwrite existing files
Args:
file_path (str): Path to the file to write to
"""
xml_dom, _ = self._check_existing(file_path)
veh_dist_nodes = [dist.create_veh_dist(xml_dom=xml_dom) for dist in self.distributions]
write_additional_minidom(xml_dom, veh_dist_nodes, file_path=file_path)
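# A hedged usage sketch (not part of the original module; the attribute names
# follow the examples given in the VehAttribute docstring above):
if __name__ == "__main__":
    dist = CreateVehTypeDistribution(seed=42, size=10, name="exampleDist")
    dist.add_attribute({"name": "tau", "distribution": "normal",
                        "distribution_params": {"mu": 0.8, "sd": 0.1}})
    dist.add_attribute({"name": "length", "attribute_value": "4.9"})
    dist.to_xml("vTypeDist.add.xml")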
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/sumolib/vehicletype.py
|
vehicletype.py
|
py
| 12,625 |
python
|
en
|
code
| 17 |
github-code
|
6
|
71648693307
|
#!/usr/bin/env python3
import asyncio
import socket
from keyword import kwlist
from typing import Tuple as tuple  # alias so the tuple[str, bool] annotation works on Python < 3.9
MAX_KEYWORD_LEN = 4 # <1>
async def probe(domain: str) -> tuple[str, bool]: # <2>
loop = asyncio.get_running_loop() # <3>
try:
await loop.getaddrinfo(domain, None) # <4>
except socket.gaierror:
return (domain, False)
return (domain, True)
async def main() -> None: # <5>
names = (kw for kw in kwlist if len(kw) <= MAX_KEYWORD_LEN) # <6>
domains = (f'{name}.dev'.lower() for name in names) # <7>
coros = [probe(domain) for domain in domains] # <8>
for coro in asyncio.as_completed(coros): # <9>
        # coro here is already completed, so why still await it? Removing the await
        # raises an error; probably an unpacking-related issue.
        # The original text's explanation: the await expression will not block but we need it to get the
# result from coro. If coro raised an unhandled exception,
# it would be re-raised
# here.
domain, found = await coro # <10>
mark = '+' if found else ' '
print(f'{mark} {domain}')
if __name__ == '__main__':
asyncio.run(main()) # <11>
|
yangguang8112/fluentPy
|
new_chapter18/21-async/domains/asyncio/blogdom.py
|
blogdom.py
|
py
| 1,205 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16119990604
|
H, W = map(int, input().split())
N = int(input())
sticker = [list(map(int, input().split())) for _ in range(N)]
result = 0
for i in range(N):
for j in range(i+1, N):
row1, col1 = sticker[i]
row2, col2 = sticker[j]
        # case: neither sticker rotated 90 degrees
if (row1 + row2 <= H and max(col1, col2) <= W) or (max(row1, row2) <= H and col1 + col2 <= W):
result = max(result, (row1 * col1) + row2 * col2)
        # case: only the first sticker rotated 90 degrees
if (col1 + row2 <= H and max(row1, col2) <= W) or (max(col1, row2) <= H and row1 + col2 <= W):
result = max(result, row1 * col1 + row2 * col2)
        # case: only the second sticker rotated 90 degrees
if (row1 + col2 <= H and max(col1, row2) <= W) or (max(row1, col2) <= H and col1 + row2 <= W):
result = max(result, row1 * col1 + row2 * col2)
        # case: both stickers rotated 90 degrees
if (col1 + col2 <= H and max(row1, row2) <= W) or (max(col1, col2) <= H and row1 + row2 <= W):
result = max(result, row1 * col1 + row2 * col2)
print(result)
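# Worked example: H=2, W=2 with stickers (1, 2) and (2, 1). Rotating only the
# second sticker makes both fit (row1 + col2 = 2 <= H, max(col1, row2) = 2 <= W),
# so the answer is 1*2 + 2*1 = 4, covering the whole board.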
|
sujeong11/Algorithm
|
완전탐색/16937.py
|
16937.py
|
py
| 1,138 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
19491319237
|
from Permission import Permission
import pandas as pd
import requests
class Get_info:
urlMarvel = 'http://gateway.marvel.com/v1/public/characters/' #Marvel API's url
def __init__ (self, id):
"""Accessing Marvel API to get information about desired character using its id.
Information retrieved: 1. name, 2. total number of events, 3. total number of series
available, 4. total number of comics and 5. price of the most expensive comic that
the charatcer was featured in"""
self.id = id #id needs to be given
link = self.urlMarvel + str(self.id) #url for specific Marvel Character
response = requests.get(link, params = Permission().parameters()).json()
response_price = requests.get(link + '/comics', params = Permission().parameters()).json() #Request for price feature
#Get relevant features related to the character (name, events,series,comics & highest price)
self.name = response['data']['results'][0]['name']
self.events = response['data']['results'][0]['events']['available']
self.series = response['data']['results'][0]['series']['available']
self.comics = response['data']['results'][0]['comics']['available']
#To get the highest price per comic
all_prices_per_comic_list = []
for dicts in response_price['data']['results']:
for prices in dicts['prices']:
all_prices_per_comic_list.append(prices['price'])
#Highest price info
self.price = max(all_prices_per_comic_list, default=0)
def filtered_info(self):
"""Return dataframe with all the information related to desired character"""
entry = pd.DataFrame({
'Character Name' : [self.name],
'Character ID' : [self.id],
'Total Available Events' : [self.events],
'Total Available Series' : [self.series],
'Total Available Comics' : [self.comics],
'Price of the Most Expensive Comic' : [self.price]})
return entry
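# A minimal usage sketch, assuming valid Marvel API credentials are configured
# in the Permission module; the id below (1009610, commonly listed as
# Spider-Man's character id) is only an illustrative assumption:
#
#   info = Get_info(1009610)
#   print(info.filtered_info())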
|
Guibas1812/create-api-marvel-characters
|
Get_Info.py
|
Get_Info.py
|
py
| 2,168 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23190704074
|
import csv
from django.core.management.base import BaseCommand
from recipes.models import Tag
class Command(BaseCommand):
    help = 'Add a list of tags (color and name) to the database'
def handle(self, *args, **options):
        with open('recipes/presets/tags.csv', 'r', encoding='utf8') as f:
reader = csv.reader(f)
current = Tag.objects.count()
for row in reader:
color, tag, slug = row
Tag.objects.get_or_create(
color=color,
tag=tag,
slug=slug
)
result = Tag.objects.count() - current
        print(f'Added {result} tags to the database')
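# Expected layout of recipes/presets/tags.csv, inferred from the row unpacking
# above (color, tag, slug per line, no header row); the values are made-up examples:
#
#   #E26C2D,Breakfast,breakfast
#   #49B64E,Lunch,lunch
#   #8775D2,Dinner,dinner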
|
mechnotech/foodgram-project
|
recipes/management/commands/add_tags.py
|
add_tags.py
|
py
| 825 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43734236645
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 13:14:57 2021
@author: Samael Olascoaga
@email: [email protected]
"""
import pandas as pd
import gseapy as gp
import matplotlib.pyplot as plt
from gseapy.plot import barplot, dotplot
import numpy as np
import seaborn as sns
sns.set_style("whitegrid")
gene_list = pd.read_csv('common.csv', header=None)
glist = gene_list.squeeze().str.strip().tolist()
names = gp.get_library_name()
enr = gp.enrichr(gene_list= glist,
gene_sets=['KEGG_2019_Human'],
                 organism='Human', # don't forget to set organism to the one you desire! e.g. Yeast
description='KEGG common targets',
# no_plot=True,
cutoff=0.5 # test dataset, use lower value from range(0,1)
)
resultados = enr.results.head(15)
resultados['-log10(FDR)'] = -np.log10(resultados['Adjusted P-value'])
resultados['Genes'] = resultados['Genes'].str.split(';')
resultados['Genes'] = resultados['Genes'].apply(lambda x: len(x))
g = sns.scatterplot(data=resultados, x="-log10(FDR)", y="Term", hue='-log10(FDR)', palette="seismic"
, size="Genes", sizes=(30, 300), legend=True)
g.legend(loc=6, bbox_to_anchor=(1, 0.5), ncol=1)
plt.ylabel('')
plt.xlabel('-log10(FDR)')
plt.title('KEGG Common targets')
plt.savefig(r'KEGG_common' + '.svg', format='svg', dpi=600, bbox_inches = "tight" )
|
Olascoaga/Senotherapy
|
ora.py
|
ora.py
|
py
| 1,448 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74912417468
|
import time
from src.game import *
# Controls
# E - Rotate left
# R - Rotate right
# F - Fire
# Arrow keys - Move
# ESC - Quit game
if __name__ == '__main__':
g = Game()
start_time = int(time.time())
while True:
g.events()
g.start_screen()
if int(time.time()) - start_time > 3:
break
g.draw_walls()
g.draw_mobs()
g.draw_treasures()
g.run()
|
lupusg/the-explorer-game
|
playable-version/main.py
|
main.py
|
py
| 424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73554593149
|
import numpy as np
from functions import mean_absolute_percentage_error
import torch
data = np.load('predictions.npz')
h_label = data['labels']
h_pred = data['pred']
indices_under_1500 = h_label < 1500
indices_under_1300 = h_label < 1300
h_pred = torch.Tensor(h_pred)
h_label = torch.Tensor(h_label)
h_pred_under_1500 = h_pred[indices_under_1500]
h_label_under_1500 = h_label[indices_under_1500]
h_pred_under_1300 = h_pred[indices_under_1300]
h_label_under_1300 = h_label[indices_under_1300]
print('all data: ', h_pred.shape[0])
print('under 1500 data: ', h_pred_under_1500.shape[0])
print('under 1300 data: ', h_pred_under_1300.shape[0])
print('mape (all data): ', mean_absolute_percentage_error(h_label, h_pred))
print('mape (under 1500): ', mean_absolute_percentage_error(h_label_under_1500, h_pred_under_1500))
print('mape (under 1300): ', mean_absolute_percentage_error(h_label_under_1300, h_pred_under_1300))
print('label under 1300: ', h_label_under_1300)
print('pred under 1300: ', h_pred_under_1300)
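# mean_absolute_percentage_error comes from a local `functions` module not shown
# here; a minimal sketch of what such a metric conventionally computes over
# torch tensors (an assumption, not the project's actual implementation):
#
#   def mape(y_true, y_pred):
#       # mean of |(true - pred) / true|, as a percentage
#       return (abs((y_true - y_pred) / y_true)).mean() * 100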
|
arseniybelkov/Determining_HOCB
|
auxillary_functions/analizePredictions.py
|
analizePredictions.py
|
py
| 1,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22290767213
|
# -*- coding: utf-8 -*-
import streamlit as st
from st_aggrid import AgGrid
import pandas as pd
import pymysql
from sqlalchemy import create_engine
engine = create_engine('mysql+pymysql://root:chiangcw@localhost/python?charset=utf8')
uploaded_file = st.file_uploader("Please select a CSV file to upload!")
if uploaded_file is not None:
df1 = pd.read_csv(uploaded_file)
AgGrid(df1)
df1.to_sql(name=str(uploaded_file.name).replace(".csv",""), con=engine, chunksize=1000, if_exists='replace', index=None)
st.success("上传成功!")
db = pymysql.connect(host="localhost", user="root", password="abcde", database="python", charset="utf8")
sql="select * from "+str(uploaded_file.name).replace(".csv","")
cursor = db.cursor()
cursor.execute(sql)
db.commit()
df2=pd.read_sql(sql,con=db)
st.success("数据库中的表格内容如下")
st.dataframe(df2)
else:
st.warning("请上传表格!")
|
chiangcw0410/mysql_test
|
test/test.py
|
test.py
|
py
| 904 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23448112816
|
## ----------------------------------------------------------------------------
# Import
import unittest
from arf_python.DataVector import DataVector
from arf_python.Point import Point
import os
## ----------------------------------------------------------------------------
# Constant
PATH_REPO = "./arf_python/tests"
NAME_FILE = "data.csv"
class RandomTest(unittest.TestCase):
"""Test on the ARF algorithm"""
def test_dataVector(self):
data_vector = DataVector(2, NAME_FILE)
self.assertEqual(data_vector.get_size_of_data(), 250, "the size is not good")
def test_dataVector_point_list(self):
data_vector_test = DataVector(2, NAME_FILE)
self.assertEqual(len(data_vector_test.get_points()), 250, "the size is not good")
def test_dataVector_points(self):
data_vector1 = DataVector(2, NAME_FILE)
data_vector2 = DataVector(2, NAME_FILE)
list_point = data_vector2.get_points()
for index, point in enumerate(data_vector1.get_points()):
self.assertEqual(list_point[index], point, "the point are not equal.")
|
jfolleas1/ProjetSpecifique
|
arf_python/tests/test_dataVector.py
|
test_dataVector.py
|
py
| 1,101 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20615133935
|
# Usage: python3 parse_and_collate_logs.py /path/to/logs/ /path/to/dest/
# Parameters:
# /path/to/logs/: contains directories, each containing log files
# /path/to/dest/: where the parsed and collated file will be placed
# Output:
# A CSV file of altitude-deadline pairs
import csv
import os
import sys
def calc_julian_day_from_ymd(year,month,day):
    # Fliegel-Van Flandern algorithm; the -0.5 shifts the result from noon to midnight
    monthm14d12 = int((month-14)/12)
    jd = day-32075+1461*(year+4800+monthm14d12)//4+367*(month-2-monthm14d12*12)//12-3*((year+4900+monthm14d12)//100)//4
    return float(jd)-0.5
def calc_sec_since_midnight(hour,minute,second):
    # seconds elapsed since midnight
    return second+60*(minute+60*hour)
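# Worked example for the two helpers above:
#   calc_julian_day_from_ymd(2000, 1, 1)  ->  2451544.5
#   (the Julian day number at noon on 2000-01-01 is 2451545; -0.5 moves it to midnight)
#   calc_sec_since_midnight(12, 30, 15)   ->  45015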
src = ''
dst = ''
if len(sys.argv)==3:
src = sys.argv[1]
if src[-1] != '/':
src += '/'
dst = sys.argv[2]
if dst[-1] != '/':
dst += '/'
with open(dst+'collated.csv',mode='w',newline='') as csv_o:
csvwriter = csv.writer(csv_o)
csvwriter.writerow(['altitude-km','deadline-s'])
else:
print("Usage: python3 parse_and_collate_logs.py /path/to/logs/ /path/to/dest/")
exit()
src_contents = os.listdir(src)
src_contents.sort()
for e in src_contents:
if os.path.isdir(src+e):
logdir = src+e+'/'
idstr = e[3:]
with open(logdir+'meas-sat-'+idstr+'-gtfs.csv',newline='') as csv_i:
#print('meas-sat-'+idstr+'-gtfs.csv')
csvreader = csv.reader(csv_i,delimiter=',')
header = next(csvreader)
row_prev = next(csvreader)
row_curr = next(csvreader)
while row_prev and row_curr:
# Get the years
year_prev = int(row_prev[0][0:4])
year_curr = int(row_curr[0][0:4])
# Get the months
month_prev = int(row_prev[0][5:7])
month_curr = int(row_curr[0][5:7])
# Get the days
day_prev = int(row_prev[0][8:10])
day_curr = int(row_curr[0][8:10])
# Get the hours
hour_prev = int(row_prev[0][11:13])
hour_curr = int(row_curr[0][11:13])
# Get the minutes
minute_prev = int(row_prev[0][14:16])
minute_curr = int(row_curr[0][14:16])
# Get the seconds
second_prev = int(row_prev[0][17:19])
second_curr = int(row_curr[0][17:19])
# Get the nanoseconds
nanosecond_prev = int(row_prev[0][20:29])
nanosecond_curr = int(row_curr[0][20:29])
# Calculate the difference in days
jd_prev = calc_julian_day_from_ymd(year_prev,month_prev,day_prev)
jd_curr = calc_julian_day_from_ymd(year_curr,month_curr,day_curr)
jd_diff = jd_curr-jd_prev
# Calculate the difference in seconds
sec_prev = calc_sec_since_midnight(hour_prev,minute_prev,second_prev)
sec_curr = calc_sec_since_midnight(hour_curr,minute_curr,second_curr)
sec_diff = sec_curr-sec_prev
# Calculate the difference in nanoseconds
nanosecond_diff = nanosecond_curr-nanosecond_prev
# Calculate the total difference in seconds
deadline = jd_diff*86400.0+float(sec_diff)+float(nanosecond_diff)/1000000000.0
# Average the altitudes
altitude = (float(row_prev[1])+float(row_curr[1]))/2.0
# Write out
with open(dst+'collated.csv',mode='a',newline='') as csv_o:
csvwriter = csv.writer(csv_o)
csvwriter.writerow(['{:+05.6f}'.format(altitude),'{:+03.6f}'.format(deadline)])
# Loop update
try:
row_prev = next(csvreader)
row_curr = next(csvreader)
except StopIteration:
break
#print(' complete')
|
CMUAbstract/cote
|
examples/generate-deadlines/analysis/parse_and_collate_logs.py
|
parse_and_collate_logs.py
|
py
| 3,468 |
python
|
en
|
code
| 15 |
github-code
|
6
|
17791637310
|
# Download the audio files for all of the video URLs given in the input file
import argparse
import os
import sys
youtube_cmd = \
"youtube-dl --extract-audio --audio-format mp3 -o \"{file_name}\" {url}"
def download_song(artist, song, url):
artist_part = '-'.join(artist.lower().split())
song_part = '-'.join(song.lower().split())
file_name = artist_part + "__" + song_part + ".%(ext)s"
return 0 == os.system(youtube_cmd.format(file_name=file_name, url=url))
# (Command returns 0 on success)
def mark_completed(f, line):
"""
Mark the song on the current line in the given file as downloaded. Change
the mark from `-` to `#`.
Args:
f (file) : A handle to the songs file, open at the current song
line (string) : The currently read line (containing the song)
Returns:
Nothing
NOTE: The file must be open in binary format.
"""
try:
marker_position = line.decode().index("| -") + 2 # add 2 to reach `-`
f.seek(-len(line), os.SEEK_CUR) # move back to the start of the current line
f.seek(marker_position, os.SEEK_CUR) # move to the position of the marker
f.write(b"#") # write the mark completed symbol (`-` -> `#`)
f.readline() # move to the next line (read past the current `\n`)
except ValueError:
# Could not find `-` marker
pass
except Exception as e:
# Here's a generic way for printing where the exception occurred
_, _, e_traceback = sys.exc_info()
print(f"Error:{e_traceback.tb_lineno}: {e}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download music from YouTube.")
parser.add_argument("-songs_file", type=str, help="list of songs to download")
parser.add_argument("-out_dir", type=str, help="download directory")
args = parser.parse_args()
if args.songs_file is None:
print("Please supply songs file")
exit(0)
elif args.out_dir is None:
print("Please supply download directory")
exit(0)
try:
os.listdir(args.out_dir)
except FileNotFoundError:
# Download directory does not exist, so create it
try:
os.mkdir(args.out_dir)
except:
print("Could not create download directory")
exit(0)
home_dir = os.getcwd()
with open(args.songs_file, 'rb+') as f:
# Move to download directory for placing songs
os.chdir(args.out_dir)
for line in f:
# Extract the song name and look up video, cleaning up white space
fields = [ field.strip() for field in line.decode().split('|') ]
song = fields[0]
artist = fields[1]
try:
url = fields[2]
try:
mark = fields[3]
if mark == '#':
print(f"Already downloaded: {song}")
continue
except IndexError:
# Song not marked with `-` or `#`
print(f"Mark not found: {song}")
continue
except IndexError:
# Cannot download this song
continue
if (download_song(artist, song, url)):
mark_completed(f, line)
if (home_dir != os.getcwd()):
# Return to original directory
os.chdir(home_dir)
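# Expected layout of the songs file, inferred from the '|'-separated parsing
# above (song | artist | url | mark, where '-' means pending and '#' means
# already downloaded); the entries are made-up examples:
#
#   Some Song  | Some Artist | https://www.youtube.com/watch?v=XXXXXXXXXXX | -
#   Other Song | Other Band  | https://www.youtube.com/watch?v=YYYYYYYYYYY | #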
|
S0l0m4n/random-code
|
python/download_songs/youtube_download.py
|
youtube_download.py
|
py
| 3,450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5299866462
|
from proxy_checker import ProxyChecker
from pathlib import Path
import os.path
class GettingProxyException(Exception):
def __str__(self):
return f"{super().__str__()}: Error while getting proxy!\n"
class ListEmptyException(Exception):
def __str__(self):
return f"{super().__str__()}: Your proxy list is empty, check path file or validate your proxy shema <ip>:<port>!\n"
def checker(proxy):
checker = ProxyChecker()
checked_status = checker.check_proxy(proxy)
print(f"CHECKING: {proxy}")
print(f"Proxy working status: {checked_status}\n")
return checked_status
def get_proxy(proxies_file, is_check=False):
proxy_list = []
if not os.path.isfile(proxies_file):
print(f"File '{proxies_file}' does not exist")
raise ListEmptyException
    try:
        with open(proxies_file) as p_file:
            for proxy in p_file:
                proxy = proxy.strip()  # drop the trailing newline and surrounding whitespace
                if not proxy:
                    continue
                if is_check:
                    if checker(proxy):
                        proxy_list.append(proxy)
                else:
                    proxy_list.append(proxy)
    except Exception as e:
        raise GettingProxyException(f"An error occurred while getting proxies: {e}")
    # returning from a finally block would swallow the exception raised above,
    # so the result is checked here instead
    if proxy_list:
        return proxy_list
    raise ListEmptyException
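# A minimal usage sketch; "proxies.txt" is a hypothetical file holding one
# <ip>:<port> entry per line, matching the schema named in ListEmptyException:
#
#   if __name__ == "__main__":
#       proxies = get_proxy("proxies.txt", is_check=True)
#       print(f"{len(proxies)} working proxies loaded")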
|
pawel-krakowiak/trovo_viewer_bot
|
proxy_config.py
|
proxy_config.py
|
py
| 1,499 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5308237700
|
# Bottom-up approach
n = int(input())
d = [0] * (n+1)
d[1] = 0
for i in range(2, n+1):
    # take the -1 step (one operation on top of d[i-1])
    d[i] = d[i-1]+1
    # /2 case; the +1 counts the division operation
    # if i is divisible by 2 and d[i//2] + 1 beats the current value, take it
    if i % 2 == 0 and d[i] > d[i//2] + 1:
        d[i] = d[i//2]+1
    # /3 case; the +1 counts the division operation
    # if i is divisible by 3 and d[i//3] + 1 beats the current value, take it
    if i % 3 == 0 and d[i] > d[i//3] + 1:
        d[i] = d[i//3]+1
print(d[n])
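# Worked example: n = 10 gives d[10] = 3, via 10 -> 9 -> 3 -> 1
# (one -1 step followed by two /3 steps).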
# x = int(input())
#
# min_cnt = [0] * (x+1)
# idx = 0
# while True:
# if idx > x:
# break
# if idx <= 1:
# min_cnt[idx] = 0
# else:
# temp_min = x+1
# if idx % 3 == 0:
# temp_idx = int(idx/3)
# temp_min = min(temp_min, min_cnt[temp_idx])
#
# if idx % 2 == 0:
# temp_idx = int(idx/2)
# temp_min = min(temp_min, min_cnt[temp_idx])
# temp_min = min(temp_min, min_cnt[idx-1])
# min_cnt[idx] = int(temp_min+1)
# idx = idx + 1
# print(min_cnt[x])
|
louisuss/Algorithms-Code-Upload
|
Python/Baekjoon/DP/1463.py
|
1463.py
|
py
| 1,165 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
19052725448
|
'''
6075 : [Basics - loop structures] Read one integer and print the numbers up to it (1)
This problem is designed for quickly learning the basics of Python,
and its description assumes a Python code submission.
------
Read one integer (0 ~ 100) and print the numbers from 0 up to it, in order.
'''
n = int(input())
tmp = 0
while n >= tmp:
print(tmp)
tmp += 1
|
parkjunga/algorithm
|
코드업/6075 [기초-반복실행구조] 정수 1개 입력받아 그 수까지 출력하기1.py
|
6075 [기초-반복실행구조] 정수 1개 입력받아 그 수까지 출력하기1.py
|
py
| 407 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
42241289409
|
# -*- coding:utf-8 -*-
# author:DragonCheng
# Date: 2022/9/14
# function: socket communication
import socket
import time
# 1. Create the socket object
client = socket.socket()
# 2. Connect to the server
client.connect(('192.168.2.58',8090))
while True:
# send_data = input("client send data >>>")
# client.send(send_data.encode('utf-8'))
    # 3. Send data to the server
client.send(b'client send data >>>')
    # 4. Receive data from the server
re_data = client.recv(1024).decode('utf-8')
if re_data == 'quit':
break
print("客户端接收到服务器的数据为:",time.strftime("%Y-%m-%d %X",time.localtime()),re_data)
time.sleep(1)
# 5. Close the socket object
client.close()
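# A minimal matching server sketch (an assumption -- the real server for this
# client is not shown); it accepts one connection on port 8090 and replies to
# every message until the connection closes:
#
#   import socket
#
#   server = socket.socket()
#   server.bind(('0.0.0.0', 8090))
#   server.listen(1)
#   conn, addr = server.accept()
#   while True:
#       data = conn.recv(1024)
#       if not data:
#           break
#       conn.send(b'server reply')
#   conn.close()
#   server.close()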
|
Dragonchengllc/Socket
|
socket_client.py
|
socket_client.py
|
py
| 726 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33625318941
|
from util import logging
from training.util import monitor_loss
import tensorflow as tf
import streamlit as st
import os
import time
import numpy as np
import pandas as pd
def batch_loss(model, inp, aux, targ, loss_funct, opt = None):
loss = 0
with tf.GradientTape() as tape:
pred = model(inp, aux, training=True)
loss = loss_funct(targ, pred)
if opt is not None:
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
opt.apply_gradients(zip(gradients, variables))
    # save a model checkpoint (eager TF); note this runs after every optimizer step
checkpoint_dir = 'trained_model_dir'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
root = tf.train.Checkpoint(optimizer=opt,
model=model)
root.save(checkpoint_prefix)
return loss
def training(model, nb_epochs, step_epoch, train_set, valid_set, loss_fct, valid_loss_fct, opt, patience, min_delta):
# Keep results for plotting
train_loss_results = []
valid_loss_results = []
steps_per_epoch = step_epoch
# early stopping
patience_cnt = 0
logging.info('Training started...')
start = time.time()
df = pd.DataFrame({'Loss': [], 'Loss Val': []})
chart = st.line_chart(df)
for epoch in range(nb_epochs):
## training
epoch_loss_avg = tf.keras.metrics.Mean()
for (batch, (inp_tot, targ)) in enumerate(train_set.take(steps_per_epoch)):
# define encoder and decoder inputs
inp, aux = inp_tot[0], inp_tot[1]
# loss for batch
batch_loss_results = batch_loss(model, inp, aux, targ, loss_fct, opt)
# training progress
epoch_loss_avg.update_state(batch_loss_results)
# collect training loss values
train_loss_results.append(epoch_loss_avg.result())
_Loss = epoch_loss_avg.result().numpy()
## validation
epoch_valid_loss_avg = tf.keras.metrics.Mean()
for (batch, (inp_tot, targ)) in enumerate(valid_set.take(steps_per_epoch)):
inp, aux = inp_tot[0], inp_tot[1]
batch_loss_results = batch_loss(model, inp, aux, targ, valid_loss_fct, None)
epoch_valid_loss_avg.update_state(batch_loss_results)
# collect training loss values
valid_loss_results.append(epoch_valid_loss_avg.result())
ValLoss = epoch_valid_loss_avg.result().numpy()
df = pd.DataFrame({'Loss': [_Loss],
'Loss Val': [ValLoss]})
chart.add_rows(df)
# early stopping
patience_cnt = monitor_loss(epoch, valid_loss_results, min_delta, patience_cnt)
if patience_cnt > patience:
logging.info("early stopping...")
break
        if epoch % 50 == 0:  # report progress every 50 epochs (via streamlit instead of logging.info)
st.text("Epoch {}: Loss MAE: {:.5f} --- Val Loss MAE: {:.5f}".format(epoch,
epoch_loss_avg.result(),
epoch_valid_loss_avg.result()))
logging.info('Time taken to train {} sec\n'.format(time.time() - start))
logging.info('Training finished...')
return model
|
giobbu/ML-streamlit-apps
|
geo-ML/road-traffic-forecasting-belgium/training/train_module.py
|
train_module.py
|
py
| 3,373 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6998246271
|
from csv import DictReader
from glob import glob
from random import choices
from datetime import date
import numpy as np
import matplotlib.pyplot as plt
from statistics import mean
from dataclasses import dataclass
'''
for traders with less than 25k in their brokerage accounts, three day trades are allowed per five day rolling period
this means, in effect, that three day trades are allowed per week as long as they are performed on the same days each week
Which days?
I will rule out Mondays since many market holidays fall on Mondays
The number of market holidays falling on each day are:
Monday: 19
Tuesday: 1
Wednesday: 2
Thursday: 3
Friday: 3
So, Tuesdays and Wednesdays are in.
Fridays have a shortened session each November on Black Friday.
So, if day trading on exactly the same three weekdays each week, it seems that Tuesday, Wednesday, and Thursday are optimal.
For traders with 25k or more in their brokerage accounts, there appears to be no limitation on the number of day trades.
'''
csv_files = glob('./data-files/**/*.csv')
quotes = {}
@dataclass
class Quote:
open_price: float
high_price: float
close_price: float
trade_date: date
for csv_file in csv_files:
with open(csv_file, newline='') as f:
quote_reader = DictReader(f)
quote = []
for row in quote_reader:
trade_date = date.fromisoformat(row['Date'])
if trade_date.weekday() in (1, 2, 3):
try:
quote.append(Quote(float(row['Open']), float(row['High']), float(row['Close']), trade_date))
except:
print(csv_file)
print(row)
if (len(quote) != 155):
print(csv_file)
print(len(quote))
else:
quotes[csv_file.split('.')[1].split('\\')[2]] = quote
# sample_symbol = 'INDB'
trading_days = 155
simulation_size = 500000
# targets = [{ 'target': 1.005 + 0.0000005 * i, 'running total': [1000.0 for _ in range(simulation_size)] } for i in range(10000)]
# print(len([quotes[equity] for equity in quotes.keys()]))
running_totals = [1000.0 for _ in range(simulation_size)]
target = 1.008778
print(len(quotes.keys()))
for i in range(trading_days):
equities = choices([key for key in quotes.keys()], k=simulation_size)
for (j, equity) in enumerate(equities):
quote = quotes[equity][i]
entry_price = quote.open_price
position_entry_shares = running_totals[j] / entry_price
target_price = entry_price * target
if target_price <= quote.high_price:
position_exit = position_entry_shares * target_price
else:
position_exit = position_entry_shares * quote.close_price
if position_exit > 500:
# regulatory transaction fee
position_exit -= position_exit * 22.9 / 1000000.0
if position_entry_shares > 50:
# trading activity fee
position_exit -= 0.00013 * position_entry_shares
running_totals[j] = position_exit
# print(sorted(running_totals))
print(f'less than 700: {len([x for x in running_totals if x < 700])}')
print(f'at least 700 and less than 800: {len([x for x in running_totals if x >= 700 and x < 800])}')
print(f'at least 800 and less than 900: {len([x for x in running_totals if x >= 800 and x < 900])}')
print(f'at least 900 and less than 1000: {len([x for x in running_totals if x >= 900 and x < 1000])}')
print(f'at least 1000 and less than 1100: {len([x for x in running_totals if x >= 1000 and x < 1100])}')
print(f'at least 1100 and less than 1200: {len([x for x in running_totals if x >= 1100 and x < 1200])}')
print(f'at least 1200 and less than 1300: {len([x for x in running_totals if x >= 1200 and x < 1300])}')
print(f'at least 1300 and less than 1400: {len([x for x in running_totals if x >= 1300 and x < 1400])}')
print(f'at least 1400: {len([x for x in running_totals if x >= 1400])}')
'''
x = [st['target'] for st in targets]
y = [mean(st['running total']) for st in targets]
plt.scatter(x, y)
plt.show()
'''
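# Worked example of the fee arithmetic above, for a $1,000 position exit:
#   regulatory transaction fee: 1000 * 22.9 / 1,000,000   = $0.0229
#   trading activity fee (e.g. 100 shares): 0.00013 * 100 = $0.013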
|
joshparkerj/day-trader
|
simulate.py
|
simulate.py
|
py
| 3,898 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21093087761
|
from Stack import Stack
def sortArray(arr):
    if not arr:  # handles both None and an empty list
        return None
st1 = Stack()
st2 = Stack()
for i in arr:
if st1.isEmpty():
st1.push(i)
        else:
            # spill smaller items to st2 until i can sit on a value >= i,
            # then push i and restore the spilled items on top of it
            while (not st1.isEmpty()) and i > st1.top():
                st2.push(st1.pop())
            st1.push(i)
            while not st2.isEmpty():
                st1.push(st2.pop())
return st1
if __name__ == '__main__':
arr = [7, 8, 3, 1, 5, 9]
print(sortArray(arr).items)
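# Trace for the sample input [7, 8, 3, 1, 5, 9]: st1 keeps its values in
# non-increasing order from bottom to top, ending as 9, 8, 7, 5, 3, 1
# (assuming the Stack class exposes its storage bottom-to-top via .items).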
|
fahadfahim13/Problem_Solve
|
Python/Coding Simplified/Stack_Python/getMinO(1).py
|
getMinO(1).py
|
py
| 742 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15422153495
|
from os import path
from setuptools import setup
# read the contents of the README file to use as the long description
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="pyswahili",
version="0.1.4",
description="""
    Python package for bridging Python's English keywords
    with Swahili ones, allowing Swahili speakers to learn the basics of coding
    without ever knowing English
""",
long_description=long_description,
long_description_content_type='text/markdown',
url="https://github.com/Kalebu/pyswahili",
download_url='https://github.com/Kalebu/pyswahili/releases/tag/0.1',
author="Jordan Kalebu",
author_email="[email protected]",
license="MIT",
packages=["pyswahili"],
keywords=[
"pyswahili",
"python-tanzania",
"python-transpiler",
"swahili-python",
"python in swahili",
"python for swahili speakers",
"code python in swahili",
"swahili programming language",
"program in swahili",
],
entry_points={
"console_scripts": [
"pyswahili = pyswahili.__main__:main"
]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
)
|
Kalebu/pyswahili
|
setup.py
|
setup.py
|
py
| 1,661 |
python
|
en
|
code
| 79 |
github-code
|
6
|
28352614903
|
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.http import (
JsonResponse
)
from django.core import serializers
from second.models import Message
class AddMessageView(View):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AddMessageView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
new_msg = Message()
new_msg.from_user = request.user
try:
to_user = User.objects.get(username=request.POST['to'])
        except (KeyError, User.DoesNotExist):
response = {'success':False, 'msg':'To user does not exist'}
return JsonResponse(response)
new_msg.to_user = to_user
new_msg.text = request.POST['text']
new_msg.save()
return JsonResponse({'success':True, 'msg':"message successfully sent"})
class GetAllMessages(View):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(GetAllMessages, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
user = request.user
username = request.GET['user']
try:
other_user = User.objects.get(username=username)
        except User.DoesNotExist:
response = {'success':False, 'msg': 'User does not exist'}
return JsonResponse(response)
msgs = list(Message.objects.filter(from_user=user, to_user=other_user))
msgs_tmp = list(Message.objects.filter(from_user=other_user, to_user=user))
msgs+=msgs_tmp
msgs.sort(key=lambda x: x.time, reverse=True)
all_msgs = [msg.to_dict() for msg in msgs]
return JsonResponse(all_msgs, safe=False)
class GetAllUsers(View):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(GetAllUsers, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
user = request.user
msgs = Message.objects.filter(from_user=user)
users = [msg.to_user for msg in msgs]
msgs = Message.objects.filter(to_user=user)
users_tmp = [msg.from_user for msg in msgs]
users+=users_tmp
users = list(set(users))
# users = serializers.serialize('json', users)
users = [{"username":user.username, "name":
user.first_name+" "+user.last_name} for user in users]
return JsonResponse(users, safe=False)
|
sanjayramesh005/chat-app
|
second/views.py
|
views.py
|
py
| 2,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37182738964
|
from abc import ABC, abstractmethod
from typing import List
import requests
from config import EnvConfig
from models.Note import Card, DeckServiceCard
class DeckServiceAPIInterface(ABC):
@abstractmethod
def save_cards(self, user_id: str, deck_id: str, cards: List[Card]):
pass
class DeckServiceAPI(DeckServiceAPIInterface):
def __init__(self, config: EnvConfig):
self.DECK_SERVICE_HOST_NAME = config.DECK_SERVICE_HOST_NAME
def save_cards(
self, user_id: str, deck_id: str, cards: List[Card]
) -> List[DeckServiceCard]:
url = f"http://{self.DECK_SERVICE_HOST_NAME}/decks/{deck_id}/cards?userID={user_id}"
data = [{"deckID": deck_id, **card} for card in cards]
response = requests.post(url, json=data)
if response.status_code != 201:
raise Exception(
f"Failed to save cards. Status code: {response.status_code}. Response: {response.json()}"
)
response_body = response.json()
response_data: List[DeckServiceCard] = response_body["data"]
return response_data
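# A minimal usage sketch; the host name, ids, and card fields below are
# hypothetical (EnvConfig and the Card shape are defined elsewhere):
#
#   config = EnvConfig()  # assumed to carry DECK_SERVICE_HOST_NAME
#   api = DeckServiceAPI(config)
#   saved = api.save_cards(user_id="user-1", deck_id="deck-1",
#                          cards=[{"question": "2+2?", "answer": "4"}])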
|
MoShrank/card-generation-service
|
external/DeckServiceAPI.py
|
DeckServiceAPI.py
|
py
| 1,105 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35428259157
|
import pickle
from typing import List, Tuple
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
def plot_segments(
true_segment_list: List[Tuple[float, float]],
pred_segment_list: List[Tuple[float, float]],
t_min: float = None,
t_max: float = None,
zoom: bool = True,
marker_list: List[int] = None,
):
"""Display true and predicted timecodes on a common timeline.
    :param true_segment_list: list of ground-truth timecodes.
    :param pred_segment_list: list of predicted timecodes.
    :param t_min: timecode at which the timeline starts.
    :param t_max: timecode at which the timeline ends.
    :param zoom: whether to display the diagram in a "zoom" fashion or not
    (i.e. with details; the timeline should be short).
:param marker_list: list of markers to add on the diagram (gray lines).
"""
true_segment_list = sorted(true_segment_list)
pred_segment_list = sorted(pred_segment_list)
x_max = max(true_segment_list[-1][-1], pred_segment_list[-1][-1])
t_min = 0 if not t_min else t_min
t_max = x_max if not t_max else t_max
true_segment_list = [
[t1, t2]
for t1, t2 in true_segment_list
if (t1 >= t_min) and (t2 <= t_max)
]
pred_segment_list = [
[t1, t2]
for t1, t2 in pred_segment_list
if (t1 >= t_min) and (t2 <= t_max)
]
plt.figure(figsize=(20, 5))
prev_x_min = t_min
for x_min, x_max in true_segment_list:
if zoom:
plt.vlines(x_min, 1, 2, color="#e41a1c", linestyles="dashed")
plt.vlines(x_max, 1, 2, color="#e41a1c", linestyles="dashed")
plt.fill_between([x_min, x_max], 1, 2, color="#e41a1c", alpha=0.1)
plt.hlines(
1,
prev_x_min,
x_min,
color="black",
linewidth=2,
linestyles="dashed",
)
plt.hlines(1, x_min, x_max, color="#e41a1c", linewidth=4)
prev_x_min = x_max
plt.hlines(
1, x_max, t_max, color="black", linewidth=2, linestyles="dashed"
)
prev_x_min = t_min
for x_min, x_max in pred_segment_list:
if zoom:
plt.vlines(x_min, 1, 2, color="#377eb8", linestyles="dashed")
plt.vlines(x_max, 1, 2, color="#377eb8", linestyles="dashed")
plt.fill_between([x_min, x_max], 1, 2, color="#377eb8", alpha=0.1)
plt.hlines(
2,
prev_x_min,
x_min,
color="black",
linewidth=2,
linestyles="dashed",
)
plt.hlines(2, x_min, x_max, color="#377eb8", linewidth=4)
prev_x_min = x_max
plt.hlines(
2, x_max, t_max, color="black", linewidth=2, linestyles="dashed"
)
if marker_list is not None:
marker_list = [t for t in marker_list if (t >= t_min) and (t <= t_max)]
for timecode in marker_list:
plt.vlines(timecode, 1, 2, color="#000000")
    # colors must match the drawing above: true segments use "#e41a1c", predictions "#377eb8"
    true_legend = mpatches.Patch(color="#e41a1c", label="true")
    pred_legend = mpatches.Patch(color="#377eb8", label="pred")
    plt.legend(handles=[true_legend, pred_legend], loc=6)
plt.show()
def load_labels(label_path: str) -> List[Tuple[float, float]]:
"""Load a Friends label file and extract laugther timecodes."""
labels = pickle.load(open(label_path, "rb"))
true_timecodes = []
for segment in labels.values():
if segment[-1][-10:-2].lower() == "laughter":
true_timecodes.append(segment[:2])
return sorted(true_timecodes)
def load_preds(pred_path: str) -> List[Tuple[float, float]]:
"""Load a prediction file with laugther timecodes."""
preds = pickle.load(open(pred_path, "rb"))
return sorted(preds)
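# A minimal usage sketch with toy segments (timecodes in seconds):
#
#   true_segs = [(1.0, 2.5), (4.0, 5.0)]
#   pred_segs = [(1.2, 2.4), (4.5, 5.5)]
#   plot_segments(true_segs, pred_segs, t_min=0.0, t_max=6.0, zoom=True)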
|
robincourant/FunnyNet
|
laughter_detection/core/utils.py
|
utils.py
|
py
| 3,805 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43974550604
|
import re
from collections import defaultdict
from typing import List
class Solution:
def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        counter = defaultdict(int)  # declare the counter; the int factory defaults missing keys to 0
        split = re.split('[! ?.,;\']', paragraph)  # split on multiple delimiters
        for word in split:  # iterate over the split words
            if word == '':  # skip empty strings
                continue
            word = word.lower()  # lowercase the word
            if word not in banned:  # if it is not a banned word
                counter[word] += 1  # increment its count
answer = ""
count = 0
        for k, v in counter.items():  # iterate over the dictionary
            if v > count:  # track the word with the highest count
count = v
answer = k
return answer
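# Example (LeetCode 819):
#   paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
#   banned = ["hit"]
#   Solution().mostCommonWord(paragraph, banned)  ->  "ball"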
|
HJ-Rich/leetcode
|
819-most-common-word/819-most-common-word.py
|
819-most-common-word.py
|
py
| 980 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
11783374292
|
"""
Homework 2 Server
Cameron Sprowls
"""
# Imports
import socket
import os
class Server:
@staticmethod
def main():
"""
Does the normal function of the program
"""
        # Server address and port (hardcoded here rather than prompted for)
        ip = '35.40.114.88'
port = 5000
buf = 1024
packets = {}
window = 0
# Wait for data in a loop until data is received
# Send the data back to the client
while True:
# Connect to the server and listen for data
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((ip, port))
while True:
data = s.recvfrom(buf)
print("message: " + str(data[0].decode()))
if os.path.isfile(str(data[0].decode())):
file = open(data[0].decode(), 'rb')
# Loop through file and send all of it in small packets
while True:
                    # Store a window of packets in a dictionary for easy access
                    while window < 5:
                        packets[window] = file.readline(buf)
                        # send this packet to the client's address (data[1] is the (host, port) tuple)
                        s.sendto(packets[window], data[1])
                        window += 1
                    # Get back an acknowledgement and slide the window accordingly; the client
                    # is assumed to reply with the highest received packet index as ASCII digits
                    # (note: there is no EOF handling; readline returns b'' at end of file)
                    ack, _ = s.recvfrom(buf)
                    max_packet_sent = int(ack.decode())
                    print(max_packet_sent)
                    for key in packets:
                        if key + max_packet_sent > 4:
                            packets[key] = b''
                        else:
                            packets[key] = packets[key + max_packet_sent]
                    window = max_packet_sent
Server.main()
|
CameronSprowls/PythonClientServerTCP
|
src/ServerUDP.py
|
ServerUDP.py
|
py
| 2,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23312692887
|
import discord
import os
from data_loader import input_text_parser
from data_loader import input_image_parser
image_path = "./images"
text_path = "./text"
token = open("token.txt", "r").read()
# change cwd in case this is called from shell script
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# create data objects, reading in data
text_data = input_text_parser(text_path)
file_data = input_image_parser(image_path)
client = discord.Client(intents=discord.Intents.all())
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord')
@client.event
async def on_message(message):
"""
Main work method, called whenever someone posts a message
"""
# don't respond to self, empty messages, or messages that don't start with a bang
if message.author == client.user or \
len(message.content) == 0 or \
message.content[0] != '!':
return
out_str = None
out_file = None
# clean message
cmd = message.content[1:].strip().lower()
# bot info
if cmd == "encyclopedia bottanica":
out_str = "I am an encyclopedia. Enter '!toc' or '!contents' for a table of contents.\n" + \
"Enter '!<entry name>' to view an entry. I am case-insensitive."
# user requests table of contents
if cmd == "contents" or cmd == "toc":
out_str = f"I have information on the following categories:\n{text_data.get_contents()}\n" + \
f"and can share the following files:\n{file_data.get_contents()}"
else:
# get usual output
out_str = text_data.get(cmd)
out_file = file_data.get(cmd)
# print results
if out_file != None:
await message.channel.send(file=out_file)
if out_str != None:
await message.channel.send(out_str)
# fire this bad boy up
client.run(token)
|
WireHallMedic/Encyclopedia-Bottanica
|
encyclopedia_bottanica.py
|
encyclopedia_bottanica.py
|
py
| 1,802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20632320396
|
import struct
import typing
from .base_bean import BaseBean
from .config import Config
from .ctrl_enum import EnumDevice, EnumCmdType, EnumFanDirection, EnumOutDoorRunCond, EnumFanVolume, EnumControl, \
EnumSensor, FreshAirHumidification, ThreeDFresh
from .dao import Room, AirCon, Geothermic, Ventilation, HD, Device, AirConStatus, get_device_by_aircon, Sensor, \
UNINITIALIZED_VALUE
from .param import GetRoomInfoParam, AirConRecommendedIndoorTempParam, AirConCapabilityQueryParam, \
AirConQueryStatusParam, Sensor2InfoParam
def decoder(b):
if b[0] != 2:
return None, None
length = struct.unpack('<H', b[1:3])[0]
if length == 0 or len(b) - 4 < length or struct.unpack('<B', b[length + 3:length + 4])[0] != 3:
if length == 0:
return HeartbeatResult(), None
else:
return None, None
return result_factory(struct.unpack('<BHBBBBIBIBH' + str(length - 16) + 'sB', b[:length + 4])), b[length + 4:]
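# Frame layout consumed by decoder/result_factory above, as read by the
# '<BHBBBBIBIBH...sB' struct format: a 0x02 start byte, a little-endian u16
# payload length, four single bytes (two reserved, subbody_ver, one reserved),
# a u32 counter, a u8 device type, a u32 device id, a need-ack byte, a u16
# command type, the variable-length subbody (length - 16 bytes), and a 0x03
# end byte.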
def result_factory(data):
r1, length, r2, r3, subbody_ver, r4, cnt, dev_type, dev_id, need_ack, cmd_type, subbody, r5 = data
if dev_id == EnumDevice.SYSTEM.value[1]:
if cmd_type == EnumCmdType.SYS_ACK.value:
result = AckResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CMD_RSP.value:
result = CmdRspResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_TIME_SYNC.value:
result = TimeSyncResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_ERR_CODE.value:
result = ErrCodeResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_GET_WEATHER.value:
result = GetWeatherResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_LOGIN.value:
result = LoginResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CHANGE_PW.value:
result = ChangePWResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_GET_ROOM_INFO.value:
result = GetRoomInfoResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_SETTING.value:
result = QueryScheduleSettingResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_ID.value:
result = QueryScheduleIDResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_HAND_SHAKE.value:
result = HandShakeResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_CMD_TRANSFER.value:
result = CmdTransferResult(cnt, EnumDevice.SYSTEM)
elif cmd_type == EnumCmdType.SYS_QUERY_SCHEDULE_FINISH.value:
result = QueryScheduleFinish(cnt, EnumDevice.SYSTEM)
        elif cmd_type == EnumCmdType.SYS_SCHEDULE_QUERY_VERSION_V3.value:
            result = ScheduleQueryVersionV3Result(cnt, EnumDevice.SYSTEM)
        elif cmd_type == EnumCmdType.SENSOR2_INFO.value:
result = Sensor2InfoResult(cnt, EnumDevice.SYSTEM)
else:
result = UnknownResult(cnt, EnumDevice.SYSTEM, cmd_type)
elif dev_id == EnumDevice.NEWAIRCON.value[1] or dev_id == EnumDevice.AIRCON.value[1] \
or dev_id == EnumDevice.BATHROOM.value[1] or dev_id == EnumDevice.SENSOR.value[1]:
device = EnumDevice((8, dev_id))
if cmd_type == EnumCmdType.STATUS_CHANGED.value:
result = AirConStatusChangedResult(cnt, device)
elif cmd_type == EnumCmdType.QUERY_STATUS.value:
result = AirConQueryStatusResult(cnt, device)
elif cmd_type == EnumCmdType.AIR_RECOMMENDED_INDOOR_TEMP.value:
result = AirConRecommendedIndoorTempResult(cnt, device)
elif cmd_type == EnumCmdType.AIR_CAPABILITY_QUERY.value:
result = AirConCapabilityQueryResult(cnt, device)
elif cmd_type == EnumCmdType.QUERY_SCENARIO_SETTING.value:
result = AirConQueryScenarioSettingResult(cnt, device)
elif cmd_type == EnumCmdType.SENSOR2_INFO.value:
result = Sensor2InfoResult(cnt, device)
else:
result = UnknownResult(cnt, device, cmd_type)
else:
"""ignore other device"""
result = UnknownResult(cnt, EnumDevice.SYSTEM, cmd_type)
result.subbody_ver = subbody_ver
result.load_bytes(subbody)
return result
class Decode:
def __init__(self, b):
self._b = b
self._pos = 0
def read1(self):
pos = self._pos
s = struct.unpack('<B', self._b[pos:pos + 1])[0]
pos += 1
self._pos = pos
return s
def read2(self):
pos = self._pos
s = struct.unpack('<H', self._b[pos:pos + 2])[0]
pos += 2
self._pos = pos
return s
def read4(self):
pos = self._pos
s = struct.unpack('<I', self._b[pos:pos + 4])[0]
pos += 4
self._pos = pos
return s
def read(self, l):
pos = self._pos
s = self._b[pos:pos + l]
pos += l
self._pos = pos
return s
def read_utf(self, l):
pos = self._pos
try:
s = self._b[pos:pos + l].decode('utf-8')
except UnicodeDecodeError:
s = None
pos += l
self._pos = pos
return s
class BaseResult(BaseBean):
    def __init__(self, cmd_id: int, target: EnumDevice, cmd_type: EnumCmdType):
        BaseBean.__init__(self, cmd_id, target, cmd_type)
def load_bytes(self, b):
"""do nothing"""
def do(self):
"""do nothing"""
class HeartbeatResult(BaseResult):
def __init__(self):
BaseResult.__init__(self, 0, EnumDevice.SYSTEM, EnumCmdType.SYS_ACK)
class AckResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ACK)
def load_bytes(self, b):
Config.is_new_version = struct.unpack('<B', b)[0] == 2
class ScheduleQueryVersionV3Result(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ACK)
class Sensor2InfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SENSOR2_INFO)
self._count = 0
self._mode = 0
self._room_id = 0
self._sensor_type = 0
self._sensors: typing.List[Sensor] = []
def load_bytes(self, b):
data = Decode(b)
self._mode = data.read1()
count = data.read1()
self._count = count
while count > 0:
self._room_id = data.read1()
d = Decode(data.read(data.read1()))
self._sensor_type = d.read1()
unit_id = d.read1()
sensor = Sensor()
sensor.mac = d.read(6).hex()
sensor.room_id = self._room_id
sensor.unit_id = unit_id
length = d.read1()
sensor.alias = d.read_utf(length)
sensor.name = sensor.alias
sensor.type1 = d.read1()
sensor.type2 = d.read1()
humidity = UNINITIALIZED_VALUE
hcho = UNINITIALIZED_VALUE
temp = UNINITIALIZED_VALUE
if (sensor.type1 & 1) == 1:
temp = d.read2()
if ((sensor.type1 >> 1) & 1) == 1:
humidity = d.read2()
pm25 = UNINITIALIZED_VALUE
if (sensor.type1 >> 2) & 1 == 1:
pm25 = d.read2()
co2 = UNINITIALIZED_VALUE
if (sensor.type1 >> 3) & 1 == 1:
co2 = d.read2()
voc = EnumSensor.Voc.STEP_UNUSE
if (sensor.type1 >> 4) & 1 == 1:
f = d.read1()
voc = EnumSensor.Voc(f)
tvoc = UNINITIALIZED_VALUE
if (sensor.type1 >> 5) & 1 == 1:
tvoc = d.read2()
if (sensor.type1 >> 6) & 1 == 1:
hcho = d.read2()
switch_on_off = d.read1() == 1
temp_upper = d.read2()
temp_lower = d.read2()
humidity_upper = d.read2()
humidity_lower = d.read2()
pm25_upper = d.read2()
pm25_lower = d.read2()
co2_upper = d.read2()
co2_lower = d.read2()
voc_lower = d.read1()
tvoc_upper = d.read2()
hcho_upper = d.read2()
connected = d.read1() == 1
sleep_mode_count = d.read1()
sleep_mode_enable = False
if sleep_mode_count > 0:
sleep_mode_enable = d.read1() == 1
sensor.sensor_type = self._sensor_type
sensor.temp = temp
sensor.humidity = humidity
sensor.pm25 = pm25
sensor.co2 = co2
sensor.voc = voc
if self._sensor_type == 3:
sensor.tvoc = tvoc
sensor.hcho = hcho
sensor.tvoc_upper = tvoc_upper
sensor.hcho_upper = hcho_upper
sensor.switch_on_off = switch_on_off
sensor.temp_upper = temp_upper
sensor.temp_lower = temp_lower
sensor.humidity_upper = humidity_upper
sensor.humidity_lower = humidity_lower
sensor.pm25_upper = pm25_upper
sensor.pm25_lower = pm25_lower
sensor.co2_upper = co2_upper
sensor.co2_lower = co2_lower
sensor.voc_lower = voc_lower
sensor.connected = connected
sensor.sleep_mode_count = sleep_mode_count
self._sensors.append(sensor)
count = count - 1
def do(self):
from .service import Service
Service.set_sensors_status(self._sensors)
@property
def count(self):
return self._count
@property
def mode(self):
return self._mode
@property
def room_id(self):
return self._room_id
@property
def sensor_type(self):
return self._sensor_type
@property
def sensors(self):
return self._sensors
class CmdRspResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CMD_RSP)
self._cmdId = None
self._code = None
def load_bytes(self, b):
self._cmdId, self._code = struct.unpack('<IB', b)
@property
def cmd_id(self):
return self._cmdId
@property
def code(self):
return self._code
class TimeSyncResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_TIME_SYNC)
self._time = None
def load_bytes(self, b):
self._time = struct.unpack('<I', b)[0]
@property
def time(self):
return self._time
class ErrCodeResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_ERR_CODE)
self._code = None
self._device = None
self._room = None
self._unit = None
def load_bytes(self, b):
dev_id, room, unit = struct.unpack('<iBB', b[:6])
self._device = EnumDevice((8, dev_id))
self._room = room
self._unit = unit
self._code = b[6:].decode('ASCII')
@property
def code(self):
return self._code
@property
def device(self):
return self._device
@property
def room(self):
return self._room
@property
def unit(self):
return self._unit
class GetWeatherResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_GET_WEATHER)
self._condition = None
self._humidity = None
self._temp = None
self._wind_dire = None
self._wind_speed = None
def load_bytes(self, b):
self._condition, self._humidity, self._temp, self._wind_dire, self._wind_speed \
= struct.unpack('<BBHBB', b)
@property
def condition(self):
return self._condition
@property
def humidity(self):
return self._humidity
@property
def temp(self):
return self._temp
@property
def wind_dire(self):
return self._wind_dire
@property
def wind_speed(self):
return self._wind_speed
class LoginResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_LOGIN)
self._status = None
def load_bytes(self, b):
self._status = struct.unpack('<BB', b)[1]
@property
def status(self):
return self._status
class ChangePWResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CHANGE_PW)
self._status = None
def load_bytes(self, b):
self._status = struct.unpack('<B', b)[0]
@property
def status(self):
return self._status
class GetRoomInfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_GET_ROOM_INFO)
self._count: int = 0
self._hds: typing.List[HD] = []
self._sensors: typing.List[Sensor] = []
self._rooms: typing.List[Room] = []
def load_bytes(self, b):
ver_flag = 1
d = Decode(b)
self._count = d.read2()
room_count = d.read1()
for i in range(room_count):
room = Room()
room.id = d.read2()
if self.subbody_ver == 1:
ver_flag = d.read1()
if ver_flag != 2:
length = d.read1()
room.name = d.read_utf(length)
length = d.read1()
room.alias = d.read_utf(length)
length = d.read1()
room.icon = d.read_utf(length)
unit_count = d.read2()
for j in range(unit_count):
device = EnumDevice((8, d.read4()))
device_count = d.read2()
for unit_id in range(device_count):
if EnumDevice.AIRCON == device or EnumDevice.NEWAIRCON == device or EnumDevice.BATHROOM == device:
dev = AirCon()
room.air_con = dev
dev.new_air_con = EnumDevice.NEWAIRCON == device
dev.bath_room = EnumDevice.BATHROOM == device
elif EnumDevice.GEOTHERMIC == device:
dev = Geothermic()
room.geothermic = dev
elif EnumDevice.HD == device:
dev = HD()
self.hds.append(dev)
room.hd_room = True
room.hd = dev
elif EnumDevice.SENSOR == device:
dev = Sensor()
self.sensors.append(dev)
room.sensor_room = True
elif EnumDevice.VENTILATION == device or EnumDevice.SMALL_VAM == device:
dev = Ventilation()
room.ventilation = dev
dev.is_small_vam = EnumDevice.SMALL_VAM == device
else:
dev = Device()
dev.room_id = room.id
dev.unit_id = unit_id
if ver_flag > 2:
length = d.read1()
dev.name = d.read_utf(length)
length = d.read1()
dev.alias = d.read_utf(length)
if dev.alias is None:
dev.alias = room.alias
self.rooms.append(room)
def do(self):
from .service import Service
Service.set_rooms(self.rooms)
Service.send_msg(AirConRecommendedIndoorTempParam())
Service.set_sensors(self.sensors)
aircons = []
new_aircons = []
bathrooms = []
for room in Service.get_rooms():
if room.air_con is not None:
room.air_con.alias = room.alias
if room.air_con.new_air_con:
new_aircons.append(room.air_con)
elif room.air_con.bath_room:
bathrooms.append(room.air_con)
else:
aircons.append(room.air_con)
p = AirConCapabilityQueryParam()
p.aircons = aircons
p.target = EnumDevice.AIRCON
Service.send_msg(p)
p = AirConCapabilityQueryParam()
p.aircons = new_aircons
p.target = EnumDevice.NEWAIRCON
Service.send_msg(p)
p = AirConCapabilityQueryParam()
p.aircons = bathrooms
p.target = EnumDevice.BATHROOM
Service.send_msg(p)
@property
def count(self):
return self._count
@property
def hds(self):
return self._hds
@property
def rooms(self):
return self._rooms
@property
def sensors(self):
return self._sensors
class QueryScheduleSettingResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_SETTING)
def load_bytes(self, b):
"""todo"""
class QueryScheduleIDResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_ID)
def load_bytes(self, b):
"""todo"""
class HandShakeResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_HAND_SHAKE)
self._time: str = ''
def load_bytes(self, b):
d = Decode(b)
self._time = d.read_utf(14)
def do(self):
p = GetRoomInfoParam()
p.room_ids.append(0xffff)
from .service import Service
Service.send_msg(p)
Service.send_msg(Sensor2InfoParam())
class GetGWInfoResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_HAND_SHAKE)
self._time: str = ''
def load_bytes(self, b):
"""todo"""
def do(self):
"""todo"""
class CmdTransferResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_CMD_TRANSFER)
def load_bytes(self, b):
"""todo"""
class QueryScheduleFinish(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.SYS_QUERY_SCHEDULE_FINISH)
def load_bytes(self, b):
"""todo"""
class AirConStatusChangedResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.STATUS_CHANGED)
self._room = 0 # type: int
self._unit = 0 # type: int
self._status = AirConStatus() # type: AirConStatus
def load_bytes(self, b):
d = Decode(b)
self._room = d.read1()
self._unit = d.read1()
status = self._status
flag = d.read1()
if flag & EnumControl.Type.SWITCH:
status.switch = EnumControl.Switch(d.read1())
if flag & EnumControl.Type.MODE:
status.mode = EnumControl.Mode(d.read1())
if flag & EnumControl.Type.AIR_FLOW:
status.air_flow = EnumControl.AirFlow(d.read1())
if flag & EnumControl.Type.CURRENT_TEMP:
status.current_temp = d.read2()
if flag & EnumControl.Type.SETTED_TEMP:
status.setted_temp = d.read2()
if Config.is_new_version:
if flag & EnumControl.Type.FAN_DIRECTION:
direction = d.read1()
status.fan_direction1 = EnumControl.FanDirection(direction & 0xF)
status.fan_direction2 = EnumControl.FanDirection((direction >> 4) & 0xF)
def do(self):
from .service import Service
Service.update_aircon(self.target, self._room, self._unit, status=self._status)
class AirConQueryStatusResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.QUERY_STATUS)
self.unit = 0
self.room = 0
self.current_temp = 0
self.setted_temp = 0
self.switch = EnumControl.Switch.OFF
self.air_flow = EnumControl.AirFlow.AUTO
self.breathe = EnumControl.Breathe.CLOSE
self.fan_direction1 = EnumControl.FanDirection.INVALID
self.fan_direction2 = EnumControl.FanDirection.INVALID
self.humidity = EnumControl.Humidity.CLOSE
self.mode = EnumControl.Mode.AUTO
self.hum_allow = False
self.fresh_air_allow = False
self.fresh_air_humidification = FreshAirHumidification.OFF
self.three_d_fresh = ThreeDFresh.CLOSE
def load_bytes(self, b):
d = Decode(b)
self.room = d.read1()
self.unit = d.read1()
flag = d.read1()
if flag & 1:
self.switch = EnumControl.Switch(d.read1())
if flag >> 1 & 1:
self.mode = EnumControl.Mode(d.read1())
if flag >> 2 & 1:
self.air_flow = EnumControl.AirFlow(d.read1())
if Config.is_c611:
if flag >> 3 & 1:
bt = d.read1()
self.hum_allow = bt & 8 == 8
self.fresh_air_allow = bt & 4 == 4
self.fresh_air_humidification = FreshAirHumidification(bt & 3)
if flag >> 4 & 1:
self.setted_temp = d.read2()
if Config.is_new_version:
if flag >> 5 & 1:
b = d.read1()
self.fan_direction1 = EnumControl.FanDirection(b & 0xf)
self.fan_direction2 = EnumControl.FanDirection(b >> 4 & 0xf)
if flag >> 6 & 1:
self.humidity = EnumControl.Humidity(d.read1())
if self.target == EnumDevice.BATHROOM:
if flag >> 7 & 1:
self.breathe = EnumControl.Breathe(d.read1())
elif self.target == EnumDevice.AIRCON:
if flag >> 7 & 1 == 1:
self.three_d_fresh = ThreeDFresh(d.read1())
else:
if flag >> 3 & 1:
self.current_temp = d.read2()
if flag >> 4 & 1:
self.setted_temp = d.read2()
if Config.is_new_version:
if flag >> 5 & 1:
b = d.read1()
self.fan_direction1 = EnumControl.FanDirection(b & 0xf)
self.fan_direction2 = EnumControl.FanDirection(b >> 4 & 0xf)
if self.target == EnumDevice.NEWAIRCON:
if flag >> 6 & 1:
self.humidity = EnumControl.Humidity(d.read1())
else:
if flag >> 7 & 1:
self.breathe = EnumControl.Breathe(d.read1())
def do(self):
from .service import Service
status = AirConStatus(self.current_temp, self.setted_temp, self.switch, self.air_flow, self.breathe,
self.fan_direction1, self.fan_direction2, self.humidity, self.mode)
Service.set_aircon_status(self.target, self.room, self.unit, status)
class AirConRecommendedIndoorTempResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.AIR_RECOMMENDED_INDOOR_TEMP)
self._temp: int = 0
self._outdoor_temp: int = 0
def load_bytes(self, b):
d = Decode(b)
self._temp = d.read2()
self._outdoor_temp = d.read2()
@property
def temp(self):
return self._temp
@property
def outdoor_temp(self):
return self._outdoor_temp
class AirConCapabilityQueryResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.AIR_CAPABILITY_QUERY)
self._air_cons: typing.List[AirCon] = []
def load_bytes(self, b):
d = Decode(b)
room_size = d.read1()
for i in range(room_size):
room_id = d.read1()
unit_size = d.read1()
for j in range(unit_size):
aircon = AirCon()
aircon.unit_id = d.read1()
aircon.room_id = room_id
aircon.new_air_con = self.target == EnumDevice.NEWAIRCON
aircon.bath_room = self.target == EnumDevice.BATHROOM
flag = d.read1()
aircon.fan_volume = EnumFanVolume(flag >> 5 & 0x7)
aircon.dry_mode = flag >> 4 & 1
aircon.auto_mode = flag >> 3 & 1
aircon.heat_mode = flag >> 2 & 1
aircon.cool_mode = flag >> 1 & 1
aircon.ventilation_mode = flag & 1
if Config.is_new_version:
flag = d.read1()
if flag & 1:
aircon.fan_direction1 = EnumFanDirection.STEP_5
else:
aircon.fan_direction1 = EnumFanDirection.FIX
if flag >> 1 & 1:
aircon.fan_direction2 = EnumFanDirection.STEP_5
else:
aircon.fan_direction2 = EnumFanDirection.FIX
aircon.fan_dire_auto = flag >> 2 & 1
aircon.fan_volume_auto = flag >> 3 & 1
aircon.temp_set = flag >> 4 & 1
aircon.hum_fresh_air_allow = (flag >> 5 & 1) & (flag >> 6 & 1)
aircon.three_d_fresh_allow = flag >> 7 & 1
flag = d.read1()
aircon.out_door_run_cond = EnumOutDoorRunCond(flag >> 6 & 3)
aircon.more_dry_mode = flag >> 4 & 1
aircon.pre_heat_mode = flag >> 3 & 1
aircon.auto_dry_mode = flag >> 2 & 1
aircon.relax_mode = flag >> 1 & 1
aircon.sleep_mode = flag & 1
else:
d.read1()
self._air_cons.append(aircon)
def do(self):
from .service import Service
if Service.is_ready():
if len(self._air_cons):
for i in self._air_cons:
Service.update_aircon(get_device_by_aircon(i), i.room_id, i.unit_id, aircon=i)
else:
for i in self._air_cons:
p = AirConQueryStatusParam()
p.target = self.target
p.device = i
from .service import Service
Service.send_msg(p)
Service.set_device(self.target, self._air_cons)
@property
def aircons(self):
return self._air_cons
class AirConQueryScenarioSettingResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice):
BaseResult.__init__(self, cmd_id, target, EnumCmdType.QUERY_SCENARIO_SETTING)
def load_bytes(self, b):
"""todo"""
class UnknownResult(BaseResult):
def __init__(self, cmd_id: int, target: EnumDevice, cmd_type: EnumCmdType):
BaseResult.__init__(self, cmd_id, target, cmd_type)
self._subbody = ''
def load_bytes(self, b):
self._subbody = struct.pack('<' + str(len(b)) + 's', b).hex()
@property
def subbody(self):
return self._subbody
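# worked example (added; not part of the original file): reading the first
# status-flag byte consumed by load_bytes above, e.g. flag = 0b00010101:
#   flag & 1      == 1  -> a switch byte follows
#   flag >> 1 & 1 == 0  -> no mode byte
#   flag >> 2 & 1 == 1  -> an air_flow byte follows
#   flag >> 4 & 1 == 1  -> a two-byte setted_temp follows
# (what bits 3 and 5..7 mean depends on Config flags and the target device)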
|
mypal/ha-dsair
|
custom_components/ds_air/ds_air_service/decoder.py
|
decoder.py
|
py
| 27,763 |
python
|
en
|
code
| 65 |
github-code
|
6
|
3024078044
|
import socket
class PortChecker:
@staticmethod
    def is_open(ip: str, port: int) -> bool:
"""
Checks if a port is open on an IP.
Args:
ip (str): The IP address to check.
port (int): The port number to check.
Returns:
bool: True if the port is open, False otherwise.
"""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, int(port)))
            s.shutdown(socket.SHUT_RDWR)
            return True
        except (OSError, ValueError):
            # connection refused/timed out, or the port was not a number
            return False
        finally:
            s.close()  # always release the socket
# test case
if __name__ == "__main__":
ip_address = "127.0.0.1"
port_number = 80
result = PortChecker.is_open(ip_address, port_number)
print(result)
|
LopeKinz/project-evoli
|
src/check_port.py
|
check_port.py
|
py
| 751 |
python
|
en
|
code
| 17 |
github-code
|
6
|
27661384502
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from base import BaseModel
class Encoder(nn.Module):
def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.0, device='cpu'):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.rnn_cell = rnn_cell
#self.padding_idx = self.embedding.to_index(padding)
self.bidirectional = bidirectional
self.n_layers = n_layers
self.dropout = dropout
self.device = device
self.n_init = (2 if bidirectional == True else 1) * n_layers
self.vocab_size = embedding.vocab_size
self.emb_size = embedding.emb_size
self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
#self.embedding.weight = nn.Parameter(embedding.vectors)
if rnn_cell == 'GRU': self.rnn = nn.GRU(self.emb_size, self.hidden_size, batch_first=True, dropout=self.dropout, num_layers=self.n_layers)
#self.freeze_embedding()
def freeze_embedding(self):
for param in self.embedding.parameters():
param.requires_grad = False
def forward(self, source):
# source: (batch, seq_len)
#init_hidden = torch.randn(self.n_init, source.size(0), self.hidden_size).to(self.device) #(n_layer*n_direction, batch, hidden_size)
source = self.embedding(source) # (batch, seq_len) -> (batch, seq_len, emb_size)
output, hidden = self.rnn(source, None) #(batch, seq_len, emb_size) -> (batch, seq_len, emb_size*n_direction), (n_layer*n_direction, batch, hidden_size)
return output, hidden #(n_layer*n_direction, batch, hidden_size)
class Decoder(nn.Module):
def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.2, device='cpu', teaching_force_rate=0.0, use_attn=False, method=None, padded_len=None):
super(Decoder, self).__init__()
self.hidden_size = hidden_size
self.rnn_cell = rnn_cell
self.bidirectional = bidirectional
self.n_layers = n_layers
self.dropout = dropout
self.device = device
self.n_init = (2 if bidirectional == True else 1) * n_layers
self.teaching_force_rate = teaching_force_rate
self.vocab_size = embedding.vocab_size
self.emb_size = embedding.emb_size
self.use_attn = use_attn
self.step = 0
self.relu = nn.ReLU()
self.softmax = nn.LogSoftmax(dim=-1)
self.embedding_org = embedding
self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
#self.embedding.weight = nn.Parameter(embedding.vectors)
self.linear = nn.Linear(self.hidden_size, self.vocab_size)
if rnn_cell == 'GRU': self.rnn = nn.GRU(self.emb_size, self.hidden_size, batch_first=True, dropout=self.dropout, num_layers=self.n_layers)
if self.use_attn:
self.attn = Attention(hidden_size=self.hidden_size, method=method, padded_len=padded_len)
#self.freeze_embedding()
def freeze_embedding(self):
for param in self.embedding.parameters():
param.requires_grad = False
def forward(self, label, init_hidden, encoder_output=None):
if(self.step > 2000): self.teaching_force_rate = 0.2
self.step += 1
        use_teaching_force = random.random() <= self.teaching_force_rate
# source: (batch, seq_len)
#input = self.relu(self.embedding(input)) # (batch, seq_len) -> (batch, seq_len, emb_size)
batch, seq_len = label.size(0), label.size(1)
outputs = []
hidden = init_hidden
if use_teaching_force:
for i in range(seq_len):
input = label[:, i].unsqueeze(1)
#print(label)
#print(str(i) + ': ' + self.embedding_org.indice_to_sentence(label[0].tolist()))
input = self.relu(self.embedding(input))
if self.use_attn:
attn_output = self.attn(encoder_output, input, hidden)
output, hidden = self.rnn(attn_output, hidden)
else:
output, hidden = self.rnn(input, hidden)
output = self.softmax(self.linear(output))
last_predict = output.max(2)[1]
#print(str(i) + ': ' + self.embedding_org.indice_to_sentence(last_predict[0].tolist()))
outputs.append(output)
else:
input = label[:, 0].unsqueeze(1)
input = self.relu(self.embedding(input))
for i in range(seq_len):
if self.use_attn:
attn_output = self.attn(encoder_output, input, hidden)
output, hidden = self.rnn(attn_output, hidden)
else:
output, hidden = self.rnn(input, hidden)
output = self.softmax(self.linear(output))
outputs.append(output)
last_predict = output.max(2)[1]
input = self.relu(self.embedding(last_predict))
outputs = torch.cat(outputs, dim=1)
return outputs
class Attention(nn.Module):
def __init__(self, hidden_size, method, padded_len):
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.method = method
self.attn = nn.Linear(hidden_size*2, padded_len)
self.attn_combine = nn.Linear(hidden_size*2, hidden_size)
        self.softmax = nn.Softmax(dim=-1)  # dim must be explicit to normalize over the attention weights
self.relu = nn.ReLU()
def forward(self, encoder_output, decoder_input_embeded, decoder_hidden):
# encoder_output: [batch, seq_len, hidden_size=embedded_size]
# decoder_input_embeded: [batch, 1, embedded_size]
# decoder_hidden: [batch, 1, embedded_size]
decoder_hidden = decoder_hidden.permute(1, 0, 2)
# print(encoder_output.size())
# print(decoder_input_embeded.size())
# print(decoder_hidden.size())
similarity = self.attn(torch.cat((decoder_input_embeded, decoder_hidden), dim=-1))
attn_weight = self.softmax(similarity) # [batch, 1, padded_len]
attn_applied = torch.bmm(attn_weight, encoder_output) #[batch, 1, hidden_size]
output = self.relu(self.attn_combine(torch.cat((attn_applied, decoder_input_embeded), dim=-1)))
return output
class ChatBotModel(BaseModel):
def __init__(self, embedding, hidden_size, rnn_cell='GRU', bidirectional=False, n_layers=1, dropout=0.2, device='cpu', teaching_force_rate=0.0, use_attn=False, method='concat', padded_len=10):
super().__init__()
self.use_attn = use_attn
self.embedding = embedding
if self.use_attn:
self.hidden_size = embedding.emb_size
else:
self.hidden_size = hidden_size
self.encoder = Encoder(self.embedding, self.hidden_size, rnn_cell=rnn_cell, bidirectional=bidirectional, n_layers=n_layers, dropout=dropout, device=device)
self.decoder = Decoder(self.embedding, self.hidden_size, rnn_cell=rnn_cell, bidirectional=bidirectional, n_layers=n_layers, dropout=dropout, device=device, teaching_force_rate=teaching_force_rate, use_attn=self.use_attn, method=method, padded_len=padded_len)
def forward(self, source, target):
# print('> : ' + self.embedding.indice_to_sentence(source[0].tolist()))
# print('= : ' + self.embedding.indice_to_sentence(target[0].tolist()))
encoder_output, encoder_hidden = self.encoder(source)
if self.use_attn:
output = self.decoder(target, encoder_hidden, encoder_output)
else:
output = self.decoder(target, encoder_hidden)
return output
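# --- hedged smoke test (added; not part of the original file) ---
# Assumes the repo's `base.BaseModel` is importable and that the embedding
# wrapper only needs vocab_size/emb_size here (the commented-out
# `embedding.vectors` lines above are never exercised); the stub is hypothetical.
if __name__ == "__main__":
    class _StubEmbedding:
        vocab_size = 100
        emb_size = 32
    model = ChatBotModel(_StubEmbedding(), hidden_size=32, use_attn=True, padded_len=10)
    src = torch.randint(0, 100, (4, 10))  # (batch, seq_len)
    tgt = torch.randint(0, 100, (4, 10))
    out = model(src, tgt)
    print(out.shape)  # expected: torch.Size([4, 10, 100]) = (batch, seq_len, vocab)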
|
vincent861223/ChatBot
|
model/model.py
|
model.py
|
py
| 7,730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4352669045
|
from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = "No secrets on github or youtube"
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
print(request.form)
session['name'] = request.form['name']
session['age'] = request.form['age']
return redirect('/display')
@app.route('/display')
def display():
    name = session['name']
    age = session['age']
    return render_template('display.html', name=name, age=age)
if __name__ == '__main__':
app.run(debug = True)
|
kwersland/coding_dojo-Python
|
flask/fundamentals/post_form/server.py
|
server.py
|
py
| 601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19271398173
|
# coding=utf-8
from __future__ import unicode_literals
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.conf.urls import url
from django.utils.html import format_html
from django.core.urlresolvers import reverse
from ordered_model.admin import OrderedModelAdmin
from monitoreo.apps.dashboard.models import IndicatorType, TableColumn
from monitoreo.apps.dashboard.admin.utils import switch
@admin.register(TableColumn)
class TableColumnAdmin(OrderedModelAdmin):
list_display = ('full_name', 'move_up_down_links')
@admin.register(IndicatorType)
class IndicatorTypeAdmin(OrderedModelAdmin):
list_display = ('nombre', 'order', 'resumen', 'mostrar',
'series_red', 'series_nodos', 'series_federadores',
'panel_red', 'panel_nodos', 'panel_federadores',
'move_up_down_links', 'position_actions')
list_filter = ('resumen', 'mostrar')
actions = ('queryset_to_top', 'queryset_to_bottom',
'summarize', 'desummarize',
'show', 'hide',
'add_to_aggregated_series', 'remove_from_aggregated_series',
'add_to_nodes_series', 'remove_from_nodes_series',
'add_to_indexing_series', 'remove_from_indexing_series',
'add_to_aggregated_panel', 'remove_from_aggregated_panel',
'add_to_nodes_panel', 'remove_from_nodes_panel',
'add_to_federators_panel', 'remove_from_federators_panel'
)
def get_urls(self):
urls = super(IndicatorTypeAdmin, self).get_urls()
extra_urls = [url(r'^(?P<model_id>.+)/(?P<direction>top|bottom)/$',
self.order_move, name='order_move'), ]
return extra_urls + urls
def position_actions(self, obj):
return format_html(
'<a class="button" href="{}">Tope</a> '
'<a class="button" href="{}">Fondo</a>',
reverse('admin:order_move', args=[obj.pk, 'top']),
reverse('admin:order_move', args=[obj.pk, 'bottom']),
)
position_actions.short_description = 'Posicionamientos'
position_actions.allow_tags = True
def order_move(self, request, model_id, direction):
indicator_type = IndicatorType.objects.get(pk=model_id)
if direction == 'top':
indicator_type.top()
elif direction == 'bottom':
indicator_type.bottom()
indicator_type.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
summarize = switch({'resumen': True})
summarize.short_description = 'Agregar al resumen'
desummarize = switch({'resumen': False})
desummarize.short_description = 'Quitar del resumen'
show = switch({'mostrar': True})
show.short_description = 'Agregar al reporte'
hide = switch({'mostrar': False})
hide.short_description = 'Quitar del reporte'
add_to_aggregated_series = switch({'series_red': True})
add_to_aggregated_series.short_description =\
'Agregar a las series de tiempo de red'
remove_from_aggregated_series = switch({'series_red': False})
remove_from_aggregated_series.short_description =\
'Quitar de las series de tiempo de red'
add_to_nodes_series = switch({'series_nodos': True})
add_to_nodes_series.short_description = \
'Agregar a las series de tiempo de nodos'
remove_from_nodes_series = switch({'series_nodos': False})
remove_from_nodes_series.short_description = \
'Quitar de las series de tiempo de nodos'
add_to_indexing_series = switch({'series_federadores': True})
add_to_indexing_series.short_description = \
'Agregar a las series de tiempo de nodos federadores'
remove_from_indexing_series = switch({'series_federadores': False})
remove_from_indexing_series.short_description = \
'Quitar de las series de tiempo de nodos federadores'
add_to_aggregated_panel = switch({'panel_red': True})
add_to_aggregated_panel.short_description = \
'Agregar al panel de indicadores de red'
remove_from_aggregated_panel = switch({'panel_red': False})
remove_from_aggregated_panel.short_description = \
'Quitar del panel de indicadores de red'
add_to_nodes_panel = switch({'panel_nodos': True})
add_to_nodes_panel.short_description = \
'Agregar al panel de indicadores de nodos'
remove_from_nodes_panel = switch({'panel_nodos': False})
remove_from_nodes_panel.short_description = \
'Quitar del panel de indicadores de nodos'
add_to_federators_panel = switch({'panel_federadores': True})
add_to_federators_panel.short_description = \
'Agregar al panel de indicadores de nodos federadores'
remove_from_federators_panel = switch({'panel_federadores': False})
remove_from_federators_panel.short_description = \
'Quitar del panel de indicadores de nodos federadores'
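# hedged sketch (added; not part of the original file): judging from the usage
# above, `switch` from admin/utils presumably builds an admin action that
# bulk-updates the given field/value pairs on the selected queryset, roughly:
#
#     def switch(updates):
#         def action(modeladmin, request, queryset):
#             queryset.update(**updates)
#         return action
#
# the real implementation may differ.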
|
datosgobar/monitoreo-apertura
|
monitoreo/apps/dashboard/admin/indicator_types.py
|
indicator_types.py
|
py
| 4,952 |
python
|
en
|
code
| 5 |
github-code
|
6
|
27165563628
|
# Author: José Rodolfo (jric2002)
s = str(input())
t = str(input())
is_correct = True
size_s = len(s)
size_t = len(t)
i = 0
j = size_s - 1
if (size_s == size_t):
while (i < size_s):
if (s[i] != t[j]):
is_correct = False
break
i += 1
j -= 1
else:
is_correct = False
print("YES") if (is_correct) else print("NO")
|
jric2002/algorithms
|
codeforces/low_difficulty/difficulty_800/21.translation_[41A]/translation.py
|
translation.py
|
py
| 339 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2534597571
|
import copy
import json
import os
import time
from flask import Blueprint, request, Response
import pm4py
from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.objects.conversion.process_tree.converter import to_petri_net_transition_bordered as converter
from pm4py.visualization.common.utils import get_base64_from_gviz
from KPIAlgebras.entities import data
from KPIAlgebras.entities import model as model_object
from KPIAlgebras.request_objects import request_objects
from KPIAlgebras.response_objects import response_objects
from KPIAlgebras.serializers import extended_process_tree_serializer as serializer
from KPIAlgebras.use_cases import alignment_computation_use_case as alignment
from KPIAlgebras.use_cases import cycle_time_analysis_use_case as measurement_high_level
from KPIAlgebras.use_cases import decorate_extended_process_tree_use_case as decorate_tree
from KPIAlgebras.use_cases import import_event_log_use_case as import_log
from KPIAlgebras.use_cases import time_range_construction_use_case as measurement_fine_grained
from KPIAlgebras.util import constants, http_response_status_code
blueprint = Blueprint('endpoints', __name__)
alignments = None
log = None
model = None
initial_marking = None
final_marking = None
extended_process_tree = None
@blueprint.route('/measurement', methods=['POST'])
def measurement():
print("Begining the fine grained analysis")
t1 = time.perf_counter()
parameters = dict()
log_file = request.files['eventLog']
log_file.save(os.path.join(constants.upload_folder, log_file.filename))
model_file = request.files['model']
model_file.save(os.path.join(constants.upload_folder, model_file.filename))
import_log_use_case = import_log.ImportEventLogUseCase()
request_object = request_objects.TimeRangeConstructionRequestObject.from_dict({'event_log': log_file.filename})
global log
log = data.EventLog(import_log_use_case.import_event_log_from_xes(request_object))
os.remove(os.path.join(constants.upload_folder, log_file.filename))
process_tree = pm4py.read_ptml(os.path.join(constants.upload_folder, model_file.filename))
os.remove(os.path.join(constants.upload_folder, model_file.filename))
global extended_process_tree
extended_process_tree = model_object.ExtendedProcessTree(process_tree)
global model, initial_marking, final_marking
model, initial_marking, final_marking = converter.apply(extended_process_tree)
alignment_use_case = alignment.AlignmentComputationUseCase()
global alignments
alignments = alignment_use_case.compute(model, initial_marking, final_marking, log)
high_level_use_case = measurement_high_level.CycleTimeAnalysisUseCase()
response = high_level_use_case.analyse(log.log, alignments, extended_process_tree, model)
extended_process_tree = response.value
lifecycle = attributes_filter.get_attribute_values(log.log, "lifecycle:transition")
if lifecycle is not None and 'start' in lifecycle:
fine_grained_use_case = measurement_fine_grained.TimeRangesConstructionUseCase(log, extended_process_tree,
model, initial_marking,
final_marking, alignments)
response = fine_grained_use_case.construct_time_ranges(log.log, alignments, model, initial_marking,
final_marking)
decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
gviz = decoration_use_case.decorate(extended_process_tree)
svg = get_base64_from_gviz(gviz)
extended_process_tree_json = json.dumps(response.value, cls=serializer.ExtendedProcessTreeJsonEncoder)
json_dict = json.loads(extended_process_tree_json)
json_dict["svg"] = svg.decode('utf-8')
extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
t2 = time.perf_counter()
print(t2 - t1)
return Response(extended_process_tree_json, mimetype='application/json',
status=http_response_status_code.STATUS_CODES[response.type])
@blueprint.route('/timeshifting', methods=['POST'])
def timeshifting():
parameters = request.get_json()
if parameters is None:
parameters = dict()
for arg, value in request.args.items():
parameters[arg] = value
request_object = request_objects.TimeShiftingRequestObject.from_dict(parameters)
global log, model, initial_marking, final_marking, extended_process_tree, alignments
extended_process_tree.states.append(copy.deepcopy(extended_process_tree))
fine_grained_use_case = measurement_fine_grained.TimeRangesConstructionUseCase(log.log, extended_process_tree,
model, initial_marking,
final_marking, alignments)
response = fine_grained_use_case.shift_time_ranges(request_object)
decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
gviz = decoration_use_case.decorate(extended_process_tree)
svg = get_base64_from_gviz(gviz)
extended_process_tree_json = json.dumps(response.value, cls=serializer.ExtendedProcessTreeJsonEncoder)
json_dict = json.loads(extended_process_tree_json)
json_dict["svg"] = svg.decode('utf-8')
extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
return Response(extended_process_tree_json, mimetype='application/json',
status=http_response_status_code.STATUS_CODES[response.type])
@blueprint.route('/undoChange', methods=['GET'])
def undo_change():
global extended_process_tree
extended_process_tree = extended_process_tree.states.pop()
decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
gviz = decoration_use_case.decorate(extended_process_tree)
svg = get_base64_from_gviz(gviz)
extended_process_tree_json = json.dumps(extended_process_tree, cls=serializer.ExtendedProcessTreeJsonEncoder)
json_dict = json.loads(extended_process_tree_json)
json_dict["svg"] = svg.decode('utf-8')
extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
return Response(extended_process_tree_json, mimetype='application/json',
status=http_response_status_code.STATUS_CODES[response_objects.ResponseSuccess.SUCCESS])
@blueprint.route('/undoAllChanges', methods=['GET'])
def undo_all_changes():
global extended_process_tree
extended_process_tree = extended_process_tree.states[0]
decoration_use_case = decorate_tree.DecorateExtendedProcessTreeUseCase()
gviz = decoration_use_case.decorate(extended_process_tree)
svg = get_base64_from_gviz(gviz)
extended_process_tree_json = json.dumps(extended_process_tree, cls=serializer.ExtendedProcessTreeJsonEncoder)
json_dict = json.loads(extended_process_tree_json)
json_dict["svg"] = svg.decode('utf-8')
extended_process_tree_json = json.dumps(json_dict, cls=serializer.ExtendedProcessTreeJsonEncoder)
return Response(extended_process_tree_json, mimetype='application/json',
status=http_response_status_code.STATUS_CODES[response_objects.ResponseSuccess.SUCCESS])
|
luisfsts/KPIAlgebras
|
KPIAlgebras/rest/endpoints.py
|
endpoints.py
|
py
| 7,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42688926971
|
import logging
import time
import uuid
from queue import Queue
from threading import Event, Thread
import zmq
class ControlClient(Thread):
def __init__(self, port: int):
super(ControlClient, self).__init__()
self.daemon = True
self.command_queue = Queue()
self.command_return = {}
self.command_return_popque = Queue()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
self.port = port
self.timeout = 5000
        self.active = Event()
        self.active.set()  # run() loops only while this event is set; start in the running state
def execute_command(self, device, command, args, kwargs, id=None):
self.socket.send_json([device, command, args, kwargs])
if (self.socket.poll(self.timeout) & zmq.POLLIN) != 0:
status, retval = self.socket.recv_json()
if status == "OK":
return retval
else:
logging.warning(
f"{device} networking warning in "
+ f"ExecuteNetworkCommand : error for {command} -> {retval}"
)
return "Error"
def run(self):
self.socket.connect(f"tcp://localhost:{self.port}")
while self.active.is_set():
while not self.command_queue.empty():
id, device, command, args, kwargs = self.command_queue.get()
ret = self.execute_command(device, command, args, kwargs)
self.command_return[id] = ret
while not self.command_return_popque.empty():
self.command_return.pop(self.command_return_popque.get())
time.sleep(1e-6)
def send_command(
control_client: ControlClient, device, command, args, kwargs, wait_return=False
):
id = uuid.uuid1().int >> 64
control_client.command_queue.put([id, device, command, args, kwargs])
if wait_return:
while True:
if id in control_client.command_return:
ret = control_client.command_return.pop(id)
return ret
time.sleep(1e-6)
else:
control_client.command_return_popque.put(id)
return
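# --- hedged usage sketch (added; not part of the original file) ---
# Assumes a ZMQ REP server listens on tcp://localhost:5555 and answers each
# send_json([device, command, args, kwargs]) with ["OK", result]; the device
# and command names are placeholders.
if __name__ == "__main__":
    client = ControlClient(5555)
    client.start()  # run() loops while client.active stays set
    reading = send_command(client, "thermometer", "ReadValue", [], {}, wait_return=True)
    print(reading)
    client.active.clear()  # let the worker loop exit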
|
ograsdijk/CeNTREX-compressorcabinet
|
centrex_compressorcabinet/networking/controller_client.py
|
controller_client.py
|
py
| 2,107 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34832241170
|
import cv2
import numpy as np
img = np.zeros((600, 600, 3), np.uint8) # all zeros means black
"""
img.shape[1] is the image width
img.shape[0] is the image height
"""
# draw a straight line
cv2.line(img, (0,0), (img.shape[1], img.shape[0]), (255,255,255), 2) # args: image, start point, end point, color, thickness
# draw rectangles (cv2.FILLED means filled)
cv2.rectangle(img, (100, 100), (200, 200), (0,255,255), 2)
cv2.rectangle(img, (400, 400), (500, 500), (0,255,255), cv2.FILLED) # args: image, start point, end point, color, thickness
# draw a circle
cv2.circle(img, (300, 300), 100, (0,0,255), 2) # args: image, center, radius, color, thickness
# draw an ellipse
cv2.ellipse(img , (300, 300), (50, 100), 45, 0, 360, (255, 0, 0), cv2.FILLED) # args: image, center, (horizontal semi-axis, vertical semi-axis), rotation angle, start angle, end angle, color, thickness
# draw text (non-ASCII text such as Chinese is not supported)
cv2.putText(img, "Hello", (300, 300), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 1) # args: image, text, bottom-left corner of the text, font, font scale, color, thickness
cv2.imshow("img", img)
cv2.waitKey(0)
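# optional (added; not part of the original): also save the canvas to disk;
# the output file name is a placeholder
cv2.imwrite("drawing_demo.png", img)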
|
jim2832/Image-Recognition
|
draw.py
|
draw.py
|
py
| 1,114 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
38679091876
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import subprocess
import sys
from setuptools import setup, find_packages, Distribution
import setuptools.command.build_ext as _build_ext
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
ray_files = [
"ray/core/src/common/thirdparty/redis/src/redis-server",
"ray/core/src/common/redis_module/libray_redis_module.so",
"ray/core/src/plasma/plasma_store",
"ray/core/src/plasma/plasma_manager",
"ray/core/src/local_scheduler/local_scheduler",
"ray/core/src/local_scheduler/liblocal_scheduler_library.so",
"ray/core/src/global_scheduler/global_scheduler",
"ray/WebUI.ipynb"
]
optional_ray_files = []
ray_ui_files = [
"ray/core/src/catapult_files/index.html",
"ray/core/src/catapult_files/trace_viewer_full.html"
]
ray_autoscaler_files = [
"ray/autoscaler/aws/example-full.yaml"
]
# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.
# Otherwise, they are optional.
if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1":
ray_files += ray_ui_files
else:
optional_ray_files += ray_ui_files
optional_ray_files += ray_autoscaler_files
extras = {
"rllib": [
"tensorflow", "pyyaml", "gym[atari]", "opencv-python",
"python-snappy", "scipy"]
}
class build_ext(_build_ext.build_ext):
def run(self):
# Note: We are passing in sys.executable so that we use the same
# version of Python to build pyarrow inside the build.sh script. Note
# that certain flags will not be passed along such as --user or sudo.
# TODO(rkn): Fix this.
subprocess.check_call(["../build.sh", sys.executable])
# We also need to install pyarrow along with Ray, so make sure that the
# relevant non-Python pyarrow files get copied.
pyarrow_files = [
os.path.join("ray/pyarrow_files/pyarrow", filename)
for filename in os.listdir("./ray/pyarrow_files/pyarrow")
if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow",
filename))]
files_to_include = ray_files + pyarrow_files
for filename in files_to_include:
self.move_file(filename)
# Copy over the autogenerated flatbuffer Python bindings.
generated_python_directory = "ray/core/generated"
for filename in os.listdir(generated_python_directory):
if filename[-3:] == ".py":
self.move_file(os.path.join(generated_python_directory,
filename))
# Try to copy over the optional files.
for filename in optional_ray_files:
try:
self.move_file(filename)
except Exception as e:
print("Failed to copy optional file {}. This is ok."
.format(filename))
def move_file(self, filename):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
# example.
source = filename
destination = os.path.join(self.build_lib, filename)
# Create the target directory if it doesn't already exist.
parent_directory = os.path.dirname(destination)
if not os.path.exists(parent_directory):
os.makedirs(parent_directory)
print("Copying {} to {}.".format(source, destination))
shutil.copy(source, destination)
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
setup(name="ray",
# The version string is also in __init__.py. TODO(pcm): Fix this.
version="0.3.1",
packages=find_packages(),
cmdclass={"build_ext": build_ext},
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=["numpy",
"funcsigs",
"click",
"colorama",
"psutil",
"pytest",
"pyyaml",
"redis",
# The six module is required by pyarrow.
"six >= 1.0.0",
"flatbuffers"],
setup_requires=["cython >= 0.23"],
extras_require=extras,
entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
include_package_data=True,
zip_safe=False,
license="Apache 2.0")
|
ray-project/sandbox
|
python/setup.py
|
setup.py
|
py
| 4,842 |
python
|
en
|
code
| 4 |
github-code
|
6
|
4492425254
|
import xml.etree.ElementTree as ET
from fastapi import FastAPI, Path
from fastapi.responses import Response
app = FastAPI()
@app.get("/IF01/{name}")
async def get(
name: str = Path(title="名前"),
):
root = ET.Element("root")
    # the hopeless "numbered tag" garbage: fields are identified only by DT-numbers
nameElement = ET.SubElement(root, "DT0001")
nameElement.text = name
birthday = ET.SubElement(root, "DT0002")
birthday.text = "19700101"
address = ET.SubElement(root, "DT0003")
address.text = "神奈川県横浜市以下略"
gender = ET.SubElement(root, "DT0004")
gender.text = "1"
xml_data = ET.tostring(root, encoding="shift_jis", xml_declaration=True)
response = Response(content=xml_data, media_type="application/xml; charset=shift_jis")
return response
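# hedged usage note (added; not part of the original file): served with e.g.
#   uvicorn main:app
# then  curl http://127.0.0.1:8000/IF01/Taro  returns Shift_JIS-encoded XML
# carrying the DT0001..DT0004 fields built above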
|
ikemo3/conveni-pdf-example
|
terrible-api/src/main.py
|
main.py
|
py
| 809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
314473279
|
import os
from util.IO import IO
from util.Event import Event
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
from datetime import date, timedelta, datetime
import pandas as pd
class Calendar:
def __init__(self, username):
self.usernames = username
io = IO(self.usernames)
self.mysql = io.dbConnect()
def plotEvents(self, today, defaultUserName=None):
'''
defaultUserName : the current user
'''
otherEvents = []
if defaultUserName != None:
query = f"""
SELECT *
FROM REQUESTS
WHERE Requestor != '{defaultUserName}'
"""
reqTable = pd.read_sql(query, self.mysql)
for ii, row in reqTable.iterrows():
event = Event(row['Requestor'], row['EventName'], row['StartTime'], row['EndTime'], row['StartDate'])
otherEvents.append(event)
colors = ['firebrick', 'dodgerblue', 'seagreen']
colorIdx = 0
#raise ValueError('made it')
fig, axs = plt.subplots(1, 7, figsize=(30, 15))
# get new ioObj
io = IO(self.usernames)
# generate list of next 7 days
datesList = [today + timedelta(days=i) for i in range(7)]
# generate plot of the users schedule for the next 7 days
strTimes = [f"{ii}:00" for ii in range(24)]
axs[0].set_ylabel('Time [hh:mm]', fontsize=30)
x = [0, 1]
placeTicks = True
for ax, dd in zip(axs, datesList):
ax.set_title(dd.strftime("%m/%d"), fontsize=24)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylim(24)
for jj in range(24):
ax.axhline(jj, x[0], x[1], ls='--', color='k', alpha=0.5)
for event in io.events:
if event.startTime.strftime("%m/%d") == dd.strftime("%m/%d"):
startHr = int(event.startTime.strftime("%H"))
startMin = int(event.startTime.strftime("%M"))
endHr = int(event.endTime.strftime("%H"))
endMin = int(event.endTime.strftime("%M"))
ax.fill_between(x, startHr + startMin/60, endHr + endMin/60, color=colors[0], alpha=0.5)
midpoint = (startHr + startMin/60 + endHr + endMin/60)/2
ax.text(0, midpoint, event.eventName, color='w', fontsize=24)
for event in otherEvents:
if event.startTime.strftime("%m/%d") == dd.strftime("%m/%d"):
startHr = int(event.startTime.strftime("%H"))
startMin = int(event.startTime.strftime("%M"))
endHr = int(event.endTime.strftime("%H"))
endMin = int(event.endTime.strftime("%M"))
ax.fill_between(x, startHr + startMin/60, endHr + endMin/60, color=colors[1], alpha=0.5)
midpoint = (startHr + startMin/60 + endHr + endMin/60)/2
ax.text(0, midpoint, event.eventName, color='w', fontsize=24)
cursor = self.mysql.cursor()
# get other User name
getNames = f"""
SELECT *
FROM USERNAME
WHERE UserName='{event.userName}'
"""
userInfo = pd.read_sql(getNames, self.mysql)
first = userInfo['FirstName'].tolist()[0]
last = userInfo['LastName'].tolist()[0]
ax.text(0, midpoint+1, first+" "+last, color='w', fontsize=24)
if placeTicks:
ax.set_yticks(np.arange(len(strTimes)), labels=strTimes, fontsize=24)
placeTicks=False
else:
ax.set_yticks(np.arange(len(strTimes)), labels="", fontsize=24)
fig.suptitle("Year: " + datesList[0].strftime("%Y"), fontsize=36)
return fig
|
noahfranz13/IOU
|
util/Calendar.py
|
Calendar.py
|
py
| 4,071 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4108607988
|
import sys
from PySide import QtGui, QtCore
# declare the QtGui application
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
icon = QtGui.QIcon('../smile.png')
button = QtGui.QPushButton('Exit', window)
button.resize(button.sizeHint())
button.move(10, 10)
button.clicked.connect(QtCore.QCoreApplication.instance().quit)
window.resize(800, 600)
window.setWindowTitle('Belajar PySide')
window.setWindowIcon(icon)
# frameSize = window.frameSize()
# screenSize = QtGui.QDesktopWidget().screenGeometry()
# xPoint = screenSize.width() / 2 - frameSize.width() / 2
# yPoint = screenSize.height() / 2 - frameSize.height() / 2
# window.move(xPoint, yPoint)
frameWindow = window.frameGeometry()
centerPoint = QtGui.QDesktopWidget().availableGeometry().center()
frameWindow.moveCenter(centerPoint)
window.move(frameWindow.topLeft())
window.show()
sys.exit(app.exec_())
|
ibnujakaria/tutorial-pyside
|
05. Pembahasan Window/05-pembahasan-window.py
|
05-pembahasan-window.py
|
py
| 877 |
python
|
en
|
code
| 3 |
github-code
|
6
|
21836138389
|
import sys
from pprint import pprint
sys.stdin = open('../input.txt', 'r')
ds = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def dfs(i, j):
stack = [(i, j)]
while stack:
si, sj = stack.pop()
for di, dj in ds:
ni, nj = si+di, sj+dj
if 0 <= ni < M and 0 <= nj < N:
if g[ni][nj] and not visited[ni][nj]:
stack.append((ni, nj))
visited[ni][nj] = 1
return 1
T = int(input())
for test in range(T):
M, N, K = map(int, input().split())
g = [[0] * N for _ in range(M)]
visited = [[0] * N for _ in range(M)]
res = 0
for _ in range(K):
X, Y = map(int, input().split())
g[X][Y] = 1
for i in range(M):
for j in range(N):
if g[i][j] and not visited[i][j]:
res += dfs(i, j)
print(res)
|
liza0525/algorithm-study
|
BOJ/boj_1012_cabbage.py
|
boj_1012_cabbage.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74842403066
|
def read_raw(filename):
infile = open(filename,"rb")
indata = infile.read()
infile.close()
return indata
def makeString(ASCII_list):
bdata = bytes(ASCII_list)
s = bdata.decode("utf-8")
return s
def decodeFasta(data):
ou = []
nameList = []
proteinList = []
dataList = []
collect = "s" #species, protein, data
for x in data:
#x is a byte
if x==32: #space
if (collect=="s") and (len(nameList)!=0):
collect = "p"
elif (x==13) or (x==10): #return
if (collect=="p") and (len(proteinList)!=0):
collect = "d"
elif x==62: # >
if collect!="d":
raise Exception("Syntax Error")
if len(nameList)==0:
raise Exception("Empty Species Field")
if len(proteinList)==0:
raise Exception("Empty Protein Field")
if len(dataList)==0:
raise Exception("Empty Data Field")
ou.append([makeString(nameList),makeString(proteinList),makeString(dataList)])
nameList = []
proteinList =[]
dataList = []
collect = "s"
else:
if collect=="s":
nameList.append(x)
elif collect=="p":
proteinList.append(x)
elif collect=="d":
dataList.append(x)
else:
raise Exception("Internal coding bug. Please fix.")
if (len(nameList)!=0) and (len(proteinList)!=0) and (len(dataList)!=0):
ou.append([makeString(nameList),makeString(proteinList),makeString(dataList)])
return ou
def merge(oldData,newData):
#oldData is a dict of dicts
#newData is a list of tuples
for x in newData:
#x is a tuple [species,protein,data]
#all of them strings
if x[0] not in oldData: #if you don't know about this species, make it
oldData[x[0]] = dict()
oldData[x[0]][x[1]] = x[2]
#no return
def fasta_parser(list_of_files):
ou = dict()
for x in list_of_files:
rawData = read_raw(x)
newData = decodeFasta(rawData)
        del rawData
        merge(ou,newData)
        del newData
return ou
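# --- hedged usage sketch (added; not part of the original file) ---
# Note the format this parser expects deviates from standard FASTA: records
# read as "name protein\ndata" and '>' separates records rather than starting
# them. The file name below is a placeholder.
if __name__ == "__main__":
    with open("example.fa", "wb") as f:
        f.write(b"human insulin\nMALWMRLLPL>mouse insulin\nMALWMRFLPL")
    print(fasta_parser(["example.fa"]))
    # -> {'human': {'insulin': 'MALWMRLLPL'}, 'mouse': {'insulin': 'MALWMRFLPL'}}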
|
aaronstanek-bucket/fasta_parser
|
fasta_parser.py
|
fasta_parser.py
|
py
| 2,328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29486210493
|
# Exercise 1: Write a program which repeatedly reads numbers
# until the user enters "done". Once "done" is entered, print
# out the total, count, and average of the numbers. If the user
# enters anything other than a number, detect their mistake using
# try and except and print an error message and skip to the next number.
count = 0
total = 0
avg = 0
while True:
line = input ('Enter a number \n>')
try:
line = float(line)
count = count + 1
total = total + line
avg = total / count
except:
if line == 'Done' or line == 'done':
break
else:
print('Invalid Input')
continue
print('You entered a total of {} numbers, the sum total of those numbers equals {}, and the average of these numbers is {}.'.format(count, total, avg))
# Exercise 2: Write another program that prompts for a list
# of numbers as above and at the end prints out both the maximum
# and minimum of the numbers instead of the average.
user_list = []
num_count = 0
value = True
# Use a while loop that executes a try block to update the user's list
# Also we want to keep track of the numbers added to the list by updating the
# num_count variable
while value:
try:
user_input = input('Enter a number you want to add to the list:\n>')
user_input = float(user_input)
user_list.append(user_input)
num_count = num_count + 1
except:
if user_input == 'Done' or user_input == 'done':
break
else:
print('Invalid user input.')
continue
smallest_num = min(user_list)
largest_num = max(user_list)
print('The smallest number in your list is {}, and the largest number is {}.'.format(smallest_num, largest_num))
|
tuyojr/pythonUdacity
|
exercises/iterationsI.py
|
iterationsI.py
|
py
| 1,689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18227651268
|
from flask import Flask, render_template, session, request, url_for, redirect, flash
app = Flask(__name__)
@app.route("/")
def hello():
print ("hello there")
return render_template("home.html")
if __name__ == "__main__":
app.debug = True
app.run()
|
TimMarder/determinants--marderT_canaleB_liuA_hasanA
|
app.py
|
app.py
|
py
| 269 |
python
|
en
|
code
| 2 |
github-code
|
6
|
38867577492
|
from inspect import signature
from functools import wraps
import werdsazxc
from platforms.config import CODE_DICT as config
import json
import threading
import inspect
import pickle
import time
import requests
import logging
import re
logger = logging.getLogger('robot')
requests.packages.urllib3.disable_warnings()
alert_pattern = re.compile('(?<=alert\([\'\"]).*?(?=[\'\"]\))')
default_headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'}
BETSLIP_ALLOW_LIST = {'1_13': '棋牌_银河', '1_2': '棋牌_开元', '1_5': '棋牌_JDB', '1_7': '棋牌_MG', '1_9': '棋牌_LEG', '1_15': '棋牌_SGWIN', '1_18': '棋牌_新世界', '1_20': '棋牌_美天', '1_21': '棋牌_百胜', '1_22': '棋牌_FG', '1_34': '棋牌_PS', '2_13': '捕鱼_银河', '2_2': '捕鱼_开元', '2_3': '捕鱼_CQ9', '2_5': '捕鱼_JDB', '2_7': '捕鱼_MG', '2_8': '捕鱼_BBIN', '2_9': '捕鱼_LEG', '2_10': '捕鱼_AG', '2_16': '捕鱼_BG', '2_18': '捕鱼_新世界', '2_20': '捕鱼_美天', '2_21': '捕鱼_百胜', '2_22': '捕鱼_FG', '2_24': '捕鱼_FC', '2_27': '捕鱼_KA', '2_34': '捕鱼_PS', '3_13': '电子_银河', '3_3': '电子_CQ9', '3_5': '电子_JDB', '3_7': '电子_MG', '3_8': '电子_BBIN(旧)', '3_9': '电子_LEG', '3_10': '电子_AG', '3_14': '电子_PG', '3_20': '电子_美天', '3_21': '电子_百胜', '3_22': '电子_FG', '3_24': '电子_FC', '3_27': '电子_KA', '3_33': '电子_BNG', '3_34': '电子_PS', '3_37': '电子_PP','3_75': '电子_BBIN', '4_8': '真人_BBIN', '4_10': '真人_AG', '4_16': '真人_BG', '4_17': '真人_eBET', '5_6': '体育_利记', '5_8': '体育_BBIN', '5_19': '体育_沙巴', '5_36': '体育_三升', '8_8': '彩票_BBIN', '8_11': '彩票_双赢', '8_35': '彩票_云博'}
spin_betslip_gamedict = {
'140048': '[PG电子]双囍临门', '140050': '[PG电子]嘻游记', '140054': '[PG电子]赏金船长', '140060': '[PG电子]爱尔兰精灵', '140061': '[PG电子]唐伯虎点秋香', '140065': '[PG电子]麻将胡了', '140071': '[PG电子]赢财神', '140074': '[PG电子]麻将胡了2', '140075': '[PG电子]福运象财神', '140079': '[PG电子]澳门壕梦', '140082': '[PG电子]凤凰传奇', '140083': '[PG电子]火树赢花', '140084': '[PG电子]赏金女王', '140087': '[PG电子]寻宝黄金城', '140089': '[PG电子]招财喵', '140091': '[PG电子]冰火双娇', '140095': '[PG电子]宝石传奇', '140100': '[PG电子]糖心风暴', '140104': '[PG电子]亡灵大盗', '140105': '[PG电子]霹雳神偷', '140106': '[PG电子]麒麟送宝', '140119': '[PG电子]百鬼夜行', '140121': '[PG电子]日月星辰', '140122': '[PG电子]神鹰宝石',
'30016': '[CQ9电子]六颗扭蛋', '30022': '[CQ9电子]跳高高', '30023': '[CQ9电子]跳起来', '30025': '[CQ9电子]跳高高2', '30026': '[CQ9电子]五福临门', '30028': '[CQ9电子]鸿福齐天', '30029': '[CQ9电子]武圣', '30030': '[CQ9电子]宙斯', '30031': '[CQ9电子]蹦迪', '30032': '[CQ9电子]跳过来', '30033': '[CQ9电子]直式蹦迪', '30034': '[CQ9电子]单手跳高高', '30037': '[CQ9电子]跳起来2', '30038': '[CQ9电子]野狼Disco', '30040': '[CQ9电子]六颗糖', '30041': '[CQ9电子]直式洪福齐天', '30043': '[CQ9电子]发财神2', '30044': '[CQ9电子]金鸡报喜', '30045': '[CQ9电子]东方神起', '30046': '[CQ9电子]火烧连环船2', '30086': '[CQ9电子]血之吻',
'8003': '[JDB电子]变脸', '8006': '[JDB电子]台湾黑熊', '8015': '[JDB电子]月光秘宝', '8020': '[JDB电子]芝麻开门', '8044': '[JDB电子]江山美人', '8047': '[JDB电子]变脸2', '8048': '[JDB电子]芝麻开门2', '14006': '[JDB电子]亿万富翁', '14016': '[JDB电子]王牌特工', '14030': '[JDB电子]三倍金刚', '14033': '[JDB电子]飞鸟派对', '14035': '[JDB电子]龙舞', '14041': '[JDB电子]雷神之锤', '14061': '[JDB电子]玛雅金疯狂', '14042': '[JDB电子]聚宝盆', '514079': '[JDB电子]富豪哥2',
}
spin_betslip_gametype = {'3_14':'PG 电子','3_3':'CQ9 电子','3_5':'JDB 电子'}
class ThreadProgress(threading.Thread):
def __init__(self, cf, mod_key, detail=True):
super().__init__()
self.lst = []
self.cf = cf
self.mod_key = mod_key
self.running = True
self.detail = detail
def stop(self):
self.running = False
def run(self):
from gui.Apps import return_schedule
        # the new progress-report API keeps every progress state, so send them all back
if self.detail:
while self.running or self.lst:
try:
time.sleep(.1)
item = self.lst.pop(0)
return_schedule(self.cf, self.mod_key, **item)
except IndexError as e:
continue
        # the old progress-report API keeps only the latest progress state, so send back just the last one
else:
while self.running:
try:
time.sleep(1)
item = self.lst.pop()
self.lst.clear()
return_schedule(self.cf, self.mod_key, **item)
except IndexError as e:
continue
# decorator: once applied, liveTime is extended once every 60 seconds
def keep_connect(func):
class Keep(threading.Thread):
def __init__(self, cf):
super().__init__()
self.cf = cf
def run(self):
from gui.Apps import keep_connect
t = threading.current_thread()
while getattr(t, 'running', True) and self.cf['token']:
keep_connect(self.cf)
time.sleep(55)
@wraps(func)
def wrapper(*args, **kwargs):
        # read the first line of the docstring as the function name for later log records
        funcname = func.__doc__.split('\n')[0]
        # bind the incoming arguments and build an argument dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # spawn a thread that keeps the connection alive
        cf = arguments['cf']
        t = Keep(cf)
        t.start()
        # run the wrapped function
        result = func(*args, **kwargs)
        # stop the keep-alive thread
t.running = False
return result
return wrapper
class NotSignError(Exception):
    '''Custom exception: when the account gets signed out, log back in to the GPK platform automatically'''
pass
class NullError(Exception):
    '''Custom exception: when the account gets signed out, log back in to the GPK platform automatically'''
pass
def log_info(func):
@wraps(func)
def wrapper(*args, **kwargs):
        # read the first line of the docstring as the function name for later log records
        funcname = func.__doc__.split('\n')[0]
        # bind the incoming arguments and build an argument dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # argument names excluded from the log output
        exclud_args = ["url", "endpoints", "timeout", "args", "kwargs"]
        # log the arguments
        logger.info(f'{funcname} 网址: {arguments.get("url")}{arguments.get("endpoints")}')
        # logger.info(f'{funcname} 参数: {dict((k, v) for k, v in arguments.items() if k not in exclud_args)}')
        # run the wrapped function
        result = func(*args, **kwargs)
        # on error, log the full raw response, then drop the raw fields afterwards
if result["IsSuccess"] is False and result['ErrorCode'] != config.SUCCESS_CODE.code:
if result.get('RawStatusCode'):
logger.warning(f"网页原始状态码为: {result.get('RawStatusCode')}")
if result.get('RawContent'):
logger.warning(f"网页原始内容为: {result.get('RawContent')}")
if result.get('RawStatusCode'):
del result["RawStatusCode"]
if result.get('RawContent'):
del result["RawContent"]
        # log the result
logger.info(f'{funcname} 返回: {werdsazxc.Dict(result)}')
return result
return wrapper
# check that the argument types passed in by the system are valid
def check_type(cls):
@wraps(cls)
def wrapper(*args, **kwargs):
from .mission import BaseFunc
        # bind the incoming arguments and build an argument dict
sig = signature(cls)
bound = sig.bind_partial(*args, **kwargs)
bound.apply_defaults()
arguments = bound.arguments.get('kwargs')
system_dict = {**BaseFunc.Meta.system_dict, **cls.Meta.return_value['include']}
[system_dict.pop(i,None) for i in cls.Meta.return_value['exclude']]
rep = [k for k, v in arguments.items() if k in system_dict and type(v) != system_dict[k]]
if rep:
logger.warning(f"型别异常参数: {rep}")
return_content = {
'IsSuccess': False,
'ErrorCode': config.PARAMETER_ERROR.code,
'ErrorMessage': config.PARAMETER_ERROR.msg
}
for i in cls.Meta.return_value['data']:
if i in arguments:
return_content[i] = arguments[i]
elif i in ['BetAmount','AllCategoryCommissionable','GameCommissionable','SingleCategoryCommissionable']:
return_content[i] = '0.00'
else:
return_content[i] = ''
return return_content
        # run the wrapped function
result = cls(*args, **kwargs)
return result
return wrapper
def catch_exception(func):
@wraps(func)
def wrapper(*args, **kwargs):
        # read the first line of the docstring as the function name for later log records
        funcname = func.__doc__.split('\n')[0]
        # bind the incoming arguments and build an argument dict
        sig = signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        arguments = bound.arguments
        # count retry attempts
count = 1
cf = arguments['cf']
while count <= cf['retry_times']:
try:
result = func(*args, **kwargs)
break
            # check that the URL scheme (http/https) was provided
except requests.exceptions.MissingSchema as e:
logger.debug(f'{e.__class__.__name__} {e}')
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': f'平台设定错误, 通讯协定(http或https)未输入',
}
            # check that the URL scheme is valid
except requests.exceptions.InvalidSchema as e:
logger.debug(f'{e.__class__.__name__} {e}')
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': f'平台设定错误, 通讯协定(http或https)无法解析',
}
            # check that the URL itself is valid
except requests.exceptions.InvalidURL as e:
logger.debug(f'{e.__class__.__name__} {e}')
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': f'平台设定错误, 无法解析',
}
            # a redirect exception occurred (too many redirects)
except requests.exceptions.TooManyRedirects as e:
logger.debug(f'{e.__class__.__name__} {e}')
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': f'平台设定错误, 发生重导向异常',
}
            # catch being signed out
except (NotSignError, json.JSONDecodeError) as e:
from .module import session
from .module import login
if type(e) == json.JSONDecodeError:
logger.info(f'json.JSONDecodeError: {e.doc}')
if (cf['need_backend_otp'] is False and
hasattr(session, 'url') and
hasattr(session, 'acc') and
hasattr(session, 'pw')):
login(cf=cf, url=session.url, acc=session.acc, pw=session.pw, otp='')
continue
return {
'IsSuccess': False,
'ErrorCode': config.SIGN_OUT_CODE.code,
'ErrorMessage': config.SIGN_OUT_CODE.msg.format(platform=cf.platform),
}
#
except NullError as e:
if count < cf['retry_times']:
time.sleep(1)
continue
return {
'IsSuccess': False,
'ErrorCode': config.SIGN_OUT_CODE.code,
'ErrorMessage': config.SIGN_OUT_CODE.msg.format(platform=cf.platform),
}
# key error
except KeyError as e:
logger.debug(f'{e.__class__.__name__} {e}')
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': config.EXCEPTION_CODE.msg,
}
            # catch connection errors
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.ContentDecodingError) as e:
logger.debug(f'{e.__class__.__name__} {e}')
logger.debug(f'{e.__class__.__name__} ({count}/3)...')
if count >= 3:
return {
'IsSuccess': False,
'ErrorCode': config.CONNECTION_CODE.code,
'ErrorMessage': config.CONNECTION_CODE.msg,
}
if f'与{cf.get("platform")}连线异常...' not in cf['error_msg']:
cf['error_msg'].append(f'与{cf.get("platform")}连线异常...')
time.sleep(3)
count += 1
            # field errors, missing permissions, etc.
except IndexError as e:
logger.debug(f'{e.__class__.__name__} {e}')
local_envs = inspect.getinnerframes(e.__traceback__)[-1].frame.f_locals
resp = local_envs.get('resp')
if resp:
status_code = resp.status_code
content = resp.content
else:
status_code = '函数未设定resp变数, 请修改变数命名规则'
content = '函数未设定resp变数, 请修改变数命名规则'
return {
'IsSuccess': False,
'ErrorCode': config.HTML_CONTENT_CODE.code,
'ErrorMessage': config.HTML_CONTENT_CODE.msg,
'RawStatusCode': resp.status_code,
'RawContent': resp.content
}
            # catch any remaining program errors
except Exception as e:
werdsazxc.log_trackback()
local_envs = inspect.getinnerframes(e.__traceback__)
local_envs = [frame for frame in local_envs if frame.function == func.__name__]
if local_envs:
local_envs = local_envs[-1].frame.f_locals
resp = local_envs.get('resp')
if resp:
status_code = resp.status_code
content = resp.content
else:
status_code = '函数未设定resp变数, 请修改变数命名规则'
content = '函数未设定resp变数, 请修改变数命名规则'
else:
status_code = ''
content = ''
return {
'IsSuccess': False,
'ErrorCode': config.EXCEPTION_CODE.code,
'ErrorMessage': f'未知异常- {e.__class__.__name__}: {e}',
'RawStatusCode': status_code,
'RawContent': content
}
return result
return wrapper
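# --- hedged usage sketch (added; not part of the original file) ---
# Shows how the decorators above are meant to stack on a platform call.
# `cf` (which must carry at least 'retry_times'), the endpoint and the
# response fields below are placeholders, not the real platform API.
@catch_exception
@log_info
def fetch_balance(cf, url, endpoints='/api/balance', timeout=30):
    '''fetch_balance demo
    '''
    resp = requests.get(url + endpoints, headers=default_headers,
                        timeout=timeout, verify=False)
    data = resp.json()  # a JSONDecodeError here is handled by catch_exception
    return {
        'IsSuccess': True,
        'ErrorCode': config.SUCCESS_CODE.code,
        'ErrorMessage': '',
        'Balance': data.get('balance', '0.00'),
    }
# e.g.: fetch_balance(cf={'retry_times': 3, 'platform': 'demo', 'error_msg': []},
#                     url='https://example.com')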
|
gleam542/platforms
|
platforms/wg/utils.py
|
utils.py
|
py
| 16,213 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
36108675828
|
from calendar import weekday
import os
import zipfile
import numpy as np
import torch
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
# if dataset == "alpha":
# if (not os.path.isfile("DC_STGCN/data/adj_mat_alpha.npy")
# or not os.path.isfile("DC_STGCN/data/node_values_alpha.npy")):
with zipfile.ZipFile("DC_STGCN/data/SCATS_alpha.zip", 'r') as zip_ref:
zip_ref.extractall("DC_STGCN/data/")
X_alpha = np.load("DC_STGCN/data/alpha_data/node_values_alpha.npy").transpose((1, 2, 0))
X_alpha = X_alpha.astype(np.float32)
# elif dataset == "bravo":
# if (not os.path.isfile("DC_STGCN/data/adj_mat_bravo.npy")
# or not os.path.isfile("DC_STGCN/data/node_values_bravo.npy")):
with zipfile.ZipFile("DC_STGCN/data/SCATS_bravo.zip", 'r') as zip_ref:
zip_ref.extractall("DC_STGCN/data/")
# A = np.load("DC_STGCN/data/bravo_data/adj_mat_bravo.npy")
# A = A.astype(np.float32)
X_bravo = np.load("DC_STGCN/data/bravo_data/node_values_bravo.npy").transpose((1, 2, 0))
X_bravo = X_bravo.astype(np.float32)
print(X_alpha.shape)
print(X_bravo.shape)
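# (added comment) after the transposes above the axes are (junction, channel, time);
# the slice below takes junctions 2..4 from the alpha network and appends them
# to bravo along the junction axis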
X_196 = X_alpha[2:5, :, :]
print(X_196.shape)
X_bravoplus = np.concatenate((X_bravo, X_196), axis=0)
print(X_bravoplus.shape)
X_bravoplus = X_bravoplus.transpose((2, 0, 1))
np.save("interpret_csv_bravoplus/node_values_bravoplus", X_bravoplus)
files_string = "TO BE CONFIGURED"
f = open("interpret_csv_bravoplus/nv_info.txt", "w")
info_string = "Num Juncs:\t" + str(X_bravoplus.shape[1]) + "\nNum Channels:\t" + str(X_bravoplus.shape[2]) + "\nNum Days:\t" + str(X_bravoplus.shape[0]/480)
print(info_string)
f.write(info_string)
f.write(files_string)
f.close()
if os.path.isfile("interpret_csv_bravoplus/adj_mat_bravoplus.npy") and os.path.isfile("interpret_csv_bravoplus/adj_info.txt"):
with zipfile.ZipFile("interpret_csv_bravoplus/SCATS_bravoplus.zip", "w") as zip_object:
zip_object.write("interpret_csv_bravoplus/node_values_bravoplus.npy", arcname="bravoplus_data/node_values_bravoplus.npy")
zip_object.write("interpret_csv_bravoplus/adj_mat_bravoplus.npy", arcname="bravoplus_data/adj_mat_bravoplus.npy")
zip_object.write("interpret_csv_bravoplus/adj_info.txt", arcname="bravoplus_data/adj_info.npy")
zip_object.write("interpret_csv_bravoplus/nv_info.txt", arcname="bravoplus_data/nv_info.npy")
print("Zipped")
|
oscarcrowley1/thesis
|
interpret_csv_bravoplus/make_bravoplus_tensor.py
|
make_bravoplus_tensor.py
|
py
| 2,378 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24287753323
|
def consecutive_bits(number):
result = 0
count = 0
while number > 0:
modulus = number % 2
if modulus == 1:
count += 1
else:
count = 0
if count > result:
result = count
number //= 2
return result
def main():
assert consecutive_bits(156) == 3
if __name__ == '__main__':
main()
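# extra worked check (added; not in the original): 0b1011110 contains a
# longest run of four consecutive set bits
assert consecutive_bits(0b1011110) == 4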
|
ckallum/Daily-Coding-Problem
|
solutions/#214.py
|
#214.py
|
py
| 379 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9781702318
|
def insertionSort(tab,l,r):
dl=len(tab)
for i in range(l+1,r+1):
klucz=tab[i]
j=i-1
while j>=0 and klucz<tab[j]:
tab[j+1],tab[j]=tab[j],tab[j+1]
j-=1
tab[j+1]=klucz
def partition(tab,p,r):
x=tab[r]
i=p-1
for j in range(p,r):
if tab[j]<=x:
i+=1
tab[i],tab[j]=tab[j],tab[i]
tab[i+1],tab[r]=tab[r],tab[i+1]
return i+1
def select_five(L, left, right, k):
    # 1. If there are few elements, just sort.
    # p should be at least 5 so that i > 0 gets set.
    p = 5 # may be a few dozen
    if (right-left+1) < p:
        insertionSort(L, left, right)
        return L[left+k-1] # return the k-th smallest value (k is 1-based)
    # 2. Split the list into 5-element subsets, at most one smaller one.
    # 3. Sort the subsets.
    left2 = left
    right2 = left + 4
    i = left # first free slot
    while right2 <= right:
        insertionSort(L, left2, right2)
        # Move the medians to the front of the array.
        # swap(L, i, left2+2)
        L[i], L[left2 + 2] = L[left2 + 2], L[i]
        i += 1
        left2 += 5
        right2 += 5
    # Here the subsets with fewer than 5 elements can be sorted too.
    if right2 == right+1 or right2 == right+2:
        insertionSort(L, left2, right)
        # swap(L, i, left2+1)
        L[i], L[left2+1] = L[left2+1], L[i]
        i += 1
    # 5. Find the median of medians recursively (as a value), locate it,
    #    and move it to the end so it becomes the partition pivot.
    median_val = select_five(L, left, i-1, (i-left+1) // 2)
    median_idx = L.index(median_val, left, right+1)
    L[median_idx], L[right] = L[right], L[median_idx]
    pivot = partition(L, left, right)
    rank = pivot - left + 1  # 1-based rank of the pivot within L[left..right]
    if k == rank:
        return L[pivot]
    elif k > rank:
        return select_five(L, pivot+1, right, k - rank)
    else:
        return select_five(L, left, pivot-1, k)
# tab=[3,6,2,6,4,44,7,1]
tab=[3,6,2,6,4,44,7,1,76,4365,76,437,6,5,2,2,365,88,0,65,45]
# print(len(tab)-1)
print(select_five(tab,0,len(tab)-1,8))
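# sanity check (added; not in the original): select_five must agree with plain
# sorting; k is 1-based, so k=8 pairs with sorted index 7
data = [3,6,2,6,4,44,7,1,76,4365,76,437,6,5,2,2,365,88,0,65,45]
assert select_five(data[:], 0, len(data)-1, 8) == sorted(data)[7]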
|
wiksat/AlghorithmsAndDataStructures
|
ASD/Algorithms/magicFives.py
|
magicFives.py
|
py
| 2,002 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
38902490932
|
import scrapy
from sanic.log import logger
reviews_tag_identifier = "//span[@data-hook='review-body']/span/text()"
next_page_tag_identifier = "//li[@class='a-last']/a/@href"
class AmazonSpider(scrapy.Spider):
name = 'amazon_spider'
allowed_domains = ['amazon.in']
def __init__(self, name=None, uid=None, asin=None):
super().__init__(name)
if not asin:
logger.warning('| amazon spider | asin param was null - it should not be null')
raise Exception('asin should not be null')
self.asin = asin
self.uid = uid
self.start_urls = [
f'https://www.amazon.in/product-reviews/{asin}'
]
self.reviews = []
self.base_url = 'https://www.google.com'
def parse(self, response, **kwargs):
logger.info('| amazon spider | started parsing the reviews')
reviews_dom = response.xpath(reviews_tag_identifier)
if reviews_dom:
for review in reviews_dom:
self.reviews.append(
review.get()
)
else:
logger.warning('| amazon spider | No reviews element in the html page')
next_page = response.xpath(next_page_tag_identifier).get()
if next_page:
yield scrapy.Request(
response.urljoin(next_page), self.parse
)
else:
yield {
'meta-data': {
'uid': self.uid,
'asin': self.asin,
'is_preprocessed': False,
'total_reviews': len(self.reviews),
},
'reviews': self.reviews
}
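# --- hedged run sketch (added; not part of the original file) ---
# the uid/ASIN below are placeholders, not real identifiers
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={"FEEDS": {"reviews.json": {"format": "json"}}})
    process.crawl(AmazonSpider, uid="demo-uid", asin="B000000000")
    process.start()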
|
Mahi-developer/review-analyzer
|
app/scrapper/spiders/amazon_spider.py
|
amazon_spider.py
|
py
| 1,683 |
python
|
en
|
code
| 0 |
github-code
|
6
|