Dataset schema (column name, dtype, observed range / number of classes):

| column | dtype | range / classes |
|---|---|---|
| seq_id | string | lengths 7–11 |
| text | string | lengths 156–1.7M |
| repo_name | string | lengths 7–125 |
| sub_path | string | lengths 4–132 |
| file_name | string | lengths 4–77 |
| file_ext | string | 6 classes |
| file_size_in_byte | int64 | 156–1.7M |
| program_lang | string | 1 class |
| lang | string | 38 classes |
| doc_type | string | 1 class |
| stars | int64 | 0–24.2k (some values null) |
| dataset | string | 1 class |
| pt | string | 1 class |

Each row below lists, in order: seq_id, text (the source file), repo_name, sub_path, file_name, file_ext, file_size_in_byte, program_lang, lang, doc_type, stars, dataset, pt, with cells separated by `|` lines.
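As a minimal sketch of how rows with this schema could be consumed (not part of the dump; the JSON Lines export path `github_code_rows.jsonl` and the `iter_rows` helper are hypothetical), assuming each row were written as one JSON object keyed by the column names above:

```python
import json
from collections import Counter

# Hypothetical export: one JSON object per line, keyed by the columns in the schema table above.
ROWS_PATH = "github_code_rows.jsonl"

def iter_rows(path):
    """Yield one row dict per non-empty line of a JSON Lines export."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    lang_counts = Counter()
    total_bytes = 0
    for row in iter_rows(ROWS_PATH):
        lang_counts[row["lang"]] += 1            # natural-language tag: en, ja, es, pt, ...
        total_bytes += row["file_size_in_byte"]  # size of the source file in bytes
    print("top languages:", lang_counts.most_common(5))
    print("total bytes of code:", total_bytes)
```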
29486183593
|
# Exercise 5: This program records the domain name (rather than the full
# email address) that each message was sent from. At the end of the
# program, print out the contents of your dictionary.
# python schoolcount.py
# Enter a file name: mbox-short.txt
# {'media.berkeley.edu': 4, 'uct.ac.za': 6, 'umich.edu': 7,
# 'gmail.com': 1, 'caret.cam.ac.uk': 1, 'iupui.edu': 8}
file_name = input('Enter the file name with a correct extension: ')
email_domain = {}
file_list = []
with open(file_name) as f:
for line in f:
        if line.startswith('From '):  # only the 'From ' envelope lines, not 'From:' headers
            file_list = line.split()
            mail = file_list[1]
# print(mail)
domain = mail.split('@')[1]
if domain not in email_domain:
email_domain[domain] = 1
else:
email_domain[domain] += 1
print(email_domain)
|
tuyojr/pythonUdacity
|
exercises/dictVIII.py
|
dictVIII.py
|
py
| 915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25418252680
|
from sys import stdin
nums = list(map(int, stdin.readline().split()))
n = nums[0]
m = nums[1]
board = []
for _ in range(n):
board.append(list(map(int, stdin.readline().rstrip())))
dist = [[-1] * m for _ in range(n)]
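# Breadth-first search from (0, 0): dist[i][j] is the number of moves needed to reach cell (i, j); -1 marks unvisited cells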
dir = [(0, 1), (1, 0), (0, -1), (-1, 0)]
queue = []
dist[0][0] = 0
queue.append((0, 0))
while len(queue) > 0:
cur = queue.pop(0)
for k in range(4):
nx = cur[0] + dir[k][0]
ny = cur[1] + dir[k][1]
if nx < 0 or nx >= n or ny < 0 or ny >= m:
continue
if board[nx][ny] == 0 or dist[nx][ny] >= 0:
continue
dist[nx][ny] = dist[cur[0]][cur[1]] + 1
queue.append((nx, ny))
print(dist[n - 1][m - 1] + 1)
|
jaehui327/Algo
|
백준/Silver/2178. 미로 탐색/미로 탐색.py
|
미로 탐색.py
|
py
| 701 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7663203620
|
import numpy as np
import sounddevice as sd
from scipy.io.wavfile import write
from isort import file
from tools import get_device_number
# Recording settings
fs = 48000  # sampling rate (Hz)
duration = 5  # recording duration (seconds)
channels = 7
device_num = get_device_number("Azure Kinect")  # device number of the microphone array
# Record from the microphone array
print("録音開始...")
audio_data = sd.rec(int(duration * fs), samplerate=fs, device=device_num,
channels=channels, dtype='float64')
sd.wait()  # block until the recording finishes
print("録音終了")
# Save each microphone channel to a separate file
for i in range(audio_data.shape[1]):
filename = f"mic_{i+1}.wav"
write(filename, fs, audio_data[:, i])
print(f"{filename}に保存しました")
# Save the full multichannel microphone-array data to one file
filename = "mic_array.wav"
write(filename, fs, audio_data)
print(f"{filename}に保存しました")
|
fkfk21/Enjoy_Azure_Kinect
|
scripts/record_audio.py
|
record_audio.py
|
py
| 1,024 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
1381693048
|
from decimal import Decimal
array = [1, 2, 3, 4, 5]
# Function to find the minimum of the array (works for values wider than 64 bits)
def min_array(arr):
min_value = Decimal("Infinity")
for element in arr:
if element < min_value:
min_value = element
return min_value
# Function to find the maximum of the array (works for values wider than 64 bits)
def max_array(arr):
max_value = Decimal("-Infinity")
for element in arr:
if element > max_value:
max_value = element
return max_value
# Function to print the even elements of the array
def even_find(arr):
even_elements = ''
for element in arr:
if element % 2 == 0:
even_elements += (str(element) + ' ')
print("Even elements:", even_elements)
# Function to print the odd elements of the array
def odd_find(arr):
odd_elements = ''
for element in arr:
if element % 2 == 1:
odd_elements += (str(element) + ' ')
print("Odd elements:", odd_elements)
# Function to find the minimum and maximum totals of the remaining elements
def min_max_find(arr):
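    # Totals after dropping one element: min total = sum - max element, max total = sum - min element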
sum_array = sum(map(int, arr)) # sum for 64bit
min_total_4_elements = sum_array - max_array(arr)
max_total_4_elements = sum_array - min_array(arr)
print(min_total_4_elements, '', max_total_4_elements)
min_max_find(array)
even_find(array)
odd_find(array)
|
chituenguyen/review_test_algorithms
|
minMaxFind.py
|
minMaxFind.py
|
py
| 1,383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71315302907
|
import sys
import requests
from requests.structures import CaseInsensitiveDict
sys.path.insert(1,".")
from config import TODIST_API_KEY
#Rate Limit: 1 per 2 seconds.
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = "Bearer " + str(TODIST_API_KEY)
# This function gets all the tasks in the "Tasks" project, which is the project to be used.
def getTasks():
#Get all the tasks in project and turn it into a list of dicts.
tasks = requests.get('https://api.todoist.com/rest/v1/tasks?project_id=2208003845',headers=headers).json()
tasksPretty = []
#Filter out all the useless data
    for task in tasks:  # renamed loop variable to avoid shadowing the 'tasks' list
        temp = {}
        temp["Task"] = task["content"]
        temp["Description"] = task["description"]
        temp["Priority"] = task["priority"]
        temp["Due Date"] = task["due"]["date"]
        tasksPretty.append(temp)
return tasksPretty
|
leekycauldron/statusAPI
|
TodoistApps/createTask.py
|
createTask.py
|
py
| 937 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32129407183
|
import json
import bitarray
from find_recipes_simple import process_recipe
import time
import sys
PROCESS_SIZE = 50000000
def execute_find_recipe(item_str, recipe_data):
if not item_str:
return 0, 0
hp_crit, hp, price = process_recipe(recipe_data, item_str)
# crit increases hp by 12, we just need to store if it's different
crit_different = hp_crit != hp
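    # Pack price into the upper bits and hp into the low 7 bits of one 16-bit value (assumes hp fits in 7 bits)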
main_data = ((price << 7) + hp) & 0xFFFF
return main_data, crit_different
NUM_INGR = 5
def array2d(first_order, second_order):
array = [None] * first_order
for i in range(first_order):
array[i] = [0] * second_order
return array
class RecipeIterator:
def __init__(self, id_data, start, end):
self.current = start
self.end = end
self.id_data = id_data
self.num_items = len(id_data)
data = array2d(NUM_INGR+1, self.num_items+1)
bino = array2d(self.num_items+NUM_INGR, NUM_INGR+1)
# binomial(n, k), k<=NUM_INGR is bino[n][k]
# Compute binomial with dynamic programming
for n in range(self.num_items+NUM_INGR):
bino[n][0] = 1
for k in range(NUM_INGR+1):
bino[k][k] = 1
for n in range(1,self.num_items+NUM_INGR):
for k in range(1, NUM_INGR+1):
bino[n][k] = bino[n-1][k-1] + bino[n-1][k]
# data[i][m] is size of choosing i ingredients from m, so bino[i+m-1][i]
for m in range(self.num_items+1):
data[0][m] = 1
for i in range(1, NUM_INGR+1):
for m in range(self.num_items+1):
data[i][m] = bino[i+m-1][i]
self.data = data
self.total = data[NUM_INGR][self.num_items]
def get_total(self):
return self.total
def __iter__(self):
return self
def __next__(self):
if self.current >= self.end:
raise StopIteration
input = self.current
self.current += 1
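        # Decode the linear index into a non-decreasing sequence of NUM_INGR item indices (combinations with repetition)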
rest_items = self.num_items
items = []
good = False
for item in range(NUM_INGR):
index = 0
for m in range(self.num_items-rest_items+1, self.num_items+1):
if index + self.data[NUM_INGR-1-item][self.num_items-m+1] > input:
items.append(m-1)
good = True
break
index += self.data[NUM_INGR-1-item][self.num_items-m+1]
if not good:
break
rest_items=self.num_items-items[item]
input -= index
if good:
items = [self.id_data[i] for i in items if i != 0]
return ",".join(items)
else:
raise StopIteration
sample = "[08]========================= 100% "
def run_dump(part, is_multi=True):
screen_x = (part % 4) * 38 + 1
screen_y = int(part/4) + 1
part_str = f"[0{part}]" if part < 10 else f"[{part}]"
def update_progress(permillage):
percentage = int(permillage/10)
if percentage >= 100:
progress_bar = "="*25
else:
progress_bar = "="*int(percentage/4)+">"
if is_multi:
sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (screen_y, screen_x, f"{part_str}{progress_bar} {permillage/10}%"))
else:
print(f"\r{part_str}{progress_bar} {permillage/10}%", end="")
sys.stdout.flush()
# Load the items
with open("../ids.json", "r", encoding="utf-8") as ids_file:
id_data_dict = json.load(ids_file)
id_data = []
for k in id_data_dict:
id_data.append(id_data_dict[k])
with open("recipeData.json", "r", encoding="utf-8") as recipe_file:
recipe_data = json.load(recipe_file)
recipes = RecipeIterator(id_data, part*PROCESS_SIZE,(part+1)*PROCESS_SIZE)
crit_buffer = bitarray.bitarray(endian='little')
progress = 0
permillage = 0
update_progress(0)
with open(f"parts/main{part}.db", "wb") as main_db:
for recipe in recipes:
main_data, crit_flag = execute_find_recipe(recipe, recipe_data)
crit_buffer.append(crit_flag)
main_db.write(bytearray(main_data.to_bytes(2, "big")))
progress += 1
new_permillage = int(progress*1000/PROCESS_SIZE)
if new_permillage != permillage:
update_progress(new_permillage)
permillage = new_permillage
update_progress(1000)
with open(f"parts/crit{part}.db", "wb") as crit_db:
crit_db.write(crit_buffer.tobytes())
if not is_multi:
print()
if __name__ == "__main__":
run_dump(int(sys.argv[1]), False)
|
iTNTPiston/botw-recipe
|
dump/dump.py
|
dump.py
|
py
| 4,729 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28808365461
|
import socket
class Network:
def __init__(self):
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server = socket.gethostname()
self.port = 5555
self.addr = (self.server, self.port)
self.connect()
def getPlayer(self):
return self.player
def connect(self):
try:
self.client.connect(self.addr)
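            # The server is expected to reply with this client's player number right after connecting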
self.player = int(self.client.recv(2048).decode())
except:
pass
def send(self, data):
try:
self.client.send(str.encode(data))
res = self.client.recv(2048).decode()
return res
except socket.error as e:
print(e)
def close(self):
try:
self.client.send(str.encode("close"))
except socket.error as e:
print(e)
# n = Network()
# n.close()
# tup = (20,20)
# print(n.send(f"move.{tup}"))
|
yuvayt/PythonPixelGame
|
network.py
|
network.py
|
py
| 986 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72355972669
|
#!/usr/bin/python3
"""Module insert a line to a file"""
def append_after(filename="", search_string="", new_string=""):
    """Insert a line of text into a file
    after each line containing a specific string.
    Args:
        filename (str): file name
        search_string (str): text to search for
        new_string (str): text to insert after each matching line
"""
current_string = ""
with open(filename, encoding="utf-8") as fr:
for line in fr:
current_string += line
if search_string in line:
current_string += new_string
with open(filename, "w", encoding="utf-8") as fw:
fw.write(current_string)
|
MrZooM001/alx-higher_level_programming
|
0x0B-python-input_output/100-append_after.py
|
100-append_after.py
|
py
| 689 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30884940545
|
from zope.event import notify
from zope.component import adapts, queryUtility
from zope.interface import implements, alsoProvides
from getpaid.core.interfaces import ILineItemFactory, IShoppingCart
from getpaid.core.item import PayableLineItem, RecurringLineItem
from pfg.donationform.interfaces import IDonationFieldSet, DonationCreatedEvent, IDonationCart
from Products.CMFPlone.utils import safe_unicode
try:
from zope.intid.interfaces import IIntIds
IIntIds
except ImportError:
IIntIds = None
try:
from Products.PloneGetPaid import sessions
sessions
except ImportError:
sessions = None
class DonationFieldLineItemFactory(object):
implements(ILineItemFactory)
adapts(IShoppingCart, IDonationFieldSet)
def __init__(self, cart, field):
self.cart = cart
self.field = field
form = field.REQUEST.form
fname = self.field.getId()
self.amount = form.get(fname + '_level')
if not self.amount:
self.amount = form.get(fname + '_amount', '0')
self.amount = self.amount.lstrip('$')
self.is_recurring = form.get(fname + '_recurring', False)
self.occurrences = form.get(fname + '_occurrences', 9999)
def create(self):
pfg = self.field.aq_parent
if self.is_recurring:
item = RecurringLineItem()
item.interval = 1
item.unit = 'months'
item.total_occurrences = self.occurrences
else:
item = PayableLineItem()
item.item_id = self.field.UID()
if IIntIds:
intid_utility = queryUtility(IIntIds)
if intid_utility:
item.uid = intid_utility.register(self.field)
item.name = safe_unicode(pfg.Title())
item.cost = float(self.amount)
item.quantity = 1
# Clear the cart before adding the donation.
# We don't want to surprise users by charging them for something
# they didn't realize they were buying!
for key in self.cart.keys():
del self.cart[key]
self.cart[item.item_id] = item
alsoProvides(self.cart, IDonationCart)
notify(DonationCreatedEvent(self.cart))
try:
sessions.set_came_from_url(pfg)
except:
pass
return item
|
collective/pfg.donationform
|
pfg/donationform/cart.py
|
cart.py
|
py
| 2,366 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28173684280
|
# importing all libraries
import cadquery as cq
from cadquery import exporters
import pyaudio
import json
from vosk import Model, KaldiRecognizer
import pyttsx3
from tkinter import *
from PIL import ImageTk, Image
import tkinter.font as TkFont
import os
from word2number import w2n
# Load the Vosk speech recognition model
model = Model(
r"path\to\any\vosk\voice recognition model"
)
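# Wrap the Vosk model in a recognizer that expects 16 kHz mono audio (matching the paInt16 stream opened below)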
model = KaldiRecognizer(model, 16000)
mic = pyaudio.PyAudio()
stream = mic.open(format=pyaudio.paInt16,
channels=1,
rate=16000,
input=True,
frames_per_buffer=8192)
# initialize Text-to-speech engine
engine = pyttsx3.init()
voices = engine.getProperty('voices') #getting details of current voice
engine.setProperty('voice', voices[1].id)
engine.setProperty('rate', 150)
def say(text):
engine.say(text)
engine.runAndWait()
stream.start_stream()
#default name
name = "any_default_name"
coldstart=True
def record_audio():
while True:
data = stream.read(44100, exception_on_overflow=False)
if model.AcceptWaveform(data):
text = json.loads(model.Result())["text"]
try:
if len(text.split()) > 0:
return text
except:
continue
def introduce():
say("Welcome everyone to the interactive interface. I hope you are doing well"
)
def voice_set():
z = clicked.get()
if z[-1] == "e":
engine.setProperty('voice', voices[0].id)
elif z[-1] == "1":
engine.setProperty('voice', voices[1].id)
else:
engine.setProperty('voice', voices[2].id)
say("Voice changed successfully")
def entry_box_update(text):
e.delete(0, END)
e.insert(0, text)
root.update()
def end_instance():
b.configure(text="Instance ended. Click to start again.",
bg='red',
fg='white')
root.update()
def finalize_design(result):
exporters.export(result,"output.step")
# end_instance()
os.startfile("output.step")
def start():
#initialize name to change
global name
global coldstart
stop = False
e.delete(0, END)
b.configure(text="Instance started", bg='green', fg='white')
root.update()
temp = "New Instance started successfully."
if coldstart:
temp+=" You can take my name, " + str(
name) + " to start interacting."
entry_box_update(temp)
say(temp)
#wait till name is taken
while coldstart:
text = record_audio()
entry_box_update(text)
text = " " + text + " "
if name in text:
temp = "Welcome, my name is " + str(name) + ". How may I help you?"
entry_box_update(temp)
say(temp)
coldstart=False
break
if " stop " in text or " end " in text:
stop = True
say("Ok, ending the instance")
end_instance()
break
if "repeat" in text:
temp = "New Instance started successfully. You can take my name, " + str(
name) + " to start interacting."
entry_box_update(temp)
say(temp)
while not stop:
text = record_audio()
entry_box_update(text)
text = " " + text + " "
if "repeat" in text:
temp = "Welcome, my name is " + str(name) + ". How may I help you?"
entry_box_update(temp)
say(temp)
if " end " in text or " stop " in text:
say("Ok, ending the instance")
end_instance()
break
if " name " in text:
say("Ok, tell me my new name")
temp = record_audio()
name = temp
n2.delete(0, END)
n2.insert(0, temp)
say("ok my name is " + str(temp))
coldstart=True
start()
end_instance()
break
#shapes start here
if "cube" in text:
say("OK, designing a cube")
result = cq.Workplane("XY").box(1, 1, 1)
finalize_design(result)
# break
if "cylinder" in text:
say("OK, designing a cylinder")
result = cq.Workplane("XY").circle(10).extrude(50)
finalize_design(result)
# break
if "cuboid" in text:
say("OK, designing a cuboid")
result = cq.Workplane("XY").box(5, 10, 20)
finalize_design(result)
# break
if "column" in text:
say("OK, designing a column")
(L, H, W, t) = (100.0, 20.0, 20.0, 1.0)
pts = [
(0, H / 2.0),
(W / 2.0, H / 2.0),
(W / 2.0, (H / 2.0 - t)),
(t / 2.0, (H / 2.0 - t)),
(t / 2.0, (t - H / 2.0)),
(W / 2.0, (t - H / 2.0)),
(W / 2.0, H / -2.0),
(0, H / -2.0),
]
result = cq.Workplane("front").polyline(pts).mirrorY().extrude(L)
finalize_design(result)
# break
if "box" in text:
say("OK, designing a box")
result = cq.Workplane("front").box(2, 2, 2).faces("+Z").shell(0.05)
finalize_design(result)
# break
if "cone" in text:
say("Ok, designing a cone")
result = (cq.Workplane("front").box(4.0, 4.0, 0.25).faces(">Z").circle(1.5).workplane(offset=3.0).rect(0.75, 0.5).loft(combine=True))
finalize_design(result)
# break
if "spring" in text:
say("Ok, designing a spring")
r = 0.5 # Radius of the helix
p = 0.4 # Pitch of the helix - vertical distance between loops
h = 2.4 # Height of the helix - total height
# Helix
wire = cq.Wire.makeHelix(pitch=p, height=h, radius=r)
helix = cq.Workplane(obj=wire)
# Final result: A 2D shape swept along a helix.
result = (
cq.Workplane("XZ") # helix is moving up the Z axis
.center(r, 0) # offset isosceles trapezoid
.polyline(((-0.15, 0.1), (0.0, 0.05), (0, 0.35), (-0.15, 0.3)))
.close() # make edges a wire
.sweep(helix, isFrenet=True) # Frenet keeps orientation as expected
)
finalize_design(result)
# break
def delet(dummy):
n2.delete(0, END)
def change_name():
global name
coldstart=True
name = n2.get()
say("Changed name to " + str(name) + " successfully")
#initialize
root = Tk()
# root.geometry("1280x720")
#structures
root.title("Automated Engine Design using Machine Learning")
root.iconbitmap(r"path\to\ico\file")
#logo
myimg = ImageTk.PhotoImage(Image.open("logo\path"))
DMCE_logo = Label(image=myimg, bg="white")
DMCE_logo.grid(row=1, column=1, rowspan=2)
#title label
title_label = Label(
root,
text=
"'Automated Design using Voice Recognition'",
font=TkFont.Font(family="Times New Roman", size=24, weight="bold"),
).grid(row=1, column=2, columnspan=5, padx=10, pady=10, sticky=W + E)
#subtitle label
subtitle_label = Label(
root,
text="Python Project AY:2022-2023",
font=TkFont.Font(family="Times New Roman", size=15),
bd=1
).grid(
row=2,
column=2,
# padx=10,
# pady=10,
columnspan=5,
sticky=W + E)
#desclabel
desc_label = Label(
root,
text=
"\tThis application has been developed as an interface for 'Automated Design using Voice Recognition'.",
font=TkFont.Font(family="Times New Roman", size=12),
bd=1,
anchor=E,
justify="left").grid(row=3, column=2, columnspan=5, sticky=W + E)
#buttons below description
it = Button(root, text="Introduction", command=introduce)
it.grid(row=4, column=2, pady=10)
#options tab
options = ["Voice Male", "Voice Female 1", "Voice Female 2"]
clicked = StringVar()
clicked.set("Voice Female 1")
#option dropdown
nm = OptionMenu(root, clicked, *options)
nm.grid(row=4, column=3, pady=10)
#setting voices
n1 = Button(root, text="Set voice", command=voice_set)
n1.grid(row=4, column=4, pady=10)
#name
n2 = Entry(root, bg="lightgrey")
n2.insert(0, "Name: " + name)
n2.bind("<1>", delet)
n2.grid(row=4, column=5, pady=10)
#name button
n3 = Button(root, text="Set Name", command=change_name)
n3.grid(row=4, column=6, pady=10)
#credits label
name_label = Label(
root,
text=
"Developed By:\n\nParas Raorane",
font=TkFont.Font(family="Times New Roman", size=12),
bd=1,
anchor=W,
pady=10,
padx=10).grid(row=6, column=1, rowspan=2)
#label before terminal
Label(
root,
text="Interactive Terminal",
font=TkFont.Font(family="Times New Roman", size=12, weight="bold"),
).grid(row=5, column=2, columnspan=5, sticky=W + E)
#main entry
e = Entry(root, bg="lightgrey", width=100) #, borderwidth=10)
e.grid(row=6, column=2, columnspan=5, sticky=W + E)
# #inserting text into the box
e.insert(
0,
"Detected text will be displayed here. You can make changes as required.")
b = Button(root, text="Initialize", command=start)
b.grid(row=7, column=2, columnspan=5, pady=10)
root.mainloop()
|
N3dal/YourFirstContribution
|
Python/interactivedesign.py
|
interactivedesign.py
|
py
| 9,411 |
python
|
en
|
code
| null |
github-code
|
6
|
20001859716
|
from nltk.stem import WordNetLemmatizer
import re
from nltk.tokenize import word_tokenize, sent_tokenize
def myTokenizer(readFileDir, saveFileDir, stopwords, lim):
readFile = open(readFileDir, "r", encoding="UTF8")
saveFile = open(saveFileDir, "w", encoding="UTF8")
preprocessed = []
lemmatizer = WordNetLemmatizer()
while True:
line = readFile.readline()
if len(line) == 0:
print("File read finished")
readFile.close()
break
sentTokenized = sent_tokenize(line)
for sent in sentTokenized:
sent = re.sub("[^a-zA-Z]", " ", sent)
wordTokenized = word_tokenize(sent)
i = 0
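            # Drop tokens that are too short or are stopwords; lowercase and lemmatize the rest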
while i < len(wordTokenized):
if len(wordTokenized[i]) <= lim or \
wordTokenized[i] in stopwords:
                    del wordTokenized[i]  # delete by index; remove() could drop an earlier duplicate token instead
else:
wordTokenized[i] = wordTokenized[i].lower()
wordTokenized[i] = lemmatizer.lemmatize(wordTokenized[i])
saveFile.write(wordTokenized[i])
if i < len(wordTokenized) - 1:
saveFile.write(" ")
i += 1
saveFile.write("\n")
preprocessed.append(wordTokenized)
saveFile.close()
return preprocessed
|
jkjan/NLP
|
Word2VecSkip-Gram/myTokenizer.py
|
myTokenizer.py
|
py
| 1,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26495796671
|
from django.contrib import admin,messages
from .models import *
from inline_actions.admin import InlineActionsModelAdminMixin
from commun.util import Util
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
media_root = settings.MEDIA_ROOT
admin.site.site_header = "Administration ERP"
admin.site.site_title = "ERP"
admin.site.index_title = "Bienvenu à l'ERP"
@admin.register(Expediteur)
class ExpediteurAdmin(admin.ModelAdmin):
list_display = ("name","structure","employe")
search_fields = ("name",)
@admin.register(TypeCourier)
class TypeCourierAdmin(admin.ModelAdmin):
list_display = ("name",)
@admin.register(Classification)
class ClassificationAdmin(admin.ModelAdmin):
list_display = ("name",)
@admin.register(Status)
class StatusAdmin(admin.ModelAdmin):
list_display = ("name",)
@admin.register(Attachment)
class AttachmentAdmin(admin.ModelAdmin):
list_display = ("name","file",)
##############
class AttachmentTabular(admin.TabularInline):
model = Attachment
extra = 1
@admin.register(Courier)
class CourierAdmin(InlineActionsModelAdminMixin,admin.ModelAdmin):
inlines = [AttachmentTabular]
autocomplete_fields = ("expediteur","destinataires","visible_a")
inline_actions = ('send_by_mail','delete')
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(deleted=False)
def get_list_display(self, request):
list=("objet","expediteur","date_expedition","date_arrivee","type","classification","status")
if request.user.is_superuser:
return list+('deleted',)+ (super().get_list_display(request).pop(),)
return list+(super().get_list_display(request).pop(),)
def send_by_mail(self, request, obj:Courier, parent_obj):
dists=obj.visible_a.all()
to=[]
for dist in dists:
to.append(dist.getExpediteur().email)
source=obj.expediteur.getExpediteur().email
attachments=list(obj.attachments.all())
arr=list()
files=list()
for dist in dists:
arr.append(dist.email)
for attch in attachments:
files.append(attch.file.name)
try:
res=Util.send_email(subject=obj.objet, message=obj.objet, source=source, to=to, cc=[], attachments=files)
messages.success(request, _("Email Envoyé"))
except Exception :
messages.error(request, _("Erreur d'envoie"))
send_by_mail.short_description = 'envoyer email'
def delete(self, request, obj:Courier, parent_obj):
obj.deleted=True
obj.save()
delete.short_description = 'supprimer'
|
bofilio/erp-backend
|
couriers/admin.py
|
admin.py
|
py
| 2,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14777796282
|
N, K = list(map(int, input().split()))
W = []
for i in range(N):
W.append(int(input()))
def is_enough(P):
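    # Greedily load the items, in order, onto at most K trucks of capacity P; True if all N items fit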
track_index = 0
w_index = 0
    while track_index < K and w_index < N:  # until we run out of trucks or all items are loaded
tmp_sum = 0
while w_index < N and tmp_sum + W[w_index] <= P:
tmp_sum += W[w_index]
w_index += 1
track_index += 1
return w_index == N
left = 0
right = 100000 * 10000  # max number of items * max weight
mid = (left + right) // 2
answer = right
while left <= right:
if is_enough(mid):
answer = mid
right = mid - 1
else:
left = mid + 1
mid = (left + right) // 2
print(str(answer))
|
kokoakuma/algorithm_practice
|
AOJ/Part5_Search/allocation.py
|
allocation.py
|
py
| 664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40702687414
|
#coding: utf-8
from . import BaseSuite
class AcceptanceSuite(BaseSuite):
def acceptance_from_mail(self, invoice_id, pointsale_id, date, items):
"""
/acceptance/pointsale/<int:point_id>/invoice/<int:invoice_id>
"""
data = {
'data': {'items': items, 'date': unicode(date)},
}
return self.client.post("/api/acceptance/pointsale/" + str(pointsale_id) + "/invoice/" + str(invoice_id),
data=self._serialize(data), headers=self._get_headers(True))
def acceptance_custom(self, pointsale_id, provider_id, date, items):
"""
date: 2015-02-12T10:20:45.518Z
items: [,…]
pointsale_id: 2
provider_id: 1
"""
data = {
"data": {
"date": unicode(date),
"pointsale_id": pointsale_id,
"provider_id": provider_id,
"items": items
}
}
return self.client.put("/api/invoice", data=self._serialize(data), headers=self._get_headers(True))
def invoice_items(self, invoice_id):
return self.client.get("/api/invoice/" + str(invoice_id) + "/items", headers=self._get_headers())
|
StasEvseev/buy_api
|
tests/suits/acceptance.py
|
acceptance.py
|
py
| 1,224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
779154686
|
from typing import Annotated
from fastapi import APIRouter, Depends, status
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic_core import ValidationError
from readconnect.shared.domain.dtos.error_response_dto import ErrorResponse
from readconnect.shared.domain.exceptions.exceptions import (
NotFoundError,
InvalidsCredentialsError,
)
from ...application.login_user.login_user_use_case import LoginUserUseCase
from ...application.signup_user.signup_user_use_case import SignupUserUseCase
from ...domain.dtos.login_request_dto import LoginRequestDTO
from ...domain.dtos.login_response_dto import LoginResponseDTO
from ...domain.dtos.signup_request_dto import SignupRequestDTO
auth_router = APIRouter(prefix="/auth")
@auth_router.post(
path="/login",
responses={
200: {"model": LoginResponseDTO},
502: {"model": ErrorResponse},
422: {"model": ErrorResponse},
404: {"model": ErrorResponse},
},
)
async def login(
body: LoginRequestDTO,
login_use_case: Annotated[LoginUserUseCase, Depends(LoginUserUseCase)],
):
try:
response = await login_use_case.execute(body)
return response
except ValidationError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.__str__()}"
return JSONResponse(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except InvalidsCredentialsError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.details}"
return JSONResponse(
status_code=e.status_code,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except NotFoundError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.details}"
return JSONResponse(
status_code=e.status_code,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except Exception as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.__str__()}"
return JSONResponse(
status_code=status.HTTP_502_BAD_GATEWAY,
content=jsonable_encoder(ErrorResponse(details=details)),
)
@auth_router.post(
path="/signup",
responses={
200: {"model": SignupRequestDTO},
502: {"model": ErrorResponse},
422: {"model": ErrorResponse},
404: {"model": ErrorResponse},
409: {"model": ErrorResponse},
},
)
async def signup(
body: SignupRequestDTO,
signup_use_case: Annotated[SignupUserUseCase, Depends(SignupUserUseCase)],
):
try:
response = await signup_use_case.execute(body)
return response
except ValidationError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.__str__()}"
return JSONResponse(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except InvalidsCredentialsError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.details}"
return JSONResponse(
status_code=e.status_code,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except NotFoundError as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.details}"
return JSONResponse(
status_code=e.status_code,
content=jsonable_encoder(ErrorResponse(details=details)),
)
except Exception as e:
details = f"Ocurrió un problema al realizar su petición. Detalle: {e.__str__()}"
return JSONResponse(
status_code=status.HTTP_502_BAD_GATEWAY,
content=jsonable_encoder(ErrorResponse(details=details)),
)
|
YeisonKirax/readconnect-back
|
src/readconnect/auth/infrastructure/routes/auth_routes.py
|
auth_routes.py
|
py
| 3,939 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7795341803
|
import json
from random import randint
import ideas.utils as utils
IDEA_PATH = './ideas/ideas.json'
class Generator:
def __init__(self, context='general', keys=[]):
self.idea_path = IDEA_PATH
self.context = context
self.rand_items = []
with open(self.idea_path, 'r') as json_file:
self.lst = json.load(json_file)
if len(keys) == 0:
self.keys = self.lst.keys()
else:
self.keys = keys
def get_keys(self):
return list(self.keys)
def export_lst(self):
temp_lst = json.dumps(self.lst, indent=4)
f = open(self.idea_path, 'w+')
f.write(temp_lst)
f.close()
def get_rand_item(self, key):
rand_num = randint(0, (len(self.lst[key]) - 1))
if self.lst[key][rand_num] in self.rand_items:
rand_num = randint(0, (len(self.lst[key]) - 1))
self.rand_items.append(self.lst[key][rand_num])
return self.lst[key][rand_num]
def generate(self):
generated_msg = ''
for key in self.keys:
generated_msg += f'{self.get_rand_item(key)} \n'
self.print_result()
return generated_msg
def generate_multiple(self, how_many=None, key='items'):
for i in range(how_many):
self.get_rand_item(key)
self.print_result()
def print_result(self):
utils.print_result(self.rand_items, self.context)
@staticmethod
def convert_file_to_array(file_location):
with open(file_location, "r") as file_to_convert:
file_array = file_to_convert.readlines()
stripped_array = []
for item in file_array:
stripped_array.append(item.strip())
return stripped_array
|
kjbyleni/My_Art_tools
|
ideas/generator.py
|
generator.py
|
py
| 1,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32119109971
|
from rest_framework.routers import DefaultRouter
from apps.home.views import FeatureView, HomeView, I18nViewSet, SitemapView
router = DefaultRouter()
router.register("", HomeView)
router.register("sitemap", SitemapView, basename="sitemap")
router.register("i18n", I18nViewSet, basename="i18n")
router.register("features", FeatureView, basename="feature")
urlpatterns = router.urls
|
OVINC-CN/iWikiAPI
|
apps/home/urls.py
|
urls.py
|
py
| 384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14752733594
|
from flask import Flask,render_template, request, session, redirect, url_for
from threading import Thread
def createApp():
app = Flask(
__name__,
template_folder=r"templates",
static_folder=r"static"
)
return app
app = createApp()
@app.route("/")
def home():
return render_template("./index.html")
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
if __name__ == '__main__':
keep_alive()
|
SentientPlatypus/Self-Driving-Car-Simulation
|
services/main.py
|
main.py
|
py
| 485 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71550269628
|
from pathlib import Path
import typer
from meteor import IdentityStage
from meteor import Language
from meteor import StemmingStage
from meteor import meteor_macro_avg
def cli(
hypotheses_file: Path = typer.Option(
...,
"-h",
"--hypotheses",
help="utf-8 encoded file with system output, one sentence per line",
exists=True,
file_okay=True,
dir_okay=False,
readable=True,
resolve_path=True,
),
references_file: Path = typer.Option(
...,
"-r",
"--references",
help="utf-8 encoded file with translation references, one sentence per line", # noqa
exists=True,
file_okay=True,
dir_okay=False,
readable=True,
resolve_path=True,
),
language: Language = typer.Option(
Language.german,
"-l",
"--language",
help="The language to run meteor for. Controls tokenization and stemming.", # noqa
show_default=True,
case_sensitive=False,
),
):
"""
Computes the METEOR score for the given sentence pairs
and returns the macro average.
Input files must be of same length and contain one sentence per line.
Assumes UTF-8 encoding.
"""
with hypotheses_file.open(encoding="utf-8") as infile:
hypotheses = [line.strip() for line in infile if line.strip()]
with references_file.open(encoding="utf-8") as infile:
references = [line.strip() for line in infile if line.strip()]
if len(hypotheses) != len(references):
typer.echo("Error: Input files must be of same length.")
exit(1)
stages = [
IdentityStage(1.0),
StemmingStage(0.6, language),
]
macro_avg = meteor_macro_avg(hypotheses, references, stages, language)
typer.echo(f"METEOR macro average: {round(macro_avg, 3)}")
def main():
typer.run(cli)
|
wbwseeker/meteor
|
meteor/cli.py
|
cli.py
|
py
| 1,909 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38116610119
|
num = int(input("Enter the value num ="))
factorial = 1
if num <0:
print("Factorial does not exist for negative numbers")
elif num == 0:
print("factorial of 0 is 1")
else:
for i in range(1,num+1):
factorial=factorial*i
print("The factorial of",num,"is",factorial)
#using recursion method
# Python recursion: a function that calls itself.
"""
def fact(n):
return 1 if (n==1 or n==0) else n * fact(n - 1);
num = 5
print("Factorial of", num, "is", fact(num))
"""
#using built_in function
"""
import math
def fact(n):
return(math.factorial(n))
num = int(input("Enter the number:"))
f = fact(num)
print("Factorial of", num, "is", f)
"""
|
3Sangeetha3/python
|
factorial.py
|
factorial.py
|
py
| 698 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29918439800
|
# Assignment 20, for Programming Languages
import matplotlib.pyplot as plt
import numpy as np
print ("Minimos cuadrados")
print ("Este programa calcula la pendiente (m), la intercepcion(b) y el coeficiente de relacion(r) de una regresion lineal")
print ("¿Cuantos datos (en pares (x1,y1) se considera 1) desea evaluar? (minimo 3)")
n=int(input())
while n<3:
print ("Ingresa un valor mayor a 2: ")
n=int(input())
x=np.arange(n, dtype=float)
y=np.arange(n, dtype=float)
acc=np.arange(n, dtype=float)  # working copy of x used as an accumulator for sorting
sx=0
sy=0
sxy=0
sx2=0
sy2=0
for i in range(n):
print ("Ingresa el dato x("+str(i+1)+"):")
x[i]=float(input())
print ("Ingresa el dato y("+str(i+1)+"):")
y[i]=float(input())
    sx=sx+x[i]  # sx is the sum of the x values
    sy=sy+y[i]  # sy is the sum of the y values
    sxy=sxy+x[i]*y[i]  # sum of x*y
    sx2=sx2+x[i]**2  # sum of x**2
    sy2=sy2+y[i]**2  # sum of y**2
print ("TABLA DE DATOS")
print ("#dato, x , y")
for i in range(n):
print (str(i+1)+".-",x[i]," ,",y[i])
for i in range(n):
acc[i]=x[i]
for i in range(0,n):  # bubble sort, used to find the largest and smallest x values for plotting
    for j in range(0,n-1):
        if acc[j]>=acc[j+1]:  # if the current value is not smaller than the next one, swap them
            yac=acc[j]  # store the current value temporarily
            acc[j]=acc[j+1]  # the current slot takes the next value
            acc[j+1]=yac  # and the next slot gets the stored value back
# yp = mean of y, xp = mean of x, r = correlation coefficient
# Σx=sx, Σy=sy, Σxy=sxy, Σx²=sx2
# The slope is given by m = (Σxy - (Σx)*yp) / (Σx² - (Σx)*xp)
yp=sy/n
xp=sx/n
m=(sxy-sx*yp)/(sx2-sx*xp)
r=(n*sxy-sx*sy)/(((n*sx2-sx**2)*(n*sy2-sy**2))**.5)
b=yp-m*xp
print ("Σx=",sx,", Σy=",sy,", Σxy=",sxy,", Σx²=",sx2, ", Σy²=",sy2)
print ("m=",m)
print ("r=",r)
print ("b=",b)
xs=(x[n-1]-x[0])/(n-1)
x1=[acc[0],acc[n-1]]
y1=[m*x1[0]+b,m*x1[1]+b]  # since the fit is a straight line, only 2 points are needed
plt.plot(x,y,"b*", label="Datos")  # data points are drawn as stars to distinguish them from the fitted line
plt.plot(x1,y1,"m-",label="Linializado")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.title("Minimos cuadrados")
|
EdmundoD3/Ejercicios-de-Python
|
programas python parte 2/T20_Minimos_cuadrados.py
|
T20_Minimos_cuadrados.py
|
py
| 2,290 |
python
|
es
|
code
| 0 |
github-code
|
6
|
42913447777
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.cm as cm
import matplotlib.patches as mpatches
import json
import sys
from helpers.helpers_visualisation import get_colors
# from scipy.misc import imread  # unused; scipy.misc.imread was removed in modern SciPy
import matplotlib.image as mpimg
class Animation():
def __init__(self,parameters_path):
parameters_project = json.load(open(parameters_path))
processed_parameters = json.load(open(parameters_project["data_processed_parameters"]))
evaluation_parameters = json.load(open(parameters_project["evaluation_parameters"]))
raw_parameters = json.load(open(parameters_project["data_raw_parameters"]))
visualization_parameters = json.load(open(parameters_project["visualization_parameters"]))
self.scene = visualization_parameters["scene"]
self.sample_id = visualization_parameters["sample_id"]
self.pixel_meter_ratios = raw_parameters["pixel_meter_ratios"]
self.meter2pixel_ratio = 1.0/ self.pixel_meter_ratios[self.scene]
# report_name = evaluation_parameters["report_name"]
report_name = visualization_parameters["report_name"]
sub_dir_name = parameters_project["evaluation_reports"] + "{}/scene_reports/".format(report_name)
self.scene_samples = sub_dir_name + "{}_samples.json".format(self.scene)
self.gif_name = parameters_project["animations_reports"] + "{}_{}_{}.gif".format(self.scene,self.sample_id,report_name)
self.image = parameters_project["raw_images"] + "{}.jpg".format(self.scene)
self.rev_dict_types = processed_parameters["types_dic_rev"]
def animate_sample(self):
file_ = json.load(open(self.scene_samples))
sample = file_[str(self.sample_id)]
inputs = np.array(sample["inputs"])
labels = np.array(sample["labels"])
outputs = np.array(sample["outputs"])
types = np.array(sample["types"])
print(types)
types = [ self.rev_dict_types[str(int(type_))] for type_ in types]
img = mpimg.imread(self.image)
prediction = np.concatenate([inputs,outputs], axis = 1)
gt = np.concatenate([inputs,labels], axis = 1)
prediction = prediction * self.meter2pixel_ratio
gt = gt * self.meter2pixel_ratio
nb_colors = gt.shape[0]
colors = get_colors(nb_colors)
animator = Animate(prediction,gt,colors,img,types,self.gif_name)
animator.animate()
class Animate():
def __init__(self,data_pred,data_gt,colors,img,types,gif_name = "test.gif", plot_ = False, save = True):
self.img = img
self.xs_pred = data_pred[:,:,0]
self.ys_pred = data_pred[:,:,1]
self.xs_gt = data_gt[:,:,0]
self.ys_gt = data_gt[:,:,1]
self.types = types
self.nb_agents = self.xs_pred.shape[0]
self.margin = 1
self.nb_frames = self.xs_pred.shape[1]
self.gif_name = gif_name
self.plot_ = plot_
self.save = save
self.fps = 1
self.colors = colors
self.lin_size = 100
lin = np.linspace(0.6, 0.8, self.lin_size)
self.color_dict = {
"bicycle":cm.Blues(lin),
"pedestrian":cm.Reds(lin),
"car":cm.Greens(lin),
"skate":cm.Greys(lin),
"cart":cm.Purples(lin),
"bus":cm.Oranges(lin)
}
self.colors = [self.color_dict[type_][np.random.randint(self.lin_size)] for type_ in self.types]
self.history = 4
self.get_plots()
def get_plots(self):
self.fig, self.ax = plt.subplots(1,2,squeeze= False)
red_patch = mpatches.Patch(color='red', label='Pedestrians')
        blue_patch = mpatches.Patch(color='b', label='Bicycles')
green_patch = mpatches.Patch(color='green', label='Cars')
grey_patch = mpatches.Patch(color='grey', label='Skates')
purple_patch = mpatches.Patch(color='purple', label='Carts')
orange_patch = mpatches.Patch(color='orange', label='Buses')
plt.legend(handles=[red_patch,blue_patch,green_patch,grey_patch,purple_patch,orange_patch],loc='best',fontsize = 3.5)
self.ax[0][0].imshow(self.img,origin = "upper")
self.ax[0][1].imshow(self.img,origin = "upper")
self.plots1 = []
self.plots2 = []
for i in range(self.nb_agents):
tup = self.ax[0][0].plot([], [], color = self.colors[i],marker = 'o',markersize = 2,linewidth = 0.5)[0]
if i == 0:
tup = self.ax[0][0].plot([], [], color = self.colors[i],marker = '^',markersize = 2,linewidth = 0.5)[0]
self.plots1.append(tup)
tup = self.ax[0][1].plot([], [], color = self.colors[i],marker = 'o',markersize = 2,linewidth = 0.5)[0]
if i == 0:
tup = self.ax[0][1].plot([], [], color = self.colors[i],marker = '^',markersize = 2,linewidth = 0.5)[0]
self.plots2.append(tup)
def animate(self):
self.ax[0][1].set_title("Groundtruth",loc = "left", fontsize=8)
self.ax[0][0].set_title("Predictions",loc = "left", fontsize=8)
plt.tight_layout()
ani = matplotlib.animation.FuncAnimation(self.fig, self.update, frames=self.nb_frames,repeat=True)
if self.plot_:
plt.show()
if self.save:
ani.save(self.gif_name, writer='imagemagick', fps=self.fps,dpi = 200)
def update(self,frame):
frame = int(frame)
end = frame + 1
start = max(0,end-self.history)
if end < 9:
self.fig.suptitle("Timestep: {}, observation time".format(frame+1), fontsize=8)
else:
self.fig.suptitle("Timestep: {}, prediction time".format(frame+1), fontsize=8)
for i,p in enumerate(self.plots1):
xs = self.xs_pred[i]
ys = self.ys_pred[i]
c = 0
for x,y in zip(xs,ys):
if x == 0 and y == 0:
c += 1
else:
break
xs = xs[c:]
ys = ys[c:]
p.set_data(xs[start:end], ys[start:end])
# p.set_color(self.colors[i])
if frame > 7 :
p.set_marker("+")
p.set_markersize(3)
# p.set_fillstyle("none")
for i,p in enumerate(self.plots2):
xs = self.xs_gt[i]
ys = self.ys_gt[i]
c = 0
for x,y in zip(xs,ys):
if x == 0 and y == 0:
c += 1
else:
break
xs = xs[c:]
ys = ys[c:]
p.set_data(xs[start:end], ys[start:end])
# p.set_data(self.xs_gt[i,start:end], self.ys_gt[i,start:end])
# p.set_color(self.colors[i])
if frame > 7 :
p.set_marker("+")
p.set_markersize(3)
if __name__ == "__main__":
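    # NOTE: main() is not defined in this module; the intended entry point is presumably Animation(<parameters_path>).animate_sample()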
main()
|
elbuco1/AttentionMechanismsTrajectoryPrediction
|
src/visualization/classes/animation.py
|
animation.py
|
py
| 7,151 |
python
|
en
|
code
| 49 |
github-code
|
6
|
39660675563
|
from ..libs import *
from . import SubResultUI
class ResultTableUI(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
self.results = None
self.results_ui = {}
def init_ui(self):
self.layout = QHBoxLayout()
self.table = QTableWidget()
self.table.setColumnCount(7)
self.table.setRowCount(6)
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive)
self.table.setSortingEnabled(True)
self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.setHorizontalHeaderLabels(['序号', 'res文件', '匹配成功', '匹配上原子数', '加权匹配比例', '坐标匹配残差', '操作'])
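        # Column headers: index, res file, match succeeded, number of matched atoms, weighted match ratio, coordinate residual, action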
self.table.verticalHeader().setHidden(True)
self.layout.addWidget(self.table)
def updateResults(self, results):
self.results = results
self.results_ui = {}
l = len(results)
self.table.clearContents()
self.table.sortByColumn(0, Qt.AscendingOrder)
self.table.setRowCount(l)
for i in range(l):
r = results[i]
index = QTableWidgetItem()
target = QTableWidgetItem()
is_match = QTableWidgetItem()
max_nm = QTableWidgetItem()
max_rwm = QTableWidgetItem()
min_mse = QTableWidgetItem()
index.setData(Qt.DisplayRole, i + 1)
target.setText(r.target.name)
is_match.setText('是' if r.is_matched else '否')
max_nm.setData(Qt.DisplayRole, r.best_feature[0] if r.is_matched else 0)
max_rwm.setText('%.1f%%' % (r.best_feature[1] * 100) if r.is_matched else '')
min_mse.setText('%.2f' % (r.best_feature[2]) if r.is_matched else '')
self.table.setItem(i, 0, index)
self.table.setItem(i, 1, target)
self.table.setItem(i, 2, is_match)
self.table.setItem(i, 3, max_nm)
self.table.setItem(i, 4, max_rwm)
self.table.setItem(i, 5, min_mse)
if r.is_matched:
bt_view = self.generate_button(r)
self.table.setCellWidget(i, 6, bt_view)
def generate_button(self, result):
button = QPushButton('查看')
bt_widget = QWidget()
hLayout = QHBoxLayout()
hLayout.addWidget(button)
hLayout.setContentsMargins(5, 2, 5, 2)
bt_widget.setLayout(hLayout)
button.clicked.connect(lambda: self.view_result(result))
return bt_widget
def view_result(self, result):
if result not in self.results_ui:
self.results_ui[result] = SubResultUI(result)
self.results_ui[result].show()
|
jingshenSN2/CrystalTool
|
crystalsearchgui/output/result_table_ui.py
|
result_table_ui.py
|
py
| 2,788 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31851011006
|
n1 = float(input())
n2 = float(input())
n3 = float(input())
media = (n1 + n2 + n3) / 3
if media >= 9:
conceito = "Ótimo"
elif media >= 7.5:
conceito = "Bom"
elif media >= 6:
conceito = "Satisfatório"
else:
conceito = "Insuficiente"
print(conceito)
|
CaioPinho9/poo
|
scr/selecao/conceito.py
|
conceito.py
|
py
| 268 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
29510369443
|
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
data = pd.read_csv('./ner_dataset.csv', encoding='latin1')
data = data.fillna(method="ffill")  # ffill = forward-fill (use the previous value); bfill would back-fill
print(data.tail(10))
words = list(set(data["Word"].values))  # build the vocabulary
n_words = len(words)  # vocabulary size
class MajorityVotingTagger(BaseEstimator, TransformerMixin):
def fit(self, X, y):
"""
:param X: list of words
        :param y: list of tags
:return:
"""
word2cnt = {}
self.tags = []
for x, t in zip(X, y):
if t not in self.tags:
self.tags.append(t)
if x in word2cnt:
if t in word2cnt[x]:
word2cnt[x][t] += 1
else:
word2cnt[x][t] = 1
else:
word2cnt[x] = {t: 1}
self.majority = {}
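        # For each word, keep the tag it was most frequently labelled with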
for k, d in word2cnt.items():
self.majority[k] = max(d, key=d.get)
    def predict(self, X, y=None):
        """Predict the tag from memory. If the word is unknown, predict 'o'."""
return [self.majority.get(x, 'o') for x in X]
words = data["Word"].values.tolist()
tags = data["Tag"].values.tolist()
pred = cross_val_predict(estimator=MajorityVotingTagger(), X=words, y=tags, cv=5)
report = classification_report(y_pred=pred, y_true=tags)
print(report)
|
jiangq195/tanxin
|
starter_code1/NER/majority_voting.py
|
majority_voting.py
|
py
| 1,532 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74558105146
|
import pandas as pd
#data process
train1=pd.read_csv('taijing/df_affinity_train.csv')
train2=pd.read_csv('taijing/df_molecule.csv')
test=pd.read_csv('taijing/df_affinity_test_toBePredicted.csv')
train1=pd.DataFrame(train1)
train2=pd.DataFrame(train2)
test=pd.DataFrame(test)
test.columns = ['Protein_ID','Molecule_ID','Ki']
test['Ki']=0
del test['Ki']
# del test['Ki']
train1.columns = ['Protein_ID','Molecule_ID','Ki']
# train1.dropna(inplace=True)
train1.fillna(0.0,inplace=True)
train2.columns = ['Molecule_ID','Fingerprint','cyp_3a4','cyp_2c9','cyp_2d6','ames_toxicity','fathead_minnow_toxicity','tetrahymena_pyriformis_toxicity','honey_bee','cell_permeability','logP','renal_organic_cation_transporter','CLtotal','hia','biodegradation','Vdd','p_glycoprotein_inhibition','NOAEL','solubility','bbb']
# train2.dropna(inplace=True)
del train2['Fingerprint']
train2.fillna(0.0,inplace=True)
test.fillna(0.0,inplace=True)
train1.fillna(0.0,inplace=True)
# train2.to_csv('taijing/df_molecule_drop.csv')
# test_fianll=test.concat(train2, keys=['Molecule_ID'])
test_fianll=pd.merge(test,train2)
train_finall=pd.merge(train1,train2)
test_fianll.fillna(0.0,inplace=True)
train_finall.fillna(0.0,inplace=True)
# print(train.head(6))
train_finall.to_csv('taijing/df_affinity_train_combine.csv')
test_fianll.to_csv('taijing/df_affinity_test_combine.csv')
|
xiaoqian19940510/python-
|
game/DataCastle/Drug_screening/combine.py
|
combine.py
|
py
| 1,348 |
python
|
en
|
code
| 9 |
github-code
|
6
|
12237739567
|
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
# Dataset
import tensorflow_datasets as tfds
# Constants to eventually parameterise
LOGDIR = './logs/autoencoder_gg/'
# Activation function to use for layers
act_func = tf.nn.tanh
# Enable or disable GPU
SESS_CONFIG = tf.ConfigProto(device_count = {'GPU': 1})
class Model:
    ' Simple convolutional autoencoder model '
def __init__(self, input_shape, num_layers=4, activation=tf.nn.relu, layer_width=64, bottleneck_chans = 4, learn_rate=1e-4):
' Initializes model parameters and optimizer '
' Assumes an input shape of [height, width, channels]'
# Stores model params
self.vars = []
self.layers = []
self.input_shape = input_shape
self.shape_list = []
self.shape_list.append(input_shape)
# Down-sampling Layers
for l in range(num_layers):
# First layer
if(l == 0):
in_chans = input_shape[2]
out_chans = layer_width
cur_shape = [1,] + input_shape
# Last Layer
elif(l == num_layers-1):
in_chans = out_chans
out_chans = bottleneck_chans
# Middle layers
else:
in_chans = out_chans
out_chans = layer_width
f_height = 5
f_width = 5
layer = tf.layers.Conv2D(out_chans, (f_height, f_width), strides=[1,1], padding='valid', activation=activation, kernel_initializer=tf.initializers.random_normal, bias_initializer=tf.initializers.random_normal, name='Conv'+str(l))
layer.build(cur_shape)
cur_shape = layer.compute_output_shape(cur_shape)
self.shape_list.append(cur_shape)
self.layers.append(layer)
# Up-sampling Layers
for l in range(num_layers):
# First layer
if(l == 0):
in_chans = bottleneck_chans
out_chans = layer_width
# Last Layer
elif(l == num_layers-1):
in_chans = out_chans
out_chans = input_shape[2]
# Middle layers
else:
in_chans = out_chans
out_chans = layer_width
f_height = 5
f_width = 5
layer = tf.layers.Conv2DTranspose(out_chans, (f_height, f_width), strides=[1,1], padding='valid', activation=activation, kernel_initializer=tf.initializers.random_normal, bias_initializer=tf.initializers.random_normal, name='ConvTP'+str(l))
layer.build(cur_shape)
cur_shape = layer.compute_output_shape(cur_shape)
self.shape_list.append(cur_shape)
self.layers.append(layer)
# Our Optimizer
self.optimizer = tf.train.AdamOptimizer(learn_rate)
# Grab all variables
for l in self.layers:
self.vars.extend(l.weights)
for idx,shape in enumerate(self.shape_list):
if(idx == 0):
out_shape = None
else:
out_shape = self.layers[idx-1].weights[0].shape
print('Layer: ', str(idx), shape, 'Weights: ', out_shape)
def crunch(self, x_input):
' Generates outputs (predictions) from inputs to the model '
with tf.name_scope('MainGraph'):
for l in range(len(self.layers)):
if(l == 0):
h = self.layers[0](x_input)
tf.contrib.summary.image(self.layers[l].name, h[:,:,:,:3], max_images=1)
else:
h = self.layers[l](h)
tf.contrib.summary.image(self.layers[l].name, h[:,:,:,:3], max_images=1)
#x_hat = tf.sigmoid(h)
#x_hat = h
x_hat = tf.sigmoid(tf.image.per_image_standardization(h))
return x_hat
def learn(self, x_input):
' Learns from the batch '
# Track gradients
with tf.GradientTape() as tape:
tape.watch(x_input)
output = self.crunch(x_input)
tf.contrib.summary.image('Reconstructed Image', output, max_images=3)
with tf.name_scope('Generation_Loss'):
reconstruction_loss = tf.losses.mean_squared_error(labels=x_input, predictions=output)
tf.contrib.summary.scalar('Recon Loss', reconstruction_loss)
grads = tape.gradient(reconstruction_loss, self.vars)
self.optimizer.apply_gradients(zip(grads, self.vars))
#self.optimizer.apply_gradients(zip(grads, self.layers[0].weights))
global_step.assign_add(1)
return output, reconstruction_loss
def validate(self, x_input):
' Takes an image from the validation set and produces an output from it '
output = self.crunch(x_input)
output_rs = tf.reshape(output, [-1, self.input_shape[0]*self.input_shape[1], self.input_shape[2]])
x_input_rs = tf.reshape(x_input, [-1, self.input_shape[0]*self.input_shape[1], self.input_shape[2]])
# Get last three of each
concat = tf.concat([x_input_rs, output_rs], axis=1)
concat_img = tf.reshape(concat, [-1, self.input_shape[0]*2, self.input_shape[1], self.input_shape[2]])
tf.contrib.summary.image('Validation Pair', concat_img, max_images=3)
for l in self.layers:
tf.contrib.summary.histogram('Weights_'+l.name, l.weights[0])
tf.contrib.summary.histogram('Biases_'+l.name, l.weights[1])
# Get Data
# Construct a tf.data.Dataset
#ds_name = 'mnist'
#ds_name = 'cifar10'
#ds_name = 'cifar100'
#ds_name = 'omniglot'
ds_name = 'celeb_a'
#ds_name = 'fashion_mnist'
(ds_train, ds_test), ds_info = tfds.load(name=ds_name, split=['train', 'test'], with_info=True)
img_shape = tf.TensorShape(ds_info.features['image'].shape)
print('DS Shape: ')
print(img_shape)
summary_writer = tf.contrib.summary.create_file_writer(LOGDIR+ds_name, flush_millis=100)
summary_writer.set_as_default()
global_step = tf.train.get_or_create_global_step()
# Creates a classifier model
model = Model(img_shape.as_list())
# Preparing datasets (training and validation)
# Batch size of 64; the dataset repeats indefinitely when iterated through
ds_train = ds_train.batch(64).repeat()
ds_test = ds_test.batch(64).repeat()
# Converts validation set into an iterator so we can iterate through it
ds_test_iter = iter(ds_test)
# Perform the training loop (forever)
for idx,batch in enumerate(ds_train):
# Prepare training inputs
x_inputs = tf.math.divide(tf.cast(batch['image'], tf.float32), tf.constant(255.0, dtype=tf.float32))
# Prepare validation inputs
val_batch = next(ds_test_iter)
val_x_inputs = tf.math.divide(tf.cast(val_batch['image'], tf.float32), tf.constant(255.0, dtype=tf.float32))
# Train and validate
with tf.contrib.summary.record_summaries_every_n_global_steps(10):
preds, loss = model.learn(x_inputs)
print('idx: ', idx, 'Loss: ', loss.numpy())
model.validate(val_x_inputs)
|
bfakhri/TensorflowEager
|
autoencoder.py
|
autoencoder.py
|
py
| 7,158 |
python
|
en
|
code
| 3 |
github-code
|
6
|
37370928668
|
import torch
import torchvision
# post-processing
def handle_preds(preds, device, conf_thresh=0.25, nms_thresh=0.45):
total_bboxes, output_bboxes = [], []
    # Convert the raw feature map into bounding-box predictions
N, C, H, W = preds.shape
bboxes = torch.zeros((N, H, W, 6))
pred = preds.permute(0, 2, 3, 1)
    # objectness (foreground/background) branch
    pobj = pred[:, :, :, 0].unsqueeze(dim=-1)
    # box regression branch
    preg = pred[:, :, :, 1:5]
    # class classification branch
    pcls = pred[:, :, :, 5:]
    # box confidence: objectness^0.6 weighted with the best class score^0.4
bboxes[..., 4] = (pobj.squeeze(-1) ** 0.6) * (pcls.max(dim=-1)[0] ** 0.4)
bboxes[..., 5] = pcls.argmax(dim=-1)
    # box center coordinates on the output grid
gy, gx = torch.meshgrid([torch.arange(H), torch.arange(W)])
bw, bh = preg[..., 2].sigmoid(), preg[..., 3].sigmoid()
bcx = (preg[..., 0].tanh() + gx.to(device)) / W
bcy = (preg[..., 1].tanh() + gy.to(device)) / H
    # cx, cy, w, h => x1, y1, x2, y2
x1, y1 = bcx - 0.5 * bw, bcy - 0.5 * bh
x2, y2 = bcx + 0.5 * bw, bcy + 0.5 * bh
bboxes[..., 0], bboxes[..., 1] = x1, y1
bboxes[..., 2], bboxes[..., 3] = x2, y2
bboxes = bboxes.reshape(N, H*W, 6)
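    # Flatten the H x W grid into one list of candidate boxes per image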
total_bboxes.append(bboxes)
batch_bboxes = torch.cat(total_bboxes, 1)
    # Apply NMS to the boxes of each image
for p in batch_bboxes:
output, temp = [], []
b, s, c = [], [], []
        # confidence-threshold filtering
t = p[:, 4] > conf_thresh
pb = p[t]
for bbox in pb:
obj_score = bbox[4]
category = bbox[5]
x1, y1 = bbox[0], bbox[1]
x2, y2 = bbox[2], bbox[3]
s.append([obj_score])
c.append([category])
b.append([x1, y1, x2, y2])
temp.append([x1, y1, x2, y2, obj_score, category])
# Torchvision NMS
if len(b) > 0:
b = torch.Tensor(b).to(device)
c = torch.Tensor(c).squeeze(1).to(device)
s = torch.Tensor(s).squeeze(1).to(device)
keep = torchvision.ops.batched_nms(b, s, c, nms_thresh)
for i in keep:
output.append(temp[i])
output_bboxes.append(torch.Tensor(output))
return output_bboxes
|
Zhefan-Xu/onboard_detector
|
scripts/yolo_detector/utils/tool.py
|
tool.py
|
py
| 2,179 |
python
|
en
|
code
| 9 |
github-code
|
6
|
15169976693
|
from typing import List
from fastapi_utils.inferring_router import InferringRouter
from sqlalchemy.ext.asyncio import AsyncSession
from fastapi import Depends, Request
from admins.models import Category
from crud_handler import BaseHandler
from database import get_async_session
from fastapi_utils.cbv import cbv
from sqlalchemy import select
from permissions import manage_helpdesk
from staff.models import Group, User
from staff.schemas import GroupSchemaReturn, GroupSchemaCreate, UserSchemaReturn, UserSchemaCreate
group_router = InferringRouter(tags=["Group"])
ROUTE = "/api/groups"
user_router = InferringRouter(tags=["User"])
ROUTE_USER = "/api/users"
@cbv(group_router)
class GroupView(BaseHandler):
session: AsyncSession = Depends(get_async_session)
def __init__(self):
super().__init__(Group)
@group_router.post(f"{ROUTE}/", response_model=GroupSchemaReturn, status_code=201)
async def create_item(self, group_object: GroupSchemaCreate, request: Request):
await manage_helpdesk(request)
return await self.create(self.session, group_object.dict(), object_name="Group")
@group_router.get(f"{ROUTE}/", response_model=List[GroupSchemaReturn], status_code=200)
async def read_groups(self,
request: Request,
offset: int = 0,
limit: int = 5):
await manage_helpdesk(request)
query = select(self.model)
return await self.list(query=query,
session=self.session,
limit=limit,
offset=offset)
@group_router.get(f"{ROUTE}/" + "{group_id}", response_model=GroupSchemaReturn, status_code=200)
async def read_group(self, group_id: int, request: Request):
await manage_helpdesk(request)
query = select(self.model)
return await self.retrieve(query, self.session, group_id)
@group_router.delete(f"{ROUTE}/" + "{group_id}", status_code=204)
async def delete_group(self, group_id: int, request: Request):
await manage_helpdesk(request)
return await self.delete(self.session, group_id)
@group_router.put(f"{ROUTE}/" + "{group_id}", response_model=GroupSchemaReturn, status_code=200)
async def update_group(self, group_id: int, group: GroupSchemaReturn, request: Request):
await manage_helpdesk(request)
group_obj = await self.update(self.session, group_id, group.dict())
await self.session.commit()
return group_obj
@cbv(user_router)
class UserView(BaseHandler):
session: AsyncSession = Depends(get_async_session)
def __init__(self):
super().__init__(User)
@user_router.post(f"{ROUTE_USER}/", response_model=UserSchemaReturn, status_code=201)
async def create_item(self, user_object: UserSchemaCreate, request: Request):
await manage_helpdesk(request)
user_dict = user_object.dict()
group_obj = await self.get_obj(select(Group), self.session, {"id": user_dict.get("group").get("id")})
category_object = await self.get_obj(select(Category), self.session, {"id": user_dict.get("category").get("id")})
user_dict["group"] = group_obj
user_dict["category"] = category_object
return await self.create(self.session, user_dict, object_name="User")
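# Wiring sketch (not part of this module): the two class-based routers above
# are mounted like ordinary APIRouters in an application entry point; the
# import path below is an assumption based on this file's location.
#   from fastapi import FastAPI
#   from staff.endpoints import group_router, user_router
#
#   app = FastAPI()
#   app.include_router(group_router)
#   app.include_router(user_router)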
|
AlexeyShakov/helpdesk_fast_api
|
src/staff/endpoints.py
|
endpoints.py
|
py
| 3,364 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6484230311
|
import os
import shutil
import subprocess
from pathlib import Path
import numpy as np
from PIL import Image, ImageOps
from lib import BruteForce, database
from .utils import timeit
MAX_RATIO = 0.90 # 0.60
MIN_RATIO = 0
# PARAM FOR THE RADIOMETRIC APPROACH
# Try to normalize respect mean and std to reject static frames
RESIZE_SCALE_FACTOR = 1  # It can be useful to reduce computation time
INNOVATION_THRESH = 0.001 # 1.5
def RootSift(img_name, desc_folder, N_kpts):
np_kpt_path = Path("{}.kpt.npy".format(img_name))
abs_np_kpt_path = desc_folder / np_kpt_path
np_dsc_path = Path("{}.dsc.npy".format(img_name))
abs_np_dsc_path = desc_folder / np_dsc_path
kp = np.load(abs_np_kpt_path)
desc = np.load(abs_np_dsc_path)
kp_numb = kp.shape[0]
return kp, desc, kp_numb
def NextImg(last_img):
if last_img + 1 < 10:
next_img = "00000{}".format(last_img + 1)
elif last_img + 1 < 100:
next_img = "0000{}".format(last_img + 1)
elif last_img + 1 < 1000:
next_img = "000{}".format(last_img + 1)
elif last_img + 1 < 10000:
next_img = "00{}".format(last_img + 1)
elif last_img + 1 < 100000:
next_img = "0{}".format(last_img + 1)
elif last_img + 1 < 1000000:
next_img = "{}".format(last_img + 1)
return next_img
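# Note (sketch, not used by the module): the cascade above is fixed-width
# zero padding to six digits, i.e. equivalent to
#   "{:06d}".format(int(last_img) + 1)
# for values below 1,000,000.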
# @timeit
def StaticRejection(
STATIC_IMG_REJECTION_METHOD,
img1,
img2,
IMGS_FROM_SERVER,
CURRENT_DIR,
KEYFRAMES_DIR,
COLMAP_EXE_PATH,
MAX_N_FEATURES,
ref_matches,
DEBUG,
newer_imgs,
last_img,
img_dict,
img_batch,
pointer,
colmap_exe,
):
# ROOTSIFT APPROACH
if STATIC_IMG_REJECTION_METHOD == "root_sift":
TEMP_DIR = CURRENT_DIR / "temp"
shutil.rmtree(TEMP_DIR / "pair")
os.makedirs(TEMP_DIR / "pair")
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img1), TEMP_DIR / "pair" / "{}".format(img1)
)
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img2), TEMP_DIR / "pair" / "{}".format(img2)
)
subprocess.run(
[
COLMAP_EXE_PATH / f"{colmap_exe}",
"database_creator",
"--database_path",
TEMP_DIR / "db.db",
],
stdout=subprocess.DEVNULL,
)
subprocess.run(
[
COLMAP_EXE_PATH / f"{colmap_exe}",
"feature_extractor",
"--database_path",
TEMP_DIR / "db.db",
"--image_path",
TEMP_DIR / "pair",
"SiftExtraction.max_num_features",
str(MAX_N_FEATURES),
],
stdout=subprocess.DEVNULL,
)
# subprocess.run(["python3", CURRENT_DIR / "lib" / "RootSIFT.py", "--Path", TEMP_DIR / "db.db", "--Output", TEMP_DIR], stdout=subprocess.DEVNULL)
subprocess.run(
[
COLMAP_EXE_PATH / f"{colmap_exe}",
"sequential_matcher",
"--database_path",
TEMP_DIR / "db.db",
"--SequentialMatching.overlap",
"1",
],
stdout=subprocess.DEVNULL,
)
# subprocess.run([COLMAP_EXE_PATH / f"{colmap_exe}", "mapper", "--project_path", CURRENT_DIR / "lib" / "mapper_for_static_rejection.ini"], stdout=subprocess.DEVNULL)
# kp1, desc1, kp_numb1 = RootSift(img1, TEMP_DIR, 8000)
# kp2, desc2, kp_numb2 = RootSift(img2, TEMP_DIR, 8000)
        # opencv_matches = BrForce(desc1, desc2, 'Lowe_ratio_test', 'L2', True, 'intersection', print_debug = False, ratio_thresh=0.8)
# matches_matrix = np.zeros((len(opencv_matches), 2))
# for l in range(0,len(opencv_matches)):
# matches_matrix[l][0] = int(opencv_matches[l].queryIdx)
        # matches_matrix[l][1] = int(opencv_matches[l].trainIdx)
db_p = TEMP_DIR / "db.db"
matches = database.dbReturnMatches(db_p.as_posix(), 15)
os.remove(TEMP_DIR / "db.db")
if len(matches.keys()) != 0:
key = list(matches.keys())[0]
matches_matrix = matches[key]
if ref_matches == []:
ref_matches = matches_matrix
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img1),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
)
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img2),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img) + 1)),
)
img_dict["{}".format(img1)] = "{}.jpg".format(NextImg(int(last_img)))
img_dict["{}".format(img2)] = "{}.jpg".format(
NextImg(int(last_img) + 1)
)
pointer += 1
return (
ref_matches,
newer_imgs,
NextImg(int(last_img) + 1),
img_dict,
img_batch,
pointer,
) # pointer, delta,
else:
vec_ref = ref_matches[:, 1]
vec = matches_matrix[:, 0]
vec_ref = vec_ref.tolist()
vec = vec.tolist()
vec_ref = [int(v) for v in vec_ref]
vec = [int(v) for v in vec]
intersection = [el for el in vec if el in vec_ref]
control_ratio = len(intersection) / len(vec_ref)
print("control_ratio", control_ratio)
if (
control_ratio < MAX_RATIO and control_ratio > MIN_RATIO
): # and os.path.exists(TEMP_DIR / "0"):
# shutil.copy(IMGS_FROM_SERVER / "{}".format(img1), KEYFRAMES_DIR / "{}".format(img1))
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img2),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
)
img_dict["{}".format(img2)] = "{}.jpg".format(
NextImg(int(last_img))
)
print("\n.. added img\n")
ref_matches = matches_matrix
pointer += 1 # + delta
# delta = 0
newer_imgs = True
img_batch.append(img2)
return (
ref_matches,
newer_imgs,
NextImg(int(last_img)),
img_dict,
img_batch,
pointer,
) # pointer, delta,
else:
# delta += 1
print("\n.. NO\n")
return (
ref_matches,
newer_imgs,
last_img,
img_dict,
img_batch,
pointer,
) # pointer, delta,
elif len(matches.keys()) == 0:
# delta += 1
print("\n.. NO .. len(matches.keys()) == 0\n")
return (
ref_matches,
newer_imgs,
last_img,
img_dict,
img_batch,
pointer,
) # pointer, delta,
# RADIOMETRIC APPROACH
elif STATIC_IMG_REJECTION_METHOD == "radiometric":
        # 'try' is necessary because the main loop looks for new images and the last one can be incomplete
        # because it is copied from other folders and the copy procedure can be unfinished
try:
im1 = Image.open(IMGS_FROM_SERVER / img1)
im2 = Image.open(IMGS_FROM_SERVER / img2)
im1.resize(
(
round(im1.size[0] * RESIZE_SCALE_FACTOR),
round(im1.size[1] * RESIZE_SCALE_FACTOR),
)
)
im2.resize(
(
round(im2.size[0] * RESIZE_SCALE_FACTOR),
round(im2.size[1] * RESIZE_SCALE_FACTOR),
)
)
im1_gray = ImageOps.grayscale(im1)
im2_gray = ImageOps.grayscale(im2)
# Normalization
im1_array = np.array(im1_gray)
im1_array = (im1_array - np.min(im1_array)) / np.max(im1_array)
im2_array = np.array(im2_gray)
im2_array = (im2_array - np.min(im2_array)) / np.max(im2_array)
mean1 = np.mean(im1_array)
mean2 = np.mean(im2_array)
# innovation = np.sum(((im1_array - np.mean(im1_array)) * (im2_array - np.mean(im2_array))) / (np.std(im1_array) * np.std(im2_array)))
# ref = np.sum(((im1_array - np.mean(im1_array)) * (im1_array - np.mean(im1_array))) / (np.std(im1_array) * np.std(im1_array)))
# innovation = innovation/ref
innovation = np.absolute(mean2 - mean1)
if innovation > INNOVATION_THRESH:
if ref_matches == []:
ref_matches = [
"-"
                    ]  # Used for compatibility with frame-rejection approaches that need a matches matrix
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img1),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
)
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img2),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img) + 1)),
)
img_dict["{}".format(img1)] = "{}.jpg".format(
NextImg(int(last_img))
)
img_dict["{}".format(img2)] = "{}.jpg".format(
NextImg(int(last_img) + 1)
)
pointer += 1
return (
ref_matches,
newer_imgs,
NextImg(int(last_img) + 1),
img_dict,
img_batch,
pointer,
)
elif ref_matches == ["-"]:
shutil.copy(
IMGS_FROM_SERVER / "{}".format(img2),
KEYFRAMES_DIR / "{}.jpg".format(NextImg(int(last_img))),
)
img_dict["{}".format(img2)] = "{}.jpg".format(
NextImg(int(last_img))
)
pointer += 1
newer_imgs = True
img_batch.append(img2)
return (
ref_matches,
newer_imgs,
NextImg(int(last_img)),
img_dict,
img_batch,
pointer,
)
else:
print("!! Frame rejeccted. innovation < INNOVATION_THRESH !!", end="\r")
return ref_matches, newer_imgs, last_img, img_dict, img_batch, pointer
except:
print("!! Frame truncated !!")
return ref_matches, newer_imgs, last_img, img_dict, img_batch, pointer
else:
print("Choose 'radiometric' or 'root_sift' as STATIC_IMG_REJECTION_METHOD")
quit()
|
franioli/COLMAP_SLAM
|
lib/static_rejection.py
|
static_rejection.py
|
py
| 11,514 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6634246233
|
def solve(length, nums, sumnum):
    # method 1: hash map of previously seen values -> index
    hmap = {}
    for i in range(length):
        cnum = nums[i]
        target = sumnum - cnum
        # Look up the complement before storing the current value,
        # so an element is never paired with itself.
        if target in hmap:
            return [i, hmap[target]]
        if cnum not in hmap:
            hmap[cnum] = i
    return [-1, -1]
length = int(input())
nums = list(map(int, input().split()))
target = int(input().strip())
res = solve(length, nums, target)
print(" ".join(list(map(str, res))))
|
rh01/gofiles
|
lcode1-99/ex64/solve copy.py
|
solve copy.py
|
py
| 467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
648044037
|
import time
import h5py
from affogato.segmentation import InteractiveMWS
def debug():
z = 0
path = '/home/pape/Work/data/ilastik/mulastik/data/data.h5'
with h5py.File(path, 'r') as f:
# raw = f['raw'][z]
affs = f['prediction'][:, z]
strides = [4, 4]
offsets = [[-1, 0], [0, -1], [-3, 0], [0, -3],
[-9, 0], [0, -9], [-27, 0], [0, -27]]
    with h5py.File('./seeds.h5', 'r') as f:
seeds = f['data'][:]
assert seeds.shape == affs.shape[1:]
imws = InteractiveMWS(affs, offsets, n_attractive_channels=2,
strides=strides, randomize_strides=True)
print("Compute segmentation without seeds ...")
t0 = time.time()
seg1 = imws()
t0 = time.time() - t0
print("... done in %f s" % t0)
print("Add seeds ...")
t0 = time.time()
imws.update_seeds(seeds)
t0 = time.time() - t0
print("... done in %f s" % t0)
print("Compute segmentation with seeds ...")
t0 = time.time()
seg2 = imws()
t0 = time.time() - t0
print("... done in %f s" % t0)
assert seg1.shape == seg2.shape == seeds.shape
if __name__ == '__main__':
debug()
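# Run sketch: the script expects `data.h5` with a 'prediction' dataset laid
# out as (channels, z, y, x), as the slicing above assumes, plus a local
# `seeds.h5` whose 'data' dataset matches the 2D slice; then run directly:
#   python example/interactive/debug.py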
|
constantinpape/affogato
|
example/interactive/debug.py
|
debug.py
|
py
| 1,168 |
python
|
en
|
code
| 9 |
github-code
|
6
|
32644440027
|
"""Shifters Rig Main class."""
import datetime
import getpass
import os.path
import sys
import json
# Maya
import pymel.core as pm
from pymel.core import datatypes
from pymel import versions
# mgear
import mgear
import mgear.core.utils
from . import guide, component
from mgear.core import primitive, attribute, skin, dag, icon, node
from mgear import shifter_classic_components
from mgear import shifter_epic_components
from mgear.shifter import naming
import importlib
from mgear.core import utils
PY2 = sys.version_info[0] == 2
# check if we have loaded the necessary plugins
if not pm.pluginInfo("mgear_solvers", q=True, loaded=True):
try:
pm.loadPlugin("mgear_solvers")
except RuntimeError:
pm.displayError("You need the mgear_solvers plugin!")
if not pm.pluginInfo("matrixNodes", q=True, loaded=True):
pm.loadPlugin("matrixNodes")
COMPONENT_PATH = os.path.join(os.path.dirname(__file__), "component")
TEMPLATE_PATH = os.path.join(COMPONENT_PATH, "templates")
SYNOPTIC_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, "synoptic", "tabs")
)
SHIFTER_COMPONENT_ENV_KEY = "MGEAR_SHIFTER_COMPONENT_PATH"
def log_window():
if mgear.logMode and mgear.use_log_window:
log_window_name = "mgear_shifter_build_log_window"
log_window_field_reporter = "mgear_shifter_log_field_reporter"
# call pm.window(log_window_name, exists=True) 2 times to avoid
# false check in Maya 2024
pm.window(log_window_name, exists=True)
if not pm.window(log_window_name, exists=True):
log_win = pm.window(
log_window_name,
title="Shifter Build Log",
iconName="Shifter Log",
width=800,
height=500,
)
form = pm.formLayout()
reporter = pm.cmdScrollFieldReporter(
log_window_field_reporter, width=400, height=200, clr=True
)
btn_close = pm.button(
label="Close",
command=lambda *args: pm.deleteUI(log_win, window=True),
)
margin_v = 5
margin_h = 5
pm.formLayout(
form,
e=True,
attachForm=[
(reporter, "top", margin_v),
(reporter, "right", margin_h),
(reporter, "left", margin_h),
(btn_close, "bottom", margin_v),
(btn_close, "right", margin_h),
(btn_close, "left", margin_h),
],
attachControl=[
(reporter, "bottom", margin_v, btn_close),
],
)
pm.setParent("..")
pm.showWindow(log_win)
else:
pm.cmdScrollFieldReporter(log_window_field_reporter, e=True, clr=True)
pm.showWindow(log_window_name)
mgear.logInfos()
def getComponentDirectories():
"""Get the components directory"""
# TODO: ready to support multiple default directories
return mgear.core.utils.gatherCustomModuleDirectories(
SHIFTER_COMPONENT_ENV_KEY,
[
os.path.join(os.path.dirname(shifter_classic_components.__file__)),
os.path.join(os.path.dirname(shifter_epic_components.__file__)),
],
)
# return mgear.core.utils.gatherCustomModuleDirectories(
# SHIFTER_COMPONENT_ENV_KEY,
# os.path.join(os.path.dirname(shifter_classic_components.__file__)))
def importComponentGuide(comp_type):
"""Import the Component guide"""
dirs = getComponentDirectories()
defFmt = "mgear.core.shifter.component.{}.guide"
customFmt = "{}.guide"
module = mgear.core.utils.importFromStandardOrCustomDirectories(
dirs, defFmt, customFmt, comp_type
)
return module
def importComponent(comp_type):
"""Import the Component"""
dirs = getComponentDirectories()
defFmt = "mgear.core.shifter.component.{}"
customFmt = "{}"
module = mgear.core.utils.importFromStandardOrCustomDirectories(
dirs, defFmt, customFmt, comp_type
)
return module
def reloadComponents(*args):
"""Reload all componets
Args:
*args: Dummy
"""
compDir = getComponentDirectories()
for x in compDir:
for com in compDir[x]:
try:
if PY2:
reload(importComponent(com))
reload(importComponentGuide(com))
else:
importlib.reload(importComponent(com))
importlib.reload(importComponentGuide(com))
print("reload : {}.{}".format(os.path.basename(x), com))
except ImportError:
pass
class Rig(object):
"""The main rig class.
Attributes:
guide: guide.Rig() initialization.
groups (dic): Rig groups (Maya sets)
components (dic): Dictionary for the rig components.
Keys are the component fullname (ie. 'arm_L0')
componentsIndex (list): Components index list.
"""
def __init__(self):
self.guide = guide.Rig()
self.groups = {}
self.subGroups = {}
self.components = {}
self.componentsIndex = []
self.customStepDic = {}
self.build_data = {}
self.component_finalize = False
def buildFromDict(self, conf_dict):
log_window()
startTime = datetime.datetime.now()
mgear.log("\n" + "= SHIFTER RIG SYSTEM " + "=" * 46)
self.stopBuild = False
self.guide.set_from_dict(conf_dict)
endTime = datetime.datetime.now()
finalTime = endTime - startTime
mgear.log(
"\n"
+ "= SHIFTER FILE READ {} [ {} ] {}".format("=" * 16, finalTime, "=" * 7)
)
# Build
mgear.log("\n" + "= BUILDING RIG " + "=" * 46)
self.from_dict_custom_step(conf_dict, pre=True)
self.build()
self.from_dict_custom_step(conf_dict, pre=False)
# Collect post-build data
build_data = self.collect_build_data()
endTime = datetime.datetime.now()
finalTime = endTime - startTime
pm.flushUndo()
pm.displayInfo(
"Undo history have been flushed to avoid "
"possible crash after rig is build. \n"
"More info: "
"https://github.com/miquelcampos/mgear/issues/72"
)
mgear.log(
"\n"
+ "= SHIFTER BUILD RIG DONE {} [ {} ] {}".format(
"=" * 16, finalTime, "=" * 7
)
)
return build_data
def buildFromSelection(self):
"""Build the rig from selected guides."""
startTime = datetime.datetime.now()
mgear.log("\n" + "= SHIFTER RIG SYSTEM " + "=" * 46)
self.stopBuild = False
selection = pm.ls(selection=True)
if not selection:
selection = pm.ls("guide")
if not selection:
mgear.log(
"Not guide found or selected.\n"
+ "Select one or more guide root or a guide model",
mgear.sev_error,
)
return
# check if is partial build or full guide build
ismodel = False
if selection[0].hasAttr("ismodel"):
self.preCustomStep(selection)
ismodel = True
if not self.stopBuild:
mgear.log("\n" + "= GUIDE VALIDATION " + "=" * 46)
# Check guide is valid
self.guide.setFromSelection()
if not self.guide.valid:
return
# Build
mgear.log("\n" + "= BUILDING RIG " + "=" * 46)
self.build()
if ismodel:
self.postCustomStep()
# Collect post-build data
build_data = self.collect_build_data()
endTime = datetime.datetime.now()
finalTime = endTime - startTime
pm.flushUndo()
pm.displayInfo(
"Undo history have been flushed to avoid "
"possible crash after rig is build. \n"
"More info: "
"https://github.com/miquelcampos/mgear/issues/72"
)
mgear.log(
"\n"
+ "= SHIFTER BUILD RIG DONE {} [ {} ] {}".format(
"=" * 16, finalTime, "=" * 7
)
)
return build_data
def build(self):
"""Build the rig."""
self.options = self.guide.values
self.guides = self.guide.components
self.customStepDic["mgearRun"] = self
self.initialHierarchy()
self.processComponents()
self.finalize()
return self.model
def stepsList(self, checker, attr):
if self.options[checker] and self.options[attr]:
return self.options[attr].split(",")
else:
return None
def from_dict_custom_step(self, conf_dict, pre=True):
if pre:
pre_post = "doPreCustomStep"
pre_post_path = "preCustomStep"
else:
pre_post = "doPostCustomStep"
pre_post_path = "postCustomStep"
p_val = conf_dict["guide_root"]["param_values"]
if p_val[pre_post]:
customSteps = p_val[pre_post_path]
self.customStep(customSteps.split(","))
def customStep(self, customSteps=None):
if customSteps:
for step in customSteps:
if not self.stopBuild:
if step.startswith("*"):
continue
self.stopBuild = guide.helperSlots.runStep(
step.split("|")[-1][1:], self.customStepDic
)
else:
pm.displayWarning("Build Stopped")
break
def preCustomStep(self, selection):
if (
selection[0].hasAttr("ismodel")
and selection[0].attr("doPreCustomStep").get()
):
customSteps = selection[0].attr("preCustomStep").get()
if customSteps:
mgear.log("\n" + "= PRE CUSTOM STEPS " + "=" * 46)
# use forward slash for OS compatibility
if sys.platform.startswith("darwin"):
customSteps = [
cs.replace("\\", "/") for cs in customSteps.split(",")
]
self.customStep(customSteps)
else:
self.customStep(customSteps.split(","))
def postCustomStep(self):
customSteps = self.stepsList("doPostCustomStep", "postCustomStep")
if customSteps:
mgear.log("\n" + "= POST CUSTOM STEPS " + "=" * 46)
# use forward slash for OS compatibility
if sys.platform.startswith("darwin"):
customSteps = [cs.replace("\\", "/") for cs in customSteps]
self.customStep(customSteps)
# @utils.timeFunc
def get_guide_data(self):
"""Get the guide data
Returns:
str: The guide data
"""
if self.guide.guide_template_dict:
return json.dumps(self.guide.guide_template_dict)
else:
return json.dumps(self.guide.get_guide_template_dict())
def initialHierarchy(self):
"""Build the initial hierarchy of the rig.
Create the rig model, the main properties,
and a couple of base organisation nulls.
Get the global size of the rig.
"""
mgear.log("Initial Hierarchy")
# --------------------------------------------------
# Model
self.model = primitive.addTransformFromPos(None, self.options["rig_name"])
attribute.lockAttribute(self.model)
# --------------------------------------------------
# INFOS
self.isRig_att = attribute.addAttribute(self.model, "is_rig", "bool", True)
self.rigName_att = attribute.addAttribute(
self.model, "rig_name", "string", self.options["rig_name"]
)
self.user_att = attribute.addAttribute(
self.model, "user", "string", getpass.getuser()
)
self.isWip_att = attribute.addAttribute(
self.model, "wip", "bool", self.options["mode"] != 0
)
self.date_att = attribute.addAttribute(
self.model, "date", "string", str(datetime.datetime.now())
)
self.mayaVersion_att = attribute.addAttribute(
self.model,
"maya_version",
"string",
str(pm.mel.eval("getApplicationVersionAsFloat")),
)
self.gearVersion_att = attribute.addAttribute(
self.model, "gear_version", "string", mgear.getVersion()
)
self.synoptic_att = attribute.addAttribute(
self.model, "synoptic", "string", str(self.options["synoptic"])
)
self.comments_att = attribute.addAttribute(
self.model, "comments", "string", str(self.options["comments"])
)
self.ctlVis_att = attribute.addAttribute(self.model, "ctl_vis", "bool", True)
if versions.current() >= 201650:
self.ctlVisPlayback_att = attribute.addAttribute(
self.model, "ctl_vis_on_playback", "bool", True
)
self.jntVis_att = attribute.addAttribute(self.model, "jnt_vis", "bool", True)
# adding the always draw shapes on top to global attribute
if versions.current() >= 20220000:
self.ctlXRay_att = attribute.addAttribute(
self.model, "ctl_x_ray", "bool", False
)
self.qsA_att = attribute.addAttribute(self.model, "quickselA", "string", "")
self.qsB_att = attribute.addAttribute(self.model, "quickselB", "string", "")
self.qsC_att = attribute.addAttribute(self.model, "quickselC", "string", "")
self.qsD_att = attribute.addAttribute(self.model, "quickselD", "string", "")
self.qsE_att = attribute.addAttribute(self.model, "quickselE", "string", "")
self.qsF_att = attribute.addAttribute(self.model, "quickselF", "string", "")
self.rigGroups = self.model.addAttr("rigGroups", at="message", m=1)
self.rigPoses = self.model.addAttr("rigPoses", at="message", m=1)
self.rigCtlTags = self.model.addAttr("rigCtlTags", at="message", m=1)
self.rigScriptNodes = self.model.addAttr("rigScriptNodes", at="message", m=1)
self.guide_data_att = attribute.addAttribute(
self.model, "guide_data", "string", self.get_guide_data()
)
        # --------------------------------------------------
# Global Ctl
if self.options["worldCtl"]:
if self.options["world_ctl_name"]:
name = self.options["world_ctl_name"]
else:
name = "world_ctl"
icon_shape = "circle"
else:
name = "global_C0_ctl"
icon_shape = "crossarrow"
self.global_ctl = self.addCtl(
self.model,
name,
datatypes.Matrix(),
self.options["C_color_fk"],
icon_shape,
w=10,
)
attribute.setRotOrder(self.global_ctl, "ZXY")
# Connect global visibility
pm.connectAttr(self.ctlVis_att, self.global_ctl.attr("visibility"))
if versions.current() >= 201650:
pm.connectAttr(
self.ctlVisPlayback_att, self.global_ctl.attr("hideOnPlayback")
)
attribute.lockAttribute(self.global_ctl, ["v"])
# --------------------------------------------------
# Setup in world Space
self.setupWS = primitive.addTransformFromPos(self.model, "setup")
attribute.lockAttribute(self.setupWS)
# --------------------------------------------------
# Basic set of null
if self.options["joint_rig"]:
self.jnt_org = primitive.addTransformFromPos(self.model, "jnt_org")
if self.options["force_SSC"]:
self.global_ctl.s >> self.jnt_org.s
pm.connectAttr(self.jntVis_att, self.jnt_org.attr("visibility"))
def processComponents(self):
"""
Process the components of the rig, following the creation steps.
"""
# Init
self.components_infos = {}
for comp in self.guide.componentsIndex:
guide_ = self.guides[comp]
mgear.log("Init : " + guide_.fullName + " (" + guide_.type + ")")
module = importComponent(guide_.type)
Component = getattr(module, "Component")
comp = Component(self, guide_)
if comp.fullName not in self.componentsIndex:
self.components[comp.fullName] = comp
self.componentsIndex.append(comp.fullName)
self.components_infos[comp.fullName] = [
guide_.compType,
guide_.getVersion(),
guide_.author,
]
# Creation steps
self.steps = component.Main.steps
for i, name in enumerate(self.steps):
# for count, compName in enumerate(self.componentsIndex):
for compName in self.componentsIndex:
comp = self.components[compName]
mgear.log(name + " : " + comp.fullName + " (" + comp.type + ")")
comp.stepMethods[i]()
if name == "Finalize":
self.component_finalize = True
if self.options["step"] >= 1 and i >= self.options["step"] - 1:
break
def finalize(self):
"""Finalize the rig."""
groupIdx = 0
# Properties --------------------------------------
mgear.log("Finalize")
# clean jnt_org --------------------------------------
if self.options["joint_rig"]:
mgear.log("Cleaning jnt org")
jnt_org_child = dag.findChildrenPartial(self.jnt_org, "org")
if jnt_org_child:
for jOrg in jnt_org_child:
if not jOrg.listRelatives(c=True):
pm.delete(jOrg)
# Groups ------------------------------------------
mgear.log("Creating groups")
# Retrieve group content from components
for name in self.componentsIndex:
component_ = self.components[name]
for name, objects in component_.groups.items():
self.addToGroup(objects, name)
for name, objects in component_.subGroups.items():
self.addToSubGroup(objects, name)
# Create master set to group all the groups
masterSet = pm.sets(n=self.model.name() + "_sets_grp", em=True)
pm.connectAttr(masterSet.message, self.model.rigGroups[groupIdx])
groupIdx += 1
# Creating all groups
pm.select(cl=True)
for name, objects in self.groups.items():
s = pm.sets(n=self.model.name() + "_" + name + "_grp")
s.union(objects)
pm.connectAttr(s.message, self.model.rigGroups[groupIdx])
groupIdx += 1
masterSet.add(s)
for parentGroup, subgroups in self.subGroups.items():
pg = pm.PyNode(self.model.name() + "_" + parentGroup + "_grp")
for sg in subgroups:
sub = pm.PyNode(self.model.name() + "_" + sg + "_grp")
if sub in masterSet.members():
masterSet.remove(sub)
pg.add(sub)
# create geo group
geoSet = pm.sets(n=self.model.name() + "_geo_grp", em=True)
pm.connectAttr(geoSet.message, self.model.rigGroups[groupIdx])
masterSet.add(geoSet)
groupIdx += 1
# Bind pose ---------------------------------------
# controls_grp = self.groups["controllers"]
# pprint(controls_grp, stream=None, indent=1, width=100)
ctl_master_grp = pm.PyNode(self.model.name() + "_controllers_grp")
pm.select(ctl_master_grp, replace=True)
dag_node = pm.dagPose(save=True, selection=True)
pm.connectAttr(dag_node.message, self.model.rigPoses[0])
print(dag_node)
# hide all DG nodes inputs in channel box -----------------------
# only hides if components_finalize or All steps are done
if self.component_finalize:
for c in self.model.listHistory(ac=True, f=True):
if c.type() != "transform":
c.isHistoricallyInteresting.set(False)
# Bind skin re-apply
if self.options["importSkin"]:
try:
pm.displayInfo("Importing Skin")
skin.importSkin(self.options["skin"])
except RuntimeError:
pm.displayWarning(
"Skin doesn't exist or is not correct. "
+ self.options["skin"]
+ " Skipped!"
)
def collect_build_data(self):
"""Collect post build data
by default the data is stored in the root joint.
Returns:
dict: The collected data
"""
self.build_data["Components"] = []
for c, comp in self.customStepDic["mgearRun"].components.items():
self.build_data["Components"].append(comp.build_data)
if self.options["data_collector_embedded"]:
root_jnt = self.get_root_jnt_embbeded()
self.add_collected_data_to_root_jnt(root_jnt=root_jnt)
if self.options["data_collector"]:
self.data_collector_output(self.options["data_collector_path"])
return self.build_data
def data_collector_output(self, file_path=None):
"""Output collected data to a Json file
Args:
file_path (Str, optional): Output path for the Json File
"""
if not file_path:
ext_filter = "Shifter Collected data (*{})".format(guide.DATA_COLLECTOR_EXT)
file_path = pm.fileDialog2(fileMode=0, fileFilter=ext_filter)[0]
with open(file_path, "w") as f:
f.write(json.dumps(self.build_data, indent=4))
file_path = None
def add_collected_data_to_root_jnt(self, root_jnt=None):
"""Add collected data to root joint
Root joint is the first joint generated in the rig.
"""
if not root_jnt:
for c in self.componentsIndex:
comp = self.customStepDic["mgearRun"].components[c]
if not root_jnt and comp.jointList:
root_jnt = comp.jointList[0]
break
if root_jnt:
attribute.addAttribute(
root_jnt,
"collected_data",
"string",
str(json.dumps(self.build_data)),
)
def get_root_jnt_embbeded(self):
"""Get the root joint to embbed the data
Returns:
pyNode: Joint
"""
j_name = self.options["data_collector_embedded_custom_joint"]
if j_name:
try:
return pm.PyNode(j_name)
except pm.MayaNodeError:
pm.displayError("{} doesn't exist or is not unique".format(j_name))
def addCtl(self, parent, name, m, color, iconShape, **kwargs):
"""Create the control and apply the shape, if this is alrealdy stored
in the guide controllers grp.
Args:
parent (dagNode): The control parent
name (str): The control name.
            m (matrix): The transformation matrix for the control.
color (int or list of float): The color for the control in index
or RGB.
iconShape (str): The controls default shape.
kwargs (variant): Other arguments for the iconShape type variations
Returns:
dagNode: The Control.
"""
if "degree" not in kwargs.keys():
kwargs["degree"] = 1
bufferName = name + "_controlBuffer"
if bufferName in self.guide.controllers.keys():
ctl_ref = self.guide.controllers[bufferName]
ctl = primitive.addTransform(parent, name, m)
for shape in ctl_ref.getShapes():
ctl.addChild(shape, shape=True, add=True)
pm.rename(shape, name + "Shape")
else:
ctl = icon.create(parent, name, m, color, iconShape, **kwargs)
self.addToGroup(ctl, "controllers")
# Set the control shapes isHistoricallyInteresting
for oShape in ctl.getShapes():
oShape.isHistoricallyInteresting.set(False)
# connecting the always draw shapes on top to global attribute
if versions.current() >= 20220000:
pm.connectAttr(self.ctlXRay_att, oShape.attr("alwaysDrawOnTop"))
# set controller tag
if versions.current() >= 201650:
pm.controller(ctl)
self.add_controller_tag(ctl, None)
attribute.addAttribute(ctl, "isCtl", "bool", keyable=False)
attribute.addAttribute(
ctl, "ctl_role", "string", keyable=False, value="world_ctl"
)
return ctl
def addToGroup(self, objects, names=["hidden"]):
"""Add the object in a collection for later group creation.
Args:
objects (dagNode or list of dagNode): Object to put in the group.
names (str or list of str): Names of the groups to create.
"""
if not isinstance(names, list):
names = [names]
if not isinstance(objects, list):
objects = [objects]
for name in names:
if name not in self.groups.keys():
self.groups[name] = []
self.groups[name].extend(objects)
def addToSubGroup(self, subGroups, parentGroups=["hidden"]):
"""Add the object in a collection for later SubGroup creation.
Args:
subGroups (dagNode or list of dagNode): Groups (core set) to add
as a Subgroup.
            parentGroups (str or list of str): Names of the parent groups
to create.
"""
if not isinstance(parentGroups, list):
parentGroups = [parentGroups]
if not isinstance(subGroups, list):
subGroups = [subGroups]
for pg in parentGroups:
if pg not in self.subGroups.keys():
self.subGroups[pg] = []
self.subGroups[pg].extend(subGroups)
def add_controller_tag(self, ctl, tagParent):
ctt = node.add_controller_tag(ctl, tagParent)
if ctt:
ni = attribute.get_next_available_index(self.model.rigCtlTags)
pm.connectAttr(
ctt.message, self.model.attr("rigCtlTags[{}]".format(str(ni)))
)
def getLocalName(self, guideName):
"""This function return the local name, cutting the Maya fullname
and taking the latest part.
ie. "parentA|parentB|arm_C0_root" will return "arm_C0_root"
Args:
guideName (str): The guide name.
Returns:
str: The local Name
"""
if guideName is None:
return None
localName = guideName.split("|")[-1]
return localName
def getComponentName(self, guideName, local=True):
"""
        This function returns the component name
        ie. "arm_C0_root" returns "arm_C0"
Args:
guideName (str): The guide name.
Returns:
            str: The component Name
"""
if guideName is None:
return None
if local:
guideName = self.getLocalName(guideName)
names = naming.get_component_and_relative_name(guideName)
if names:
return names[0]
def getRelativeName(self, guideName):
"""This function return the name of the relative in the guide
ie. "arm_C0_root" return "root"
Args:
guideName (str): The guide name.
Returns:
str: The relative Name
"""
if guideName is None:
return None
localName = self.getLocalName(guideName)
names = naming.get_component_and_relative_name(localName)
if names:
return names[1]
def findRelative(self, guideName, relatives_map={}):
"""Return the objects in the rig matching the guide object.
Args:
guideName (str): Name of the guide object.
relatives_map (dict, optional): Custom relative mapping to
point any object in a component. For example used to point
Auto in upvector reference.
Returns:
transform: The relative object
"""
if guideName is None:
return self.global_ctl
if guideName in relatives_map.keys():
return relatives_map[guideName]
comp_name = self.getComponentName(guideName)
relative_name = self.getRelativeName(guideName)
if comp_name not in self.components.keys():
return self.global_ctl
return self.components[comp_name].getRelation(relative_name)
def findControlRelative(self, guideName):
"""Return the control objects in the rig matching the guide object.
Args:
guideName (str): Name of the guide object.
Returns:
transform: The relative control object
"""
if guideName is None:
return self.global_ctl
# localName = self.getLocalName(guideName)
comp_name = self.getComponentName(guideName)
relative_name = self.getRelativeName(guideName)
if comp_name not in self.components.keys():
return self.global_ctl
return self.components[comp_name].getControlRelation(relative_name)
    # TODO: update findComponent and other find methods with new functions like
# comp_name and others. Better composability
def findComponent(self, guideName):
"""Return the component from a guide Name.
Args:
guideName (str): Name of the guide object.
Returns:
transform: The component
"""
if guideName is None:
return None
comp_name = self.getComponentName(guideName, False)
# comp_name = "_".join(guideName.split("_")[:2])
if comp_name not in self.components.keys():
return None
return self.components[comp_name]
def findUIHost(self, guideName):
"""Return the UI host of the compoent
Args:
guideName (str): Name of the guide object.
Returns:
transform: The relative object
"""
if guideName is None:
return self.ui
comp_name = self.getComponentName(guideName, False)
# comp_name = "_".join(guideName.split("_")[:2])
if comp_name not in self.components.keys():
return self.ui
if self.components[comp_name].ui is None:
self.components[comp_name].ui = pm.UIHost(self.components[comp_name].root)
return self.components[comp_name].ui
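# Usage sketch inside a Maya session (assumption: mgear is on Maya's Python
# path and a guide root is selected in the scene):
#   from mgear.shifter import Rig
#   build_data = Rig().buildFromSelection()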
|
mgear-dev/mgear4
|
release/scripts/mgear/shifter/__init__.py
|
__init__.py
|
py
| 31,325 |
python
|
en
|
code
| 209 |
github-code
|
6
|
75126804667
|
from mock import Mock
import pytest
import json
from datetime import date, datetime
from src.smallquery.functions.run_query import app
@pytest.fixture()
def run_query_event():
"""
Generates Run Query event
"""
return {
'query': 'select * from unit/test.parquet',
'limit': 10,
}
def test_results_serializer():
a_date = date(year=2020, month=11, day=10)
a_date_time = datetime(year=2021, month=6, day=24,
hour=13, minute=3, second=12, microsecond=2323)
a_str = 'ksjdf'
a_int = 78
data = {
'some_date': a_date,
'some_date_time': a_date_time,
'some_string': a_str,
'some_int': a_int,
}
expected_json = '{"some_date": "2020-11-10", "some_date_time": "2021-06-24T13:03:12.002323", "some_string": "ksjdf", "some_int": 78}'
actual_json = json.dumps(data, default=app.results_serializer)
assert expected_json == actual_json
def test_handler(run_query_event):
ensure_db_connected_mock = Mock()
run_query_mock = Mock()
app.ensure_db_connected = ensure_db_connected_mock
app.run_query = run_query_mock
app.lambda_handler(run_query_event, None)
assert ensure_db_connected_mock.call_count == 1
assert run_query_mock.call_count == 1
|
nxn128/serverless-query
|
test/test_run_query.py
|
test_run_query.py
|
py
| 1,290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71184254909
|
#!/usr/bin/python
class podcasts(object):
__tablename__ = 'podcasts'
def __init__(self):
self.castid = 0
self.castname = ''
self.feedurl = ''
self.pcenabled = 1
self.lastupdate = 0
self.lastattempt = 0
self.failedattempts = 0
class episodes(object):
__tablename__ = 'episodes'
def __init__(self):
self.castid = 0
self.episodeid = 0
self.title = ''
self.epurl = ''
self.enctype = ''
self.status = ''
self.eplength = 0
self.epfirstattempt = 0
self.eplastattempt = 0
self.epfailedattempts = 0
self.epguid = ''
|
ddboline/programming_tests
|
sqlalchemy/hpodder.py
|
hpodder.py
|
py
| 679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43633170723
|
from __future__ import absolute_import
import logging
import math
#typing
import torch
import torch.nn.functional as F
#overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, SpanPruner
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import MentionRecall, ConllCorefScores
try:
from itertools import izip
except:
izip = zip
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class CoreferenceResolver(Model):
u"""
This ``Model`` implements the coreference resolution model described "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
    lexical_dropout: ``float``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
vocab ,
text_field_embedder ,
context_layer ,
mention_feedforward ,
antecedent_feedforward ,
feature_size ,
max_span_width ,
spans_per_word ,
max_antecedents ,
lexical_dropout = 0.2,
initializer = InitializerApplicator(),
regularizer = None) :
super(CoreferenceResolver, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._context_layer = context_layer
self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
feedforward_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)))
self._mention_pruner = SpanPruner(feedforward_scorer)
self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
self._endpoint_span_extractor = EndpointSpanExtractor(context_layer.get_output_dim(),
combination=u"x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size,
bucket_widths=False)
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=text_field_embedder.get_output_dim())
# 10 possible distance buckets.
self._num_distance_buckets = 10
self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
self._max_span_width = max_span_width
self._spans_per_word = spans_per_word
self._max_antecedents = max_antecedents
self._mention_recall = MentionRecall()
self._conll_coref_scores = ConllCorefScores()
if lexical_dropout > 0:
self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
else:
self._lexical_dropout = lambda x: x
initializer(self)
#overrides
def forward(self, # type: ignore
text ,
spans ,
span_labels = None,
metadata = None) :
# pylint: disable=arguments-differ
u"""
Parameters
----------
text : ``Dict[str, torch.LongTensor]``, required.
The output of a ``TextField`` representing the text of
the document.
spans : ``torch.IntTensor``, required.
A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
indices into the text of the document.
span_labels : ``torch.IntTensor``, optional (default = None)
A tensor of shape (batch_size, num_spans), representing the cluster ids
of each span, or -1 for those which do not appear in any clusters.
Returns
-------
An output dictionary consisting of:
top_spans : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
the start and end word indices of the top spans that survived the pruning stage.
antecedent_indices : ``torch.IntTensor``
A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
the index (with respect to top_spans) of the possible antecedents the model considered.
predicted_antecedents : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
was no predicted link.
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
"""
# Shape: (batch_size, document_length, embedding_size)
text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
document_length = text_embeddings.size(1)
num_spans = spans.size(1)
# Shape: (batch_size, document_length)
text_mask = util.get_text_field_mask(text).float()
# Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
# SpanFields return -1 when they are used as padding. As we do
# some comparisons based on span widths when we attend over the
# span representations that we generate from these indices, we
# need them to be <= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
# Shape: (batch_size, document_length, encoding_dim)
contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(contextualized_embeddings, spans)
        # Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
        # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
# Prune based on mention scores.
num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))
(top_span_embeddings, top_span_mask,
top_span_indices, top_span_mention_scores) = self._mention_pruner(span_embeddings,
span_mask,
num_spans_to_keep)
top_span_mask = top_span_mask.unsqueeze(-1)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans,
top_span_indices,
flat_top_span_indices)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep)
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents. Note that this is independent
# of the batch dimension - it's just a function of the span's position in
# top_spans. The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask =\
self._generate_valid_antecedents(num_spans_to_keep, max_antecedents, util.get_device_of(text_mask))
# Select tensors relating to the antecedent spans.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
candidate_antecedent_embeddings = util.flattened_index_select(top_span_embeddings,
valid_antecedent_indices)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
candidate_antecedent_mention_scores = util.flattened_index_select(top_span_mention_scores,
valid_antecedent_indices).squeeze(-1)
# Compute antecedent scores.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(top_span_embeddings,
candidate_antecedent_embeddings,
valid_antecedent_offsets)
# Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)
coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
top_span_mention_scores,
candidate_antecedent_mention_scores,
valid_antecedent_log_mask)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {u"top_spans": top_spans,
u"antecedent_indices": valid_antecedent_indices,
u"predicted_antecedents": predicted_antecedents}
if span_labels is not None:
# Find the gold labels for the spans which we kept.
pruned_gold_labels = util.batched_index_select(span_labels.unsqueeze(-1),
top_span_indices,
flat_top_span_indices)
antecedent_labels = util.flattened_index_select(pruned_gold_labels,
valid_antecedent_indices).squeeze(-1)
antecedent_labels += valid_antecedent_log_mask.long()
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
antecedent_labels)
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
coreference_log_probs = util.last_dim_log_softmax(coreference_scores, top_span_mask)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
self._mention_recall(top_spans, metadata)
self._conll_coref_scores(top_spans, valid_antecedent_indices, predicted_antecedents, metadata)
output_dict[u"loss"] = negative_marginal_log_likelihood
if metadata is not None:
output_dict[u"document"] = [x[u"original_text"] for x in metadata]
return output_dict
#overrides
def decode(self, output_dict ):
u"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
# A tensor of shape (batch_size, num_spans_to_keep, 2), representing
# the start and end indices of each span.
batch_top_spans = output_dict[u"top_spans"].detach().cpu()
# A tensor of shape (batch_size, num_spans_to_keep) representing, for each span,
# the index into ``antecedent_indices`` which specifies the antecedent span. Additionally,
# the index can be -1, specifying that the span has no predicted antecedent.
batch_predicted_antecedents = output_dict[u"predicted_antecedents"].detach().cpu()
# A tensor of shape (num_spans_to_keep, max_antecedents), representing the indices
# of the predicted antecedents with respect to the 2nd dimension of ``batch_top_spans``
# for each antecedent we considered.
antecedent_indices = output_dict[u"antecedent_indices"].detach().cpu()
batch_clusters = []
# Calling zip() on two tensors results in an iterator over their
# first dimension. This is iterating over instances in the batch.
for top_spans, predicted_antecedents in izip(batch_top_spans, batch_predicted_antecedents):
spans_to_cluster_ids = {}
clusters = []
for i, (span, predicted_antecedent) in enumerate(izip(top_spans, predicted_antecedents)):
if predicted_antecedent < 0:
# We don't care about spans which are
# not co-referent with anything.
continue
# Find the right cluster to update with this span.
# To do this, we find the row in ``antecedent_indices``
# corresponding to this span we are considering.
# The predicted antecedent is then an index into this list
# of indices, denoting the span from ``top_spans`` which is the
# most likely antecedent.
predicted_index = antecedent_indices[i, predicted_antecedent]
antecedent_span = (top_spans[predicted_index, 0].item(),
top_spans[predicted_index, 1].item())
# Check if we've seen the span before.
if antecedent_span in spans_to_cluster_ids:
predicted_cluster_id = spans_to_cluster_ids[antecedent_span]
else:
# We start a new cluster.
predicted_cluster_id = len(clusters)
# Append a new cluster containing only this span.
clusters.append([antecedent_span])
# Record the new id of this span.
spans_to_cluster_ids[antecedent_span] = predicted_cluster_id
# Now add the span we are currently considering.
span_start, span_end = span[0].item(), span[1].item()
clusters[predicted_cluster_id].append((span_start, span_end))
spans_to_cluster_ids[(span_start, span_end)] = predicted_cluster_id
batch_clusters.append(clusters)
output_dict[u"clusters"] = batch_clusters
return output_dict
#overrides
def get_metrics(self, reset = False) :
mention_recall = self._mention_recall.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {u"coref_precision": coref_precision,
u"coref_recall": coref_recall,
u"coref_f1": coref_f1,
u"mention_recall": mention_recall}
@staticmethod
def _generate_valid_antecedents(num_spans_to_keep ,
max_antecedents ,
device ):
u"""
This method generates possible antecedents per span which survived the pruning
stage. This procedure is `generic across the batch`. The reason this is the case is
that each span in a batch can be coreferent with any previous span, but here we
are computing the possible `indices` of these spans. So, regardless of the batch,
the 1st span _cannot_ have any antecedents, because there are none to select from.
Similarly, each element can only predict previous spans, so this returns a matrix
of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
(i - 1) - j if j <= i, or zero otherwise.
Parameters
----------
num_spans_to_keep : ``int``, required.
The number of spans that were kept while pruning.
max_antecedents : ``int``, required.
The maximum number of antecedent spans to consider for every span.
device: ``int``, required.
The CUDA device to use.
Returns
-------
valid_antecedent_indices : ``torch.IntTensor``
The indices of every antecedent to consider with respect to the top k spans.
Has shape ``(num_spans_to_keep, max_antecedents)``.
valid_antecedent_offsets : ``torch.IntTensor``
The distance between the span and each of its antecedents in terms of the number
of considered spans (i.e not the word distance between the spans).
Has shape ``(1, max_antecedents)``.
valid_antecedent_log_mask : ``torch.FloatTensor``
The logged mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
Has shape ``(1, num_spans_to_keep, max_antecedents)``.
"""
# Shape: (num_spans_to_keep, 1)
target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
# Shape: (1, max_antecedents)
valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
# This is a broadcasted subtraction.
# Shape: (num_spans_to_keep, max_antecedents)
raw_antecedent_indices = target_indices - valid_antecedent_offsets
# In our matrix of indices, the upper triangular part will be negative
# because the offsets will be > the target indices. We want to mask these,
# because these are exactly the indices which we don't want to predict, per span.
# We're generating a logspace mask here because we will eventually create a
# distribution over these indices, so we need the 0 elements of the mask to be -inf
# in order to not mess up the normalisation of the distribution.
# Shape: (1, num_spans_to_keep, max_antecedents)
valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()
# Shape: (num_spans_to_keep, max_antecedents)
valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
def _compute_span_pair_embeddings(self,
top_span_embeddings ,
antecedent_embeddings ,
antecedent_offsets ):
u"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
Parameters
----------
top_span_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the top spans. Has shape
(batch_size, num_spans_to_keep, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
antecedent_offsets : ``torch.IntTensor``, required.
The offsets between each top span and its antecedent spans in terms
of spans we are considering. Has shape (1, max_antecedents).
Returns
-------
span_pair_embeddings : ``torch.FloatTensor``
Embedding representation of the pair of spans to consider. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
# Shape: (1, max_antecedents, embedding_size)
antecedent_distance_embeddings = self._distance_embedding(
util.bucket_values(antecedent_offsets,
num_total_buckets=self._num_distance_buckets))
# Shape: (1, 1, max_antecedents, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
antecedent_embeddings.size(2),
antecedent_distance_embeddings.size(-1))
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
span_pair_embeddings = torch.cat([target_embeddings,
antecedent_embeddings,
antecedent_embeddings * target_embeddings,
antecedent_distance_embeddings], -1)
return span_pair_embeddings
@staticmethod
    def _compute_antecedent_gold_labels(top_span_labels,
                                        antecedent_labels):
u"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
Parameters
----------
top_span_labels : ``torch.IntTensor``, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : ``torch.IntTensor``, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
Returns
-------
pairwise_labels_with_dummy_label : ``torch.FloatTensor``
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
# Shape: (batch_size, num_spans_to_keep, 1)
dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([dummy_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(self,
                                    pairwise_embeddings,
                                    top_span_mention_scores,
                                    antecedent_mention_scores,
                                    antecedent_log_mask):
u"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
Parameters
----------
pairwise_embeddings: ``torch.FloatTensor``, required.
Embedding representations of pairs of spans. Has shape
(batch_size, num_spans_to_keep, max_antecedents, encoding_dim)
top_span_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every span. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every antecedent. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_log_mask: ``torch.FloatTensor``, required.
The log of the mask for valid antecedents.
Returns
-------
coreference_scores: ``torch.FloatTensor``
A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),
            representing the unnormalised score for each (span, antecedent) pair
we considered.
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
antecedent_scores += top_span_mention_scores + antecedent_mention_scores
antecedent_scores += antecedent_log_mask
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
CoreferenceResolver = Model.register(u"coref")(CoreferenceResolver)
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/models/coreference_resolution/coref.py
|
coref.py
|
py
| 32,507 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
33639465646
|
import concurrent.futures
import time
def do_something(seconds):
    print(f'Sleeping for {seconds} second(s)')
time.sleep(seconds)
return 'test'
start_time = time.perf_counter()
with concurrent.futures.ThreadPoolExecutor() as executor:
    #Use a list comprehension to create and submit 10 future objects; at this point the 10 concurrent tasks are already running. The code below that calls result() is optional.
results = [executor.submit(do_something, 1) for _ in range(10)]
    #Iterate over all the futures with as_completed and print each task's return value via result()
for f in concurrent.futures.as_completed(results):
print (f.result())
end_time = time.perf_counter()-start_time
print(f'Total elapsed time: {round(end_time,2)} seconds')
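# A minimal alternative sketch (added for illustration, not in the original script):
# executor.map submits the same calls, preserves submission order and yields the
# return values directly; uncomment to try it.
# with concurrent.futures.ThreadPoolExecutor() as executor:
#     for result in executor.map(do_something, [1] * 10):
#         print(result)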
|
nnzmj/ParryWang
|
Multi_Threading/多线程演示(ThreadPoolExecutor-2).py
|
多线程演示(ThreadPoolExecutor-2).py
|
py
| 744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17168418038
|
import json
import re
from urllib import request
import requests
from panopto_oauth2 import PanoptoOAuth2
server = "sph.hosted.panopto.com"
client_id = "29bd20b2-fd78-4bdd-9c40-af7a0133c139"
client_secret = "oZVXzyYlRQun/+xrxaItsdSDm1n7Np6rNqlmjHjgcyQ="
def read_coursera_to_time_sentence(input_path, video_id):
with open(input_path) as f:
lines = f.readlines()
if not lines:
print("{} has caption url but doesn't have caption".format(video_id))
index = 1
start, timestamp, sen, sen_list, time_list = True, False, "", [], []
for line in lines:
if line == "{}\n".format(index):
if index > 1:
sen_list.append(sen.replace("\n", " ").strip())
index += 1
sen = ""
timestamp = True
elif timestamp:
time_list.append(line.replace("\n", ""))
timestamp = False
else:
sen += line
sen_list.append(sen.replace("\n", " ").strip())
return time_list, sen_list
def convert_timestamp_to_sec(timestamp):
timestamp_split = timestamp.split(":")
timestamp_second = int(timestamp_split[0])*3600+int(timestamp_split[1])*60+float(timestamp_split[2].replace(",", "."))
return timestamp_second
def convert_time_list_to_seconds(time_list):
time_list_second = []
for i in range(len(time_list)):
start_time = time_list[i].split(" --> ")[0]
end_time = time_list[i].split(" --> ")[1]
start_time_sec = convert_timestamp_to_sec(start_time)
end_time_sec = convert_timestamp_to_sec(end_time)
time_list_second.append([start_time_sec, end_time_sec])
return time_list_second
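# Worked example (illustration only), following the two helpers above:
#   convert_timestamp_to_sec("00:01:23,450")
#       -> 0*3600 + 1*60 + 23.45 = 83.45 seconds
#   convert_time_list_to_seconds(["00:01:23,450 --> 00:01:25,000"])
#       -> [[83.45, 85.0]]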
def generate_word_list(time_list_second, sen_list):
word_dict, word_list = {}, []
for i in range(len(time_list_second)):
start_time, end_time = time_list_second[i]
sen = sen_list[i]
        # split the sentence into whitespace-separated tokens
        sen_split = sen.split(" ")
        # sen_split: ['Hi,', 'everyone.', 'Welcome', 'to']
# sen: 'Hi, everyone. Welcome to'
# start_time, end_time: [10.29, 12.94]
# start assigning each timestamp to each word
## c_index: iterate sen
## w_index: iterate word
w_index, c_index = 0, 0
while c_index < len(sen):
if sen[c_index: (c_index + len(sen_split[w_index]))] == sen_split[w_index]:
time_for_each_word = (end_time - start_time) / len(sen)
word_start = round(c_index * time_for_each_word + start_time, 2)
word_end = round(word_start + len(sen_split[w_index]) * time_for_each_word, 2)
word_dict['word'] = sen_split[w_index]
word_dict['start_time'] = word_start
word_dict['end_time'] = word_end
word_list.append(word_dict)
word_dict = {}
c_index += len(sen_split[w_index])
w_index += 1
else:
c_index += 1
return word_list
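# Illustrative sketch of the proportional timing above (values are approximate,
# not from the original file): despite its name, time_for_each_word is the time
# allotted to each character. For sen = 'Hi, everyone. Welcome to' with
# start_time = 10.29 and end_time = 12.94, each character gets
# (12.94 - 10.29) / 24 ~= 0.11 s, so the first entry appended is roughly
# {'word': 'Hi,', 'start_time': 10.29, 'end_time': 10.62}.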
def generate_output_dictionary(sen_list, word_list):
full_transcript = ""
for sen in sen_list:
full_transcript += sen + " "
full_transcript = full_transcript.strip()
output_dict = dict()
output_dict['timedtext'] = word_list
output_dict['full_transcript'] = full_transcript
return output_dict
def output_json(output_dict):
with open("output_with_caption/output.json", 'w', encoding="utf-8") as file_obj:
json.dump(output_dict, file_obj, indent=2)
def main():
with open("output_with_caption_url/output.json") as json_file:
videos = json.load(json_file)
video_list = []
count = 1
for i in range(len(videos['Results'])):
video = videos['Results'][i]
url = video['Urls']['CaptionDownloadUrl']
if url is not None:
print("================={}=================".format(i))
count += 1
video_dict = video.copy()
time_list, sen_list = read_coursera_to_time_sentence("caption/" + video['Id'] + ".txt", video['Id'])
time_list_second = convert_time_list_to_seconds(time_list)
word_list = generate_word_list(time_list_second, sen_list)
caption_dict = generate_output_dictionary(sen_list, word_list)
video_dict['caption'] = caption_dict
video_list.append(video_dict)
# Just convert one caption
# To convert all captions, please comment the following if statement
if count == 2:
break
print("================")
print(len(video_list))
output_dict = {"Results": video_list}
output_json(output_dict)
if __name__ == '__main__':
main()
|
Zhou-Xun/panopto_video_extraction
|
convert_caption_url.py
|
convert_caption_url.py
|
py
| 4,773 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6015616240
|
#!/usr/bin/env python
#coding: utf-8
import asyncio
import yaml
import sys
from NetDevices import DeviceHandler
from git import Repo
import time
FILEPATH = "/root/device_cfg/"
async def get_config(device):
hostname = device.get("hostname")
conn = DeviceHandler(device)
conn.connect()
await conn.login()
r = await conn.get_config()
file_name = FILEPATH + hostname
open(file_name, "w").write(r[1])
print("%s is saved" %file_name)
deviceinfos = {}
try:
yaml_cfg = sys.argv[1]
except IndexError:
    print("Please provide a YAML configuration file")
sys.exit(1)
f = open(yaml_cfg)
deviceinfos = yaml.safe_load(f.read())
loop = asyncio.get_event_loop()
tasks = []
for device in deviceinfos.get("devices"):
tasks.append(loop.create_task(get_config(device)))
loop.run_until_complete(asyncio.wait(tasks))
localtime = time.asctime(time.localtime(time.time()))
repo = Repo(FILEPATH)
repo.index.add("*")
repo.index.commit("Configs auto saving at %s" %localtime)
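# Illustrative sketch of the expected YAML layout (an assumption: the exact device
# fields depend on NetDevices.DeviceHandler, which is not shown here; the script
# itself only requires a top-level "devices" list whose items carry a "hostname"):
#
# devices:
#   - hostname: sw1.example.net
#     # ... other DeviceHandler-specific fields (credentials, platform, ...)
#   - hostname: sw2.example.net
#     # ...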
|
netdevops-engineer/newbie_book
|
Chapter13/Chapter13/Device.asyncio/Device6.py
|
Device6.py
|
py
| 989 |
python
|
en
|
code
| 36 |
github-code
|
6
|
15317442487
|
#!/usr/local/bin/python -tt
import sys
import os
import random
from copy import deepcopy
class Blackjack():
def __init__(self,coins):
self.player_coins = coins
self.player = 'challenger'
self.player_cards = 0
self.dealer_cards = 0
self.cards = []
self.hands = []
def new_deck(self,):
self.player = 'challenger'
self.player_cards = 0
self.dealer_cards = 0
cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
hands = [4]*13
self.cards = deepcopy(cards)
self.hands = deepcopy(hands)
def players_input(self, player):
while True:
try:
coins = int(input('Player please place your bet:\n'))
            except ValueError:
                print('Please enter whole numbers only\n')
continue
else:
if coins > self.player_coins:
print(f'You are low on funds !!! Bet below: {self.player_coins}')
continue
self.player_coins -= coins
break
def shuffle_cards(self):
while True:
num = random.randint(0,12)
if self.hands[num] == 0:
continue
else:
self.hands[num] -= 1
switcher={
1:11,
11:10,
12:10,
13:10
}
print(f'card : {self.cards[num]}')
return switcher.get(self.cards[num], self.cards[num])
def first_deal(self):
print(f'Dealer\'s cards:\n')
card1 = self.shuffle_cards()
card2 = self.shuffle_cards()
if card1 == card2 == 11:
card2 = 1
self.dealer_cards = card1 + card2
print(f'Dealer\'s Cards show: {self.dealer_cards}\n')
print(f'Players\'s cards:\n')
card1 = self.shuffle_cards()
card2 = self.shuffle_cards()
if card1 == card2 == 11:
card2 = 1
self.player_cards = card1 + card2
print(f'Player\'s Cards show: {self.player_cards}\n')
def deal_cards(self, player):
if not self.dealer_cards:
self.first_deal()
return
card = self.shuffle_cards()
if player == 'challenger':
print(f'Players\'s card:\n')
if card == 11 and self.player_cards > 10:
card = 1
self.player_cards += card
print(f'Player\'s Cards show: {self.player_cards}\n')
else:
print(f'Dealer\'s card:\n')
if card == 11 and self.dealer_cards > 10:
card = 1
self.dealer_cards += card
print(f'Dealer\'s Cards show: {self.dealer_cards}\n')
def bust(self, player):
if player == 'challenger' and self.player_cards > 21:
print('Player Busted!!! and Dealer Won the Game !!!\n')
return True
elif player == 'dealer' and self.dealer_cards > 21:
print('Dealer Busted!!! and Player Won the Game !!!\n')
return True
return False
def winnner(self, player):
if player == 'challenger' and self.player_cards == 21:
print('BlackJack:\nPlayer Won the Game !!!\n')
return True
elif player == 'dealer' and self.dealer_cards == 21:
print('BlackJack:\nDealer Won the Game !!!\n')
return True
return False
def bet(self, player, amount):
pass
def replay(self):
play = input('Try Your Luck Again ? Enter "yes" or "no"\n')
if play == 'yes':
self.new_deck()
return True
return False
def main():
coins = int(input('Player enter the no of chips you want to buy:\n'))
bjack = Blackjack(coins)
bjack.new_deck()
bjack.players_input(bjack.player)
while True:
bjack.deal_cards(bjack.player)
if bjack.winnner(bjack.player) or bjack.bust(bjack.player):
if bjack.replay():
continue
break
while True:
next_deal = input(f'{bjack.player} Choose your next move:\n Enter "hit" for deal or "stand" for hold\n')
if next_deal == 'hit':
break
elif next_deal == 'stand':
if bjack.player == 'challenger':
bjack.player = 'dealer'
else:
bjack.player = 'challenger'
continue
else:
print('Please enter a valid input!:\n')
if __name__ == '__main__':
main()
|
Pankaj-Ra/Basic_Python_programs
|
google-python-exercises/blackjack.py
|
blackjack.py
|
py
| 4,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34075662644
|
import random
import sys
from UI import Ui_MainWindow
from PyQt5.QtCore import QPoint, QRect
from PyQt5.QtGui import QPainter, QColor
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow
MAX_X = MAX_Y = 400
class MyWidget(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.circles = []
self.setupUi(self)
self.pushButton.clicked.connect(self.add_circle)
def add_circle(self):
size = random.randint(0, min(MAX_X, MAX_Y))
rx, ly = random.randint(size, MAX_X), random.randint(size, MAX_Y)
self.circles.append((QRect(QPoint(rx - size, ly - size), QPoint(rx, ly)),
QColor(random.randrange(256),
random.randrange(256),
random.randrange(256))))
self.update()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.draw(qp)
qp.end()
def draw(self, qp):
for rect, color in self.circles:
qp.setPen(color)
qp.drawEllipse(rect)
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_())
|
Orisphera/random-circles
|
main.py
|
main.py
|
py
| 1,188 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3081253994
|
# -*- coding: utf-8 -*-
"""Input
.. module:: input
:synopsis: Module for processing input
"""
import resoncalc.output as output
import resoncalc.detection as detection
from argparse import ArgumentParser
from os import path
from json import load, JSONDecodeError
def process_command():
"""Process command
usage: resoncalc [-h] [-o OUTPUT] [-v] [-s] [-g] [-t TITLE] input
Calculate eigenstates for potential
positional arguments:
input input file
options:
-h, --help show this help message and exit
-o OUTPUT, --output OUTPUT
output directory
-v, --verbose verbose mode
-s, --silent silent mode
-g, --generate generate graphs from data
      -t TITLE, --title TITLE
                            output title, used for generate
"""
# command options
parser = ArgumentParser(description='Calculate eigenstates for potential')
parser.add_argument('string', metavar='input', help='input file')
parser.add_argument('-o', '--output', help='output directory')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
parser.add_argument('-s', '--silent', action='store_true', help='silent mode')
parser.add_argument('-g', '--generate', action='store_true', help='generate graphs from data')
parser.add_argument('-t', '--title', help='output title, used for generate')
args = parser.parse_args()
# verbose
verbose = args.verbose
if (verbose):
output.log_level = 2
# silent
silent = args.silent
if (silent):
output.stdout = False
# generate
generate = args.generate
# title
title = args.title
# output
outdir = args.output
if (outdir is not None):
if (path.exists(outdir)):
output.outdir = outdir
else:
print('Output directory {0} not found'.format(outdir))
return -1
# input
infile = args.string
tests = None
if (path.exists(infile)):
if (generate):
output.generate_graphs(infile, title)
return 0
else:
tests = load_input(infile)
else:
print('Input file {0} not found'.format(infile))
return -1
# test processing
if (tests is not None):
try:
process_tests(tests)
return 0
except KeyboardInterrupt as ex:
print('Program terminated by user')
return -1
def load_input(fname):
"""Load input tests file
Args:
fname (str): input filename
Returns:
list: tests
"""
try:
with open(fname, 'r') as f:
tests = load(f)
if (type(tests) is not list):
tests = [tests]
return tests
except JSONDecodeError as ex:
print('Failed to parse input file {0}: {1}'.format(fname, ex))
return None
def process_tests(tests):
"""Process tests
Args:
tests (list): tests configuration
"""
for test in tests:
detection.perform_detection_loop(test)
|
hydratk/resoncalc
|
src/resoncalc/input.py
|
input.py
|
py
| 3,159 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28616374779
|
def main():
# establish connections to IO files
input_file = open('test2.txt', 'r')
output_file = open('output2.txt', 'w')
# initialize variables
average = 0
count = 0
# compute average
for line in input_file:
# int ignores whitespace and \n's
average = average + int(line)
count = count + 1
average = average / count
# save average to file
output_file.write(str(average))
input_file.close()
output_file.close()
main()
|
MasterNobikan/First_Python_Scripts
|
Read_Calculate.py
|
Read_Calculate.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7318961586
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^modules/$', views.modules, name='modules'),
url(r'^module/(?P<module>\w+)/list$', views.module_list, name='module_list'),
url(r'^module/(?P<module>\w+)/detail/(?P<id>[\w-]+)/$',
views.module_detail, name='module_detail'),
url(r'^module/(?P<module>\w+)/edit/(?P<id>[\w-]+)/$',
views.module_edit, name='module_edit'),
url(r'^module/(?P<module>\w+)/create/$',
views.module_create, name='module_create'),
url(r'^module/(?P<module>\w+)/remove/$',
views.module_remove_record, name='module_remove_record'),
url(r'^layouts/$', views.edit_layouts, name='edit_layouts'),
url(r'^user_records/(?P<module>\w+)/$',
views.user_records, name='user_records'),
url(r'^layout/list/(?P<module>\w+)/$',
views.edit_list_layout, name='edit_list_layout'),
url(r'^layout/filter/(?P<module>\w+)/$',
views.edit_filter_layout, name='edit_filter_layout'),
url(r'^layout/detail/(?P<module>\w+)/$',
views.edit_detail_layout, name='edit_detail_layout'),
url(r'^layout/edit/(?P<module>\w+)/$',
views.edit_edit_layout, name='edit_edit_layout'),
url(r'^layout/create/(?P<module>\w+)/$',
views.edit_create_layout, name='edit_create_layout'),
url(r'^roles/$', views.edit_roles, name='edit_roles'),
url(r'^role/(?P<role>\w+)$', views.edit_role, name='edit_role'),
url(r'^roles/delete$', views.delete_role, name='delete_role'),
url(r'^roles/create$', views.create_role, name='create_role'),
url(r'^note_attachment/(?P<id>[\w-]+)/$',
views.note_attachment, name='note_attachment'),
url(r'^add_case_update/$', views.add_case_update, name='add_case_update'),
url(r'^close_case/$', views.close_case, name='close_case'),
url(r'^reopen_case/$', views.reopen_case, name='reopen_case'),
url(r'^users/$', views.edit_users, name='edit_users'),
url(r'^user/(?P<user_id>\d+)$', views.edit_user, name='edit_user'),
url(r'^user_profile/$', views.user_profile, name='user_profile'),
url(r'^cache/$', views.cache, name='cache'),
url(r'^pdf_templates/$', views.pdf_templates, name='pdf_templates'),
url(r'^get_pdf/(?P<module>\w+)/(?P<id>[\w-]+)/$',
views.get_pdf, name='get_pdf'),
url(r'^index.php$', views.crm_entry_point, name='crm_entry_point'),
]
|
sanchezfauste/bPortal
|
portal/urls.py
|
urls.py
|
py
| 2,424 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8926398030
|
def cpn(a):
x=2
while x<a:
if a%x==0:
break
else:
x+=1
    if x==a:
print(a)
def main():
c=1
n=int(input('ENTER NO. TILL WHICH YOU WANT PRIME NOS. : '))
while c<=n:
cpn(c)
c+=1
print('done')
input()
main()
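# Worked example (illustration only): for an input of 10 the loop calls
# cpn(1) .. cpn(10); only 2, 3, 5 and 7 survive the trial division, so they
# are printed one per line, followed by 'done'.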
|
adarsh2818/beginningwithpy
|
primenos list.py
|
primenos list.py
|
py
| 339 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28978147496
|
import tkinter as tk
import pygubu
import cv2
import copy
import numpy as np
class Application:
def __init__(self, master):
self.master = master
#create builder
self.builder = builder = pygubu.Builder()
#load ui file
builder.add_from_file('hw1.ui')
#create a widget
self.mainwindow = builder.get_object('window', master)
#connect callback
builder.connect_callbacks(self)
def btn_quit_on_click(self):
self.master.quit()
#button for problem 1.1
def btn_11_on_click(self):
#add your code here
img = cv2.imread('dog.bmp')
height, width, channels = img.shape
cv2.imshow('img1.1',img)
print('Height = '+str(height))
print('Width = '+str(width))
cv2.waitKey(0)
#button for problem 1.2
def btn_12_on_click(self):
#add your code here
img = cv2.imread('color.png')
'''img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imshow('img1.2',img)'''
for x in range(0,len(img)):
for y in range(0,len(img[x])):
a = img[x][y][0]
b = img[x][y][1]
c = img[x][y][2]
img[x][y][0] = b
img[x][y][1] = c
img[x][y][2] = a
cv2.imshow('img1.2',img)
cv2.waitKey(0)
#button for problem 1.3
def btn_13_on_click(self):
#add your code here
img = cv2.imread('dog.bmp')
height, width, channels = img.shape
new_img = copy.deepcopy(img)
for x in range(height):
for y in range(width):
new_img[x][width-1-y] = img[x][y]
cv2.imshow('img1.3',new_img)
cv2.waitKey(0)
#button for problem 1.4
def btn_14_on_click(self):
def callback(x):
pass
#add your code here
cv2.namedWindow('img1.4')
cv2.createTrackbar('BLEND','img1.4',0,100,callback)
cv2.createTrackbar('OFF','img1.4',0,1,callback)
img = cv2.imread('dog.bmp')
height, width, channels = img.shape
new_img = copy.deepcopy(img)
for x in range(height):
for y in range(width):
new_img[x][width-1-y] = img[x][y]
while(True):
off = cv2.getTrackbarPos('OFF','img1.4')
if(off == 1):
break
blend = cv2.getTrackbarPos('BLEND','img1.4')
blend = blend/100
img_mix = cv2.addWeighted(img, (1-blend), new_img, blend, 0)
cv2.imshow('img1.4',img_mix)
cv2.waitKey(1)
#button for problem 2.1
def btn_21_on_click(self):
#add your code here
img = cv2.imread('eye.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray_img2.1', img)
detect_img = cv2.Canny(img, 150, 300)
cv2.imshow('detect_img2.1', detect_img)
cv2.waitKey(0)
#button for problem 2.2
def btn_22_on_click(self):
#add your code here
img = cv2.imread('eye.jpg')
cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
draw_img = np.ones(img.shape, dtype=np.uint8)
# HoughCircles has Canny detector itself
circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20,param1=300,param2=40,minRadius=10,maxRadius=50)
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(draw_img,(i[0],i[1]),i[2],(0,0,255),2)
# draw the center of the circle
#cv2.circle(fimg,(i[0],i[1]),2,(0,0,255),3)
# get Canny result to draw (has the same Canny parameter with HoughCircles)
fimg = cv2.Canny(img,150,300)
fimg = cv2.cvtColor(fimg, cv2.COLOR_GRAY2RGB)
# combine draw and Canny result
mix_draw = cv2.addWeighted(draw_img, 1, fimg, 1, 0)
cv2.imshow('detected circles',mix_draw)
cv2.waitKey(0)
#button for problem 2.3
def btn_23_on_click(self):
#add your code here
cv2.waitKey(0)
#button for problem 3
def btn_3_on_click(self):
#add your code here
cv2.waitKey(0)
#button for problem 4.1
def btn_41_on_click(self):
#add your code here
img = cv2.imread('shoes.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
#cv2.imshow('img4.1_1',img)
blur = cv2.GaussianBlur(img,(5,5),0)
#cv2.imshow('img4.1_2',blur)
median = cv2.medianBlur(blur,5)
cv2.imshow('img4.1',median)
cv2.waitKey(0)
#button for problem 4.2
def btn_42_on_click(self):
#add your code here
img = cv2.imread('shoes.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
cv2.imshow('img4.2_local',img)
blur = cv2.GaussianBlur(img,(5,5),0)
cv2.imshow('img4.2_Gussian_smooth',blur)
median = cv2.medianBlur(blur,5)
cv2.imshow('img4.2_median_filter',median)
if __name__ == '__main__':
root = tk.Tk()
app = Application(root)
root.mainloop()
|
F74036378/IMAGE_DEAL1
|
hw1.py
|
hw1.py
|
py
| 4,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27262011000
|
from database import DBHelper
from photo_filter import PhotoFilter
from photo_dao import PhotoDao
def task_gen_photo_meta(root, query, do_save_meta):
print('generating photo meta for %s'%(query));
filter = PhotoFilter();
dbHelper = DBHelper();
dbHelper.init(root);
photos = filter.get_photo_with_tag_and_unique(query, dbHelper);
if (do_save_meta):
photo_dao = PhotoDao(dbHelper)
for photo in photos:
photo_dao.savePhotoMeta(query, photo);
photos = filter.filter_photo_without_tags(photos);
outputPath = ''.join([dbHelper.datasetDir, '/', query, '.txt']);
print(outputPath);
fout = open(outputPath, 'w');
for photo in photos:
fout.write(photo.photoId)
fout.write('\n')
fout.close();
|
Asperado/iconic
|
data_collection/database_builder/core/task_gen_photo_meta.py
|
task_gen_photo_meta.py
|
py
| 708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29047649100
|
import atexit
import os
import re
import shutil
import subprocess
import sys
import tempfile
def usage():
sys.stderr.write('Usage: {} [-h] -d <directory> [-b <bitrate>]\n'.format(sys.argv[0]))
sys.stderr.write(' -h display help\n')
sys.stderr.write(' -b bitrate [32-320, default 192]\n')
sys.stderr.write(' -d the target directory\n')
sys.exit(1)
def get_track_file(path_prefix, track):
return "%s/%02d %s.mp3" % (path_prefix, int(track['track']), sanitize(track['title']))
def sanitize(s):
return s.replace("/", "-").replace(":", " -")
if __name__ == "__main__":
target_dir = None
bitrate = 192
argc = 1
while argc + 1 < len(sys.argv):
if sys.argv[argc] == "-h":
usage()
elif sys.argv[argc] == "-d":
target_dir = os.path.abspath(sys.argv[argc + 1])
argc += 2
elif sys.argv[argc] == "-b":
bitrate = int(sys.argv[argc + 1])
argc += 2
else:
break
if target_dir is None or argc < len(sys.argv):
usage()
if not os.path.isdir(target_dir):
sys.stderr.write("Directory '{}' doesn't exist\n".format(target_dir))
exit(1)
artist = None
title = None
year = None
genre = None
tracks = list()
work_dir = tempfile.mkdtemp()
os.chdir(work_dir)
atexit.register(shutil.rmtree, work_dir)
subprocess.check_call(["cdda2wav", "-alltracks", "-cddb", "1"])
for file in os.listdir(work_dir):
if file.endswith(".inf"):
path = os.path.join(work_dir, file)
with open(path, mode="r", encoding="iso8859-1") as f:
track = dict()
track['file'] = path[:-3] + "wav"
for line in f:
parts = re.split(r"\s*=\s*", line.rstrip(), 1)
if parts[0] == "Albumperformer" and artist is None:
artist = parts[1].rstrip("'").lstrip("'")
elif parts[0] == "Albumtitle" and title is None:
title = parts[1].rstrip("'").lstrip("'")
elif parts[0] == "Tracknumber":
track['track'] = parts[1]
elif parts[0] == "Tracktitle":
track['title'] = parts[1].rstrip("'").lstrip("'")
tracks.append(track)
cddb_file = os.path.join(work_dir, "audio.cddb")
if os.path.exists(cddb_file):
with open(cddb_file, mode="r", encoding="iso8859-1") as f:
for line in f:
parts = re.split(r"\s*=\s*", line.rstrip(), 1)
if parts[0] == "DYEAR":
year = parts[1]
elif parts[0] == "DGENRE":
genre = parts[1]
track_count = len(tracks)
if track_count == 0:
sys.stderr.write("No CDDB information available. Please process the files in {} manually\n".format(work_dir))
atexit.unregister(shutil.rmtree)
exit(1)
path_prefix = os.path.join(target_dir, sanitize(artist), sanitize(title))
os.makedirs(path_prefix, exist_ok=True)
album_args = ["--ta", artist, "--tl", title, "--ty", year, "--tg", genre]
for track in tracks:
subprocess.check_call(["lame", "-b", str(bitrate), "-B", str(bitrate), "--tt", track['title'], "--tn",
"{}/{}".format(track['track'], track_count)] + album_args +
[track['file'], get_track_file(path_prefix, track)])
|
eskaton/py-ripcd
|
ripcd.py
|
ripcd.py
|
py
| 3,527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43987707276
|
# test CurlypivSetup
"""
Notes about program
"""
# 1.0 import modules
import numpy as np
# plotting
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import colors
import matplotlib.image as mgimg
from matplotlib import animation
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
matplotlib.rcParams['figure.figsize'] = (7, 6)
import matplotlib.font_manager as fm
fontprops = fm.FontProperties(size=16, weight='bold')
font = {'family' : 'sans-serif',
'weight' : 'light',
'size' : 16}
matplotlib.rc('font', **font)
matplotlib.rcParams['font.sans-serif'] = ['Helvetica']
# OpenPIV
# ----- imports for OpenPIV -----
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/Users/mackenzie/PythonProjects/openpiv')
import openpiv.piv
from openpiv import windef
from openpiv.windef import Settings
# 2.0 define class
class CurlypivPIVSetup(object):
def __init__(self, name, save_text, save_plot, testCollection, testSetup,
win1=128, win2=64, show_plot=False, save_plot_path=None, save_text_path=None, vectors_on_image=True,
calculate_zeta=False, replace_Nans_with_zeros=True, save_u_mean_plot=False,
u_min=-40, u_max=40, v_min=-2.5, v_max=2.5):
"""
Notes
"""
# setup
self._name = name
self.save_text = save_text
self.save_plot = save_plot
self.save_u_mean_plot = save_u_mean_plot
self.save_text_path = save_text_path
self.save_plot_path = save_plot_path
self.show_plot = show_plot
self.calculate_zeta = calculate_zeta
# OpenPIV
self.settings = Settings()
# plotting
self.vectors_on_image = vectors_on_image
self.settings.scale_plot = 1
self.colorMap = 'plasma'
self.colorNorm = colors.Normalize(vmin=0, vmax=75)
self.alpha = 0.65
self.scalebar_microns = int(2500 / testSetup.optics.microscope.objective.magnification) # units are microns
self.dpi = 200
# camera
self.img_acq = testSetup.optics.microscope.ccd.img_acq_rate
self.dtf = 1/self.img_acq
self.pixels_to_microns = testSetup.optics.microscope.objective.pixel_to_micron
self.pix_per_um = 1/self.pixels_to_microns
# experimental
self.E_max = 10e3
self.particle_diameter = testSetup.optics.fluorescent_particles.diameter
self.est_zeta = testSetup.chip.channel.material_bottom_wall_surface.zeta
# scientific
self.epsr = 80
self.eps = self.epsr*8.854e-12
self.mu = testSetup.chip.channel.material_fluid.viscosity
# outputs
self.est_u_eof = self.eps*self.est_zeta*self.E_max/self.mu*1e6
self.char_u_eof = -self.est_u_eof*self.pix_per_um*self.dtf
self.char_u = int(np.round(self.char_u_eof))
# more OpenPIV
self.settings.correlation_method = 'linear'
self.settings.normalized_correlation = True
self.settings.deformation_method = 'symmetric' # 'symmetric' or 'second image'
self.settings.windowsizes = (win1, win2) # sizex//4, sizex//8 suggestion is these are power of 2 of each other
self.settings.overlap = (win1//2, win2//2) # should be 50%-75% of window size (Raffel)
self.settings.num_iterations = len(self.settings.windowsizes)
self.settings.subpixel_method = 'gaussian' # subpixel interpolation: 'gaussian','centroid','parabolic'
self.settings.interpolation_order = 3 # interpolation order for the window deformation (suggested: 3-5)
self.settings.scaling_factor = self.pix_per_um # scaling factor pixel/meter
self.settings.dt = self.dtf # time between to frames (in seconds)
self.settings.ROI = ('full')
# snr parameters
self.mask_first_pass = True # Mask first pass
self.mask_multi_pass = True
#self.settings.extract_sig2noise = True # Compute SNR for last pass / if False: SNR set to NaN in output txt.
self.settings.image_mask = True # Do image masking
self.settings.sig2noise_method = 'peak2peak' # Method to compute SNR: 'peak2peak' or 'peak2mean'
self.settings.sig2noise_mask = 3 # (2 - 5) exclusion distance between highest peak and second highest peak in correlation map
# min/max velocity vectors for validation
self.u_min = u_min # microns / second
self.u_max = u_max
self.v_min = v_min # microns / second
self.v_max = v_max
self.settings.MinMax_U_disp = (self.u_min * self.pix_per_um * self.dtf, self.u_max * self.pix_per_um * self.dtf) # filter u (units: pixels/frame)
self.settings.MinMax_V_disp = (self.v_min * self.pix_per_um * self.dtf, self.v_max * self.pix_per_um * self.dtf) # filter v (units: pixels/frame)
# vector validation
self.settings.validation_first_pass = True # Vector validation of first pass
self.u_uncertainty = 10 # if std(u)*2 < uncertainty: don't apply global std threshold
self.v_uncertainty = 10 # if std(v)*2 < uncertainty: don't apply global std threshold
self.settings.std_threshold = 2.75 # global std validation threshold: global mean +/- stdev * std_threshold
self.settings.median_threshold = 2.75 # median validation threshold
self.settings.median_size = 2 # defines the size of the local median kernel
self.settings.sig2noise_validate = True # Enables validation by SNR ratio
self.settings.sig2noise_threshold = 1.2 # [1.2-1.5] Sets snr threshold for removing vectors (R. D. Keane and R. J. Adrian, Measurement Science & Technology, 1990)
# outlier replacement and smoothing
self.settings.replace_vectors = True # Outlier replacement for last pass
self.replace_Nans_with_zeros = replace_Nans_with_zeros # Outlier replacement where all Nans = 0
self.settings.smoothn = False # Enables Garcia smoothing function of velocity fields
self.settings.smoothn_p = [0.01] # [0.5] Smoothing parameter or auto-calculated using generalized cross-validation (GCV) method
self.settings.filter_method = 'distance' # Replace outlier vector method: localmean [square] or disk (unweighted circle), distance (weighted circle)
self.settings.max_filter_iteration = 3 # maximum iterations performed to replace the outliers (max 10)
self.settings.filter_kernel_size = 2 # kernel size for replacing outlier vectors (default
self.settings._freeze()
# print PIV settings
print('Min/Max U-displacement: ', self.settings.MinMax_U_disp, ' (pixels/frame)')
print('Min/Max U-displacement: ', np.array([self.settings.MinMax_U_disp[0], self.settings.MinMax_U_disp[1]], dtype=int)*self.pixels_to_microns/self.dtf, ' (um/s)')
print('Min/Max V-displacement: ', self.settings.MinMax_V_disp, ' (pixels/frame)')
print('Min/Max V-displacement: ', np.array([self.settings.MinMax_V_disp[0],self.settings.MinMax_V_disp[1]], dtype=int)*self.pixels_to_microns/self.dtf, ' (um/s)')
|
sean-mackenzie/curlypiv
|
curlypiv/CurlypivPIVSetup.py
|
CurlypivPIVSetup.py
|
py
| 7,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38299570465
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
if not head or not head.next or k == 0: return head
copy = head
length = 0
slow = head
while slow:
slow = slow.next
length += 1
k %= length
if k == 0:
return head
i = length - k
curr = head
for _ in range(i - 1):
curr = curr.next
next = curr.next
head = curr.next
curr.next = None
curr = head
for _ in range(k - 1):
curr = curr.next
curr.next = copy
return head
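# Worked example (illustration only): head = 1->2->3->4->5, k = 2.
# length = 5 and k % 5 = 2, so the list is cut after node length - k = 3:
# curr stops at node 3, the new head becomes node 4, node 3's next is cleared,
# and node 5 is reconnected to the old head, giving 4->5->1->2->3.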
|
nanup/Data-Structures-And-Algorithms
|
61-rotate-list/61-rotate-list.py
|
61-rotate-list.py
|
py
| 811 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4592746162
|
__all__ = [
'Canceled',
'DuplicateDestinationError',
'Record',
'get_download_manager',
]
import os
import dbus
import logging
from collections import namedtuple
from io import StringIO
from pprint import pformat
try:
import pycurl
except ImportError: # pragma: no cover
pycurl = None
log = logging.getLogger('systemimage')
class Canceled(Exception):
"""Raised when the download was canceled."""
class DuplicateDestinationError(Exception):
"""Raised when two files are downloaded to the same destination."""
def __init__(self, duplicates):
super().__init__()
self.duplicates = duplicates
def __str__(self):
return '\n' + pformat(self.duplicates, indent=4, width=79)
# A namedtuple is convenient here since we want to access items by their
# attribute names. However, we also want to allow for the checksum to default
# to the empty string. We do this by creating a prototypical record type and
# using _replace() to replace non-default values. See the namedtuple
# documentation for details.
_Record = namedtuple('Record', 'url destination checksum')('', '', '')
_RecordType = type(_Record)
def Record(url, destination, checksum=''):
return _Record._replace(
url=url, destination=destination, checksum=checksum)
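# A small usage sketch (illustrative only; the URL and path below are made up):
#
#     Record('http://example.com/image.tar.xz', '/tmp/image.tar.xz')
#     # -> Record(url='http://example.com/image.tar.xz',
#     #           destination='/tmp/image.tar.xz', checksum='')
#
# i.e. the checksum defaults to the empty string when it is not supplied.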
class DownloadManagerBase:
"""Base class for all download managers."""
def __init__(self):
"""
:param callback: If given, a function that is called every so often
during downloading.
:type callback: A function that takes two arguments, the number
of bytes received so far, and the total amount of bytes to be
downloaded.
"""
# This is a list of functions that are called every so often during
# downloading. Functions in this list take two arguments, the number
# of bytes received so far, and the total amount of bytes to be
# downloaded.
self.callbacks = []
self.total = 0
self.received = 0
self._queued_cancel = False
def __repr__(self): # pragma: no cover
return '<{} at 0x{:x}>'.format(self.__class__.__name__, id(self))
def _get_download_records(self, downloads):
"""Convert the downloads items to download records."""
records = [item if isinstance(item, _RecordType) else Record(*item)
for item in downloads]
destinations = set(record.destination for record in records)
# Check for duplicate destinations, specifically for a local file path
# coming from two different sources. It's okay if there are duplicate
# destination records in the download request, but each of those must
# be specified by the same source url and have the same checksum.
#
# An easy quick check just asks if the set of destinations is smaller
# than the total number of requested downloads. It can't be larger.
# If it *is* smaller, then there are some duplicates, however the
# duplicates may be legitimate, so look at the details.
#
# Note though that we cannot pass duplicates destinations to udm, so we
# have to filter out legitimate duplicates. That's fine since they
# really are pointing to the same file, and will end up in the
# destination location.
if len(destinations) < len(downloads):
by_destination = dict()
unique_downloads = set()
for record in records:
by_destination.setdefault(record.destination, set()).add(
record)
unique_downloads.add(record)
duplicates = []
for dst, seen in by_destination.items():
if len(seen) > 1:
# Tuples will look better in the pretty-printed output.
duplicates.append(
(dst, sorted(tuple(dup) for dup in seen)))
if len(duplicates) > 0:
raise DuplicateDestinationError(sorted(duplicates))
# Uniquify the downloads.
records = list(unique_downloads)
return records
def _do_callback(self):
# Be defensive, so yes, use a bare except. If an exception occurs in
# the callback, log it, but continue onward.
for callback in self.callbacks:
try:
callback(self.received, self.total)
except:
log.exception('Exception in progress callback')
def cancel(self):
"""Cancel any current downloads."""
self._queued_cancel = True
def pause(self):
"""Pause the download, but only if one is in progress."""
pass # pragma: no cover
def resume(self):
"""Resume the download, but only if one is in progress."""
pass # pragma: no cover
def _get_files(self, records, pausable, signal_started):
raise NotImplementedError # pragma: no cover
def get_files(self, downloads, *, pausable=False, signal_started=False):
"""Download a bunch of files concurrently.
Occasionally, the callback is called to report on progress.
This function blocks until all files have been downloaded or an
exception occurs. In the latter case, the download directory
will be cleared of the files that succeeded and the exception
will be re-raised.
This means that 1) the function blocks until all files are
downloaded, but at least we do that concurrently; 2) this is an
all-or-nothing function. Either you get all the requested files
or none of them.
:params downloads: A list of `download records`, each of which may
either be a 2-tuple where the first item is the url to download,
and the second item is the destination file, or an instance of a
`Record` namedtuple with attributes `url`, `destination`, and
`checksum`. The checksum may be the empty string.
:type downloads: List of 2-tuples or `Record`s.
:param pausable: A flag specifying whether this download can be paused
or not. In general, data file downloads are pausable, but
preliminary downloads are not.
:type pausable: bool
:param signal_started: A flag indicating whether the D-Bus
DownloadStarted signal should be sent once the download has
started. Normally this is False, but it should be set to True
when the update files are being downloaded (i.e. not for the
metadata files).
:type signal_started: bool
:raises: FileNotFoundError if any download error occurred. In
this case, all download files are deleted.
:raises: DuplicateDestinationError if more than one source url is
downloaded to the same destination file.
"""
if self._queued_cancel:
# A cancel is queued, so don't actually download anything.
raise Canceled
if len(downloads) == 0:
# Nothing to download. See LP: #1245597.
return
records = self._get_download_records(downloads)
# Better logging of the requested downloads. However, we want the
# entire block of multiline log output to appear under a single
# timestamp.
fp = StringIO()
print('[0x{:x}] Requesting group download:'.format(id(self)), file=fp)
for record in records:
if record.checksum == '':
print('\t{} -> {}'.format(*record[:2]), file=fp)
else:
print('\t{} [{}] -> {}'.format(*record), file=fp)
log.info('{}'.format(fp.getvalue()))
self._get_files(records, pausable, signal_started)
@staticmethod
def allow_gsm():
"""Allow downloads on GSM.
This is a temporary override for the `auto_download` setting.
If a download was attempted on wifi-only and not started because
the device is on GSM, calling this issues a temporary override
to allow downloads while on GSM, for download managers that
support this (currently only UDM).
"""
pass # pragma: no cover
def get_download_manager(*args):
# We have to avoid circular imports since both download managers import
# various things from this module.
from systemimage.curl import CurlDownloadManager
from systemimage.udm import DOWNLOADER_INTERFACE, UDMDownloadManager
# Detect if we have ubuntu-download-manager.
#
# Use PyCURL based downloader if no udm is found, or if the environment
# variable is set. However, if we're told to use PyCURL and it's
# unavailable, throw an exception.
cls = None
use_pycurl = os.environ.get('SYSTEMIMAGE_PYCURL')
if use_pycurl is None:
# Auto-detect. For backward compatibility, use udm if it's available,
# otherwise use PyCURL.
try:
bus = dbus.SystemBus()
bus.get_object(DOWNLOADER_INTERFACE, '/')
udm_available = True
except dbus.exceptions.DBusException:
udm_available = False
if udm_available:
cls = UDMDownloadManager
elif pycurl is None:
raise ImportError('No module named {}'.format('pycurl'))
else:
cls = CurlDownloadManager
else:
cls = (CurlDownloadManager
if use_pycurl.lower() in ('1', 'yes', 'true')
else UDMDownloadManager)
return cls(*args)
|
ubports/system-image
|
systemimage/download.py
|
download.py
|
py
| 9,745 |
python
|
en
|
code
| 2 |
github-code
|
6
|
43848174354
|
import warnings
from threading import Lock
from queue import Queue, PriorityQueue, Empty
class PubSubBase():
"""
    Internal base class, not meant to be instantiated directly.
    Please use classes PubSub and PubSubPriority.
    The algorithms for the thread-safe functionality were designed
    by Zhen Wang : congratulations to him !
For limitations, see __init__() constructor parameters and default
values.
This class is based on thread-safe FIFO queue Python
implementation and was designed thread-safe by Zhen Wang.
"""
def __init__(self, max_queue_in_a_channel=100, max_id_4_a_channel=2**31):
"""
Create an object to be used as a communicator in a project
between publishers and subscribers
        Optional parameters :
        - max_queue_in_a_channel : (be careful, modify if necessary)
            - Maximum number of messages in a channel.
- Default value: 100
            - If you intend to send a lot of messages on a channel,
              please increase this parameter's value accordingly,
              otherwise the channel queue will overflow: further
              messages are then ignored for that queue (with a warning)
              until it has room again.
- max_id_4_a_channel : (don't modify)
- Maximum value for message 'id' field value on a
communication channel.
Used to prevent negative message ids
to appear when number of messages broadcasted by
this channel is very big.
- Default value: 2**31
"""
self.max_queue_in_a_channel = max_queue_in_a_channel
self.max_id_4_a_channel = max_id_4_a_channel
self.channels = {}
self.count = {}
self.channels_lock = Lock()
self.count_lock = Lock()
def subscribe_(self, channel, is_priority_queue):
"""
Return a synchronised FIFO queue object used by a subscriber
        to listen to messages sent by publishers on a given channel.
        It does not matter if the channel does not exist yet.
Ref.: https://docs.python.org/3/library/queue.html
Parameters:
- channel : the channel to listen to.
        - is_priority_queue : True for a priority FIFO queue that delivers
                              messages according to their priority, False
                              for a plain FIFO queue.
"""
if not channel:
raise ValueError('channel : None value not allowed')
if channel not in self.channels:
self.channels_lock.acquire()
# Need to check again
if channel not in self.channels:
self.channels[channel] = []
self.channels_lock.release()
message_queue = None
if is_priority_queue:
message_queue = ChanelPriorityQueue(self, channel)
else:
message_queue = ChanelQueue(self, channel)
self.channels[channel].append(message_queue)
return message_queue
def unsubscribe(self, channel, message_queue):
"""
Used by a subscriber who doesn't want to receive messages
on a given channel and on a queue (message_queue)
obtained previously by subscribe method.
"""
if not channel:
raise ValueError('channel : None value not allowed')
if not message_queue:
raise ValueError('message_queue : None value not allowed')
if channel in self.channels:
self.channels[channel].remove(message_queue)
def publish_(self, channel, message, is_priority_queue, priority):
"""
Called by publisher.
        Send a message on a channel; all subscribers registered on this
        communication channel are going to receive the message.
        If the channel doesn't exist, it is created.
        If nobody is listening on the channel (as is often the case in
            real life), that is fine: the message is simply not delivered.
        If a channel queue overflows, i.e. the number of messages already
            queued is larger than the max_queue_in_a_channel parameter value,
            a warning is issued and the message is ignored for that queue.
            The queue can be used again later, once it is no longer full.
Parameters :
- channel : a string identifying the channel
- message : payload that will be carried by the message.
        - is_priority_queue : True for a priority FIFO queue that delivers
                              messages according to their priority, False
                              for a plain FIFO queue.
        - priority (lowest value = delivered first to listeners) :
            - Integer for the importance of this message.
            - Default value: 100
            - 0 is the highest priority
        Each message received by a subscriber through the listen() method is a
        Python dictionary with 2 keys registered inside; see the listen()
        method documentation for more.
"""
if priority < 0:
raise ValueError('priority must be > 0')
if not channel:
raise ValueError('channel : None value not allowed')
if not message:
raise ValueError('message : None value not allowed')
if channel not in self.channels:
self.channels_lock.acquire()
# Need to check again
if channel not in self.channels:
self.channels[channel] = []
self.channels_lock.release()
# Update message self.counts
self.count_lock.acquire()
if channel not in self.count:
self.count[channel] = 0
else:
self.count[channel] = ((self.count[channel] + 1) %
self.max_id_4_a_channel)
self.count_lock.release()
# ID of current message
_id = self.count[channel]
# Push message to all subscribers in channel
for channel_queue in self.channels[channel]:
# Check if queue overflowed
if channel_queue.qsize() >= self.max_queue_in_a_channel:
warnings.warn((
f"Queue overflow for channel {channel}, "
f"> {self.max_queue_in_a_channel} "
"(self.max_queue_in_a_channel parameter)"))
else: # No overflow on this channel_queue
# Build and send message for this queue
if is_priority_queue:
# OrderedDict dictionnary for sorting message
# on their id if they have the same priority.
channel_queue.put((priority,
OrderedDict(data=message, id=_id)),
block=False)
else:
channel_queue.put({'data': message, 'id': _id},
block=False)
class ChanelQueue(Queue):
"""
A FIFO queue for a channel.
"""
def __init__(self, parent, channel):
"""
Create a new queue for the channel
Parameters :
- parent : communicator parent
- channel : string for the name of the channel
"""
super().__init__()
self.parent = parent
self.name = channel
def listen(self, block=True, timeout=None):
"""
Called by a subscriber when he wants to get messages from
a channel.
This is an iterator that can be used to get messages sent by a
publisher in the queue.
Iterator can be casted in Python list to get all messages in it
with : msgs = list(messageQueue.listen(block=False))
Messages returned are of type dictionary with 2 keys registered by
by publish() method:
'data' : the message's payload that was put in the queue by
publishers (see publish() method).
'id' : Number of this message on the current channel
Parameters :
- block (default value: True) and timeout (default value: None)
and behaviours if no message is in the queue.
Documentation can be found in
Python official Queue documentation and especially in its get()
method : see https://docs.python.org/3/library/queue.html
- timeout : None : no timeout or positive integer see
Python official Queue documentation and especially in its get()
method : see https://docs.python.org/3/library/queue.html
"""
while True:
try:
data = self.get(block=block, timeout=timeout)
assert isinstance(data, dict) and len(data) == 2,\
"Bad data in chanel queue !"
yield data
except Empty:
return
def unsubscribe(self):
"""
Used by a subscriber who doesn't want to receive messages
on a given this channel and on a this queue
"""
self.parent.unsubscribe(self.name, self)
class ChanelPriorityQueue(PriorityQueue):
"""
A FIFO priority queue for a channel.
"""
def __init__(self, parent, channel):
"""
See : ChanelQueue.__init__() method
"""
super().__init__()
self.parent = parent
self.name = channel
def listen(self, block=True, timeout=None):
"""
See : ChanelQueue.listen() method
"""
while True:
try:
priority_data = self.get(block=block, timeout=timeout)
assert isinstance(priority_data, tuple) and \
len(priority_data) == 2 and \
isinstance(priority_data[1], dict) and \
len(priority_data[1]) == 2, "Bad data in chanel queue !"
yield priority_data[1]
except Empty:
return
def unsubscribe(self):
"""
Used by a subscriber who doesn't want to receive messages
on a given this channel and on a this queue
"""
self.parent.unsubscribe(self.name, self)
class PubSub(PubSubBase):
"""
Implement communication Design Pattern : Publish-subscribe
Ref : https://en.wikipedia.org/wiki/Publish–subscribe_pattern
Publishers write messages on channels and subscribers get them
in an asynchronous way.
For limitations, see PubSubBase.__init__() constructor
parameters and default values.
This class is based on thread-safe FIFO queue standard Python
implementation and was designed thread-safe by Zhen Wang.
"""
def subscribe(self, channel):
"""
Return a synchronised normal FIFO queue object
        used by a subscriber to listen to messages sent
        by publishers on a given channel.
        It does not matter if the channel does not exist yet.
        See PubSubBase.subscribe_() for more details
Parameter:
- channel : the channel to listen to.
"""
return self.subscribe_(channel, False)
def publish(self, channel, message):
"""
See PubSubBase.publish() for more details
"""
self.publish_(channel, message, False, priority=100)
class PubSubPriority(PubSubBase):
"""
Same as PubSub class but deal with messages priorities.
Send registred messages in priority order (lowest first)
For limitations, see PubSub __init__() constructor parameters and
default values.
This class is based on thread-safe FIFO PriorityQueue Python
implementation.
"""
def subscribe(self, channel):
"""
Return a synchronised FIFO priority queue object
        used by a subscriber to listen to messages sent
        by publishers on a given channel.
        It does not matter if the channel does not exist yet.
See PubSubBase.subscribe_() for more details
Parameter:
- channel : the channel to listen to.
"""
return self.subscribe_(channel, True)
def publish(self, channel, message, priority=100):
"""
See PubSubBase.publish() for more details
"""
self.publish_(channel, message, True, priority)
class OrderedDict(dict):
"""
    A dictionary sub-class that implements the < operator,
    using the id field to order messages that have
    the same priority
"""
def __lt__(self, other):
"""
For sorting messages with same priority from oldest to newest
Return True if this element id is lower than other element
given in parameter.
"""
return self['id'] < other['id']
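# A minimal usage sketch (added for illustration; the channel name and payload
# below are examples only, not from the original module):
if __name__ == '__main__':
    communicator = PubSub()
    queue = communicator.subscribe('weather')           # subscriber side
    communicator.publish('weather', 'sunny tomorrow')   # publisher side
    for message in queue.listen(block=False):
        print(message['id'], message['data'])           # -> 0 sunny tomorrow
    queue.unsubscribe()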
|
Thierry46/pubsub
|
pubsub.py
|
pubsub.py
|
py
| 12,494 |
python
|
en
|
code
| 5 |
github-code
|
6
|
73928144189
|
import django_rq
import logging
from datetime import datetime, timedelta
import time
from django.core.management.base import BaseCommand
from django_rq import job
from reviews.models import Review
#@job
def get_count_reviews():
logger = logging.getLogger('counter')
count = Review.objects.count()
time.sleep(4)
if count:
        logger.info(f'Total reviews in the system: {count}')
else:
logger.error('Something went wrong!')
class Command(BaseCommand):
    help = "Prints the number of records in the Review table to the screen and to the log"
def handle(self, *args, **options):
scheduler = django_rq.get_scheduler('default')
scheduler.schedule(
scheduled_time=datetime.now(),
func=get_count_reviews,
interval=10,
repeat=4,
)
|
idesu/review_moderation_lite
|
reviews/management/commands/log.py
|
log.py
|
py
| 881 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42542599040
|
while True:
try:
rows, columns = [int(w) for w in input().strip().split(" ")]
matrix = []
for _ in range(rows):
matrix.append([int(w) for w in input().strip().split(" ")])
initial_position = ()
pokemon_position = ()
for idx_row, row in enumerate(matrix):
for idx_column, element in enumerate(row):
if element == 1:
initial_position = (idx_row,idx_column)
if element == 2:
pokemon_position = (idx_row,idx_column)
time_to_get_pokemon = abs(initial_position[0] - pokemon_position[0]) + abs(initial_position[1] - pokemon_position[1])
print(time_to_get_pokemon)
except EOFError:
break
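# Worked example (illustration only): for the input
#   3 4
#   1 0 0 0
#   0 0 0 0
#   0 0 0 2
# the start is at (0, 0) and the pokemon at (2, 3), so the printed time is
# |0 - 2| + |0 - 3| = 5 steps (the Manhattan distance).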
|
Trypion/aula_OOP
|
modulo7/ultimo_analogimon.py
|
ultimo_analogimon.py
|
py
| 679 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31440178142
|
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('',views.login,name='loginform'),
# path('OTP/',views.otp,name="otp"),
path('verifyotp/',views.verifyotp,name='verifyotp'),
path('multiform/',views.multiform,name='multiform'),
path("payment_index/", views.payment_home, name="payment_home"),
path("payment/", views.order_payment, name="payment"),
path("callback/", views.callback, name="callback"),
]
|
HIRAPARANAYAN/verni-by-s
|
authentication/urls.py
|
urls.py
|
py
| 490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39312594943
|
import os
import io
import base64
import requests
from bs4 import BeautifulSoup, Tag
from collections import Counter
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.corpus import words
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tempfile
import gradio as gr
import openai
from googlesearch import search
from pytrends.request import TrendReq
from sklearn.manifold import MDS, TSNE
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from IPython.display import HTML
import numpy as np
import matplotlib.cm as cm
from urllib.parse import urlparse, urljoin
from celery import Celery
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('words')
# Set your OpenAI API key here
openai.api_key = os.environ['OPENAI_API_KEY']
#@title Define functions
def get_image_html(fig):
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
return '<img src="data:image/png;base64,{}"/>'.format(base64.b64encode(buf.getvalue()).decode('ascii'))
def search_top_competitors(keywords, num_results=10):
competitors = set()
for keyword in keywords:
for url in search(keyword, num_results=num_results):
competitors.add(url)
return list(competitors)
def get_page_content(url):
response = requests.get(url)
return BeautifulSoup(response.text, 'html.parser')
def get_meta_tags(soup):
meta_tags = soup.find_all('meta')
return {tag.get('name'): tag.get('content') for tag in meta_tags if tag.get('name')}
def get_heading_tags(soup):
headings = {}
for tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
headings[tag] = [heading.text for heading in soup.find_all(tag)]
return headings
def analyze_keywords(keywords_counter, top_n=10):
return keywords_counter.most_common(top_n)
def visualize_keywords(keywords_counter, top_n=10):
common_keywords = analyze_keywords(keywords_counter, top_n)
df = pd.DataFrame(common_keywords, columns=['Keyword', 'Count'])
df.set_index('Keyword', inplace=True)
df.plot(kind='bar', figsize=(12, 6))
plt.title('Top Keywords')
plt.xlabel('Keywords')
plt.ylabel('Frequency')
fig = plt.gcf() # Get the current figure
plt.tight_layout()
temp_image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
plt.savefig(temp_image_file.name, format='png')
plt.close()
return temp_image_file.name
def plot_trends(keywords):
pytrends = TrendReq(hl='en-US', tz=360, retries=3)
pytrends.build_payload(keywords, cat=0, timeframe='today 12-m', geo='', gprop='')
trends_data = pytrends.interest_over_time()
return trends_data
def preprocess_text(text, min_word_length=3):
stop_words = set(stopwords.words('english'))
words = word_tokenize(text.lower())
words = [word for word in words if word.isalnum()]
words = [word for word in words if len(word) >= min_word_length and word not in stop_words]
return words
def visualize_clusters(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
mds = MDS(n_components=2, dissimilarity='precomputed', random_state=42)
distance_matrix = 1 - cosine_similarity(matrix)
coords = mds.fit_transform(distance_matrix)
x, y = coords[:, 0], coords[:, 1]
for i, word in enumerate(words):
plt.scatter(x[i], y[i], alpha=0.5)
plt.text(x[i], y[i], word, fontsize=10)
plt.title('Word Clusters based on Thematic Relatedness')
plt.show()
def create_cluster_table(words, model, clusters):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
# Create a dictionary to store words per cluster
cluster_dict = {}
for i, word in enumerate(words):
cluster_id = clusters[i]
if cluster_id not in cluster_dict:
cluster_dict[cluster_id] = []
cluster_dict[cluster_id].append(word)
# Create a DataFrame from the dictionary
max_words = max(len(cluster_words) for cluster_words in cluster_dict.values())
num_clusters = len(cluster_dict)
data = {f"Cluster {i}": cluster_dict.get(i, []) + [None] * (max_words - len(cluster_dict.get(i, [])))
for i in range(num_clusters)}
df = pd.DataFrame(data)
return df
def clean_text(text):
# Separate words that are meant to be separated
text = re.sub(r'([a-z])([A-Z])', r'\1 \2', text)
# Tokenize the text
tokens = nltk.word_tokenize(text)
# Remove nonsensical words
try:
english_words = set(words)
except:
english_words = set(words.words())
clean_tokens = [token for token in tokens if token.lower() in english_words or token.istitle()]
# Join tokens back into a string
clean_text = ' '.join(clean_tokens)
return clean_text
def visualize_clusters_og(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
n_clusters = 5
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
clusters = kmeans.fit_predict(matrix)
tsne = TSNE(n_components=2, random_state=42)
coords = tsne.fit_transform(matrix)
x, y = coords[:, 0], coords[:, 1]
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
plt.figure(figsize=(8, 8))
for i, word in enumerate(words):
plt.scatter(x[i], y[i], c=[colors[clusters[i]]], alpha=0.7)
plt.text(x[i], y[i], word, fontsize=10)
plt.xticks([])
plt.yticks([])
plt.title('Word Clusters based on Thematic Relatedness')
plt.show()
def visualize_clusters_plot(words, model):
matrix = np.zeros((len(words), model.vector_size))
for i, word in enumerate(words):
matrix[i, :] = model.wv[word]
n_clusters = 4
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
clusters = kmeans.fit_predict(matrix)
try:
tsne = TSNE(n_components=2, random_state=42)
coords = tsne.fit_transform(matrix)
except ValueError:
max_perplexity = len(words) - 1
tsne = TSNE(n_components=2, random_state=42, perplexity=max_perplexity)
coords = tsne.fit_transform(matrix)
x, y = coords[:, 0], coords[:, 1]
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
fig, axs = plt.subplots(2, 2, figsize=(8, 8), gridspec_kw={'width_ratios': [sum(clusters == 0) + sum(clusters == 1), sum(clusters == 2) + sum(clusters == 3)], 'height_ratios': [sum(clusters == 0) + sum(clusters == 2), sum(clusters == 1) + sum(clusters == 3)]})
fig.subplots_adjust(wspace=0, hspace=0)
for ax in axs.ravel():
ax.axis('off')
for i, word in enumerate(words):
cluster_idx = clusters[i]
ax = axs[cluster_idx // 2, cluster_idx % 2]
ax.scatter(x[i], y[i], c=[colors[cluster_idx]], alpha=0.7)
ax.text(x[i], y[i], word, fontsize=10)
plt.legend(loc="best", fontsize=13)
plt.tight_layout()
temp_image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
plt.savefig(temp_image_file.name, format='png')
plt.close()
return temp_image_file.name, clusters
def sanitize_url(url):
if not re.match('^(http|https)://', url):
url = 'http://' + url
if not re.match('^(http|https)://www\.', url):
url = re.sub('^(http|https)://', r'\g<0>www.', url)
return url
# Configure the Celery app
app = Celery("tasks", broker=os.environ['REDIS_URL'], backend=os.environ['REDIS_URL'])
# Define the inputs and outputs
competitor_url_input = gr.inputs.Textbox(label="Competitor URL", placeholder="Enter a competitor URL")
full_site_scrape_checkbox = gr.inputs.Checkbox(label="Tick for full site scrape (otherwise landing page only)")
meta_tags_output = gr.outputs.Textbox(label="Meta Tags")
heading_tags_output = gr.outputs.Textbox(label="Heading Tags")
top10keywords_output = gr.outputs.Textbox(label="Top 10 Keywords")
cluster_table_output = gr.outputs.HTML(label="Cluster Table")
cluster_plot_output = gr.outputs.Image(type='filepath', label="Cluster Plot")
keyword_plot_output = gr.outputs.Image(type='filepath', label="Keyword Plot")
seo_analysis_output = gr.outputs.Textbox(label="SEO Analysis")
def append_unique_elements(source, target):
for element in source:
if isinstance(element, Tag) and element not in target:
target.append(element)
def get_internal_links(url: str):
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
internal_links = set()
for link in soup.find_all("a"):
href = link.get("href")
if href:
joined_url = urljoin(url, href)
parsed_url = urlparse(joined_url)
if parsed_url.netloc == urlparse(url).netloc:
internal_links.add(joined_url)
return internal_links
def analyze_single_page(competitor_url: str):
sanitized_url = sanitize_url(competitor_url)
soup = get_page_content(sanitized_url)
# Scrape and analyze meta tags
meta_tags = get_meta_tags(soup)
topmetatags = ""
for name, content in meta_tags.items():
if "description" in name.lower():
topmetatags += (f"{name}: {content}\n")
# Scrape and analyze heading tags
heading_tags = get_heading_tags(soup)
topheadingtags = ""
for tag, headings in heading_tags.items():
filtered_headings = [heading for heading in headings if len(heading) > 2]
if filtered_headings:
topheadingtags += (f"{tag}: {', '.join(filtered_headings)}\n")
# Scrape, analyze, and visualize keywords from page content
page_text = soup.get_text()
page_text_cleaned = clean_text(page_text)
preprocessed_text = preprocess_text(page_text_cleaned)
keywords_counter = Counter(preprocessed_text)
top10keywords = ""
for keyword, count in analyze_keywords(keywords_counter, top_n=10):
top10keywords += (f"{keyword}: {count}\n")
# Semantic clustering and visualization
sentences = [preprocessed_text[i:i+10] for i in range(0, len(preprocessed_text), 10)]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
words = [word for word, _ in analyze_keywords(keywords_counter, top_n=50)]
clusters = [model.wv.doesnt_match(words)] * len(words)
cluster_plot,clusters = visualize_clusters_plot(words, model)
cluster_table = create_cluster_table(words, model, clusters)
keyword_plot = visualize_keywords(keywords_counter, top_n=10)
table_string = cluster_table.to_string(index=False)
SEO_prompt = f"""The following information is given about a company's website:
Meta Tags:
{{meta_tags}}
Heading Tags:
{{heading_tags}}
Top 10 Keywords:
{{top10keywords}}
The following table represents clusters of thematically related words identified using NLP and clustering techniques. Each column represents a different cluster, and the words in each column are thematically related.
{table_string}
Please analyze the provided information and perform the following tasks:
1. Predict what the website is all about (the market sector).
2. Based on the market sector of the company, give a name to each cluster based on the theme it represents. The name needs to be the best summary of all the words in the cluster.
3. Perform a SWOT analysis (Strengths, Weaknesses, Opportunities, and Threats) from an SEO perspective for the company as a whole, taking into account the meta tags, heading tags, top 10 keywords, and the clusters.
Please provide your analysis in a clear and concise manner.
4. Lastly, suggest a list of 5 single words and 5 phrases (no longer than 3 words each) that the company should be using to improve their SEO
""".format(meta_tags=meta_tags, heading_tags=heading_tags, top10keywords=top10keywords, table_string=table_string)
def analyse_SEO(SEO_prompt):
response = openai.Completion.create(
model="text-davinci-003",
prompt = SEO_prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
gpt3_response = response.get('choices')[0].text
return gpt3_response,response
seo_analysis = analyse_SEO(SEO_prompt)
return topmetatags, topheadingtags, top10keywords, cluster_table.to_html(), cluster_plot, keyword_plot, seo_analysis[0]
# Wrap the analyze_website function with the Celery app.task decorator
@app.task
def analyze_website_task(competitor_url: str, full_site_scrape: bool = False):
if not full_site_scrape:
topmetatags, topheadingtags, top10keywords, cluster_table, cluster_plot, keyword_plot, seo_analysis = analyze_single_page(competitor_url)
return topmetatags, topheadingtags, top10keywords, cluster_table, cluster_plot, keyword_plot, seo_analysis
sanitized_url = sanitize_url(competitor_url)
internal_links = get_internal_links(sanitized_url)
soup_collection = BeautifulSoup("<html><head></head><body></body></html>", "html.parser")
for link in internal_links:
try:
soup = get_page_content(link)
append_unique_elements(soup.head, soup_collection.head)
append_unique_elements(soup.body, soup_collection.body)
except Exception as e:
print(f"Failed to analyze link: {link}. Error: {e}")
print('got all the links')
# Scrape and analyze meta tags
meta_tags = get_meta_tags(soup_collection)
topmetatags = ""
for name, content in meta_tags.items():
if "description" in name.lower():
topmetatags += (f"{name}: {content}\n")
print('fetched metatags')
# Scrape and analyze heading tags
heading_tags = get_heading_tags(soup_collection)
topheadingtags = ""
for tag, headings in heading_tags.items():
filtered_headings = [heading for heading in headings if len(heading) > 2]
if filtered_headings:
topheadingtags += (f"{tag}: {', '.join(filtered_headings)}\n")
print("fetched heading tags")
# Scrape, analyze, and visualize keywords from page content
page_text = soup_collection.get_text()
page_text_cleaned = clean_text(page_text)
preprocessed_text = preprocess_text(page_text_cleaned)
keywords_counter = Counter(preprocessed_text)
top10keywords = ""
for keyword, count in analyze_keywords(keywords_counter, top_n=10):
top10keywords += (f"{keyword}: {count}\n")
print("fetched keywords")
# Semantic clustering and visualization
sentences = [preprocessed_text[i:i+10] for i in range(0, len(preprocessed_text), 10)]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
words = [word for word, _ in analyze_keywords(keywords_counter, top_n=50)]
clusters = [model.wv.doesnt_match(words)] * len(words)
print("calculated clusters")
cluster_plot,clusters = visualize_clusters_plot(words, model)
cluster_table = create_cluster_table(words, model, clusters)
keyword_plot = visualize_keywords(keywords_counter, top_n=10)
print("plotted figures")
table_string = cluster_table.to_string(index=False)
print("created table string")
heading_tags_compressed = {}
for key, values in heading_tags.items():
count = Counter(values)
sorted_values = sorted(count.keys(), key=lambda x: count[x], reverse=True)
filtered_values = [value for value in sorted_values if value.strip() != ""]
heading_tags_compressed[key] = filtered_values[:10]
heading_tags_clean = {}
for key, values in heading_tags.items():
count = Counter(values)
sorted_values_clean = sorted(count.keys(), key=lambda x: count[x], reverse=True)
heading_tags_clean = [value for value in sorted_values_clean if value.strip() != ""]
print("cleaned up heading tags")
SEO_prompt = f"""The following information is given about a company's website:
Meta Tags:
{{meta_tags}}
Heading Tags:
{{heading_tags_compressed}}
Top 10 Keywords:
{{top10keywords}}
The following table represents clusters of thematically related words identified using NLP and clustering techniques. Each column represents a different cluster, and the words in each column are thematically related.
{table_string}
Please analyze the provided information and perform the following tasks:
1. Predict what the website is all about (the market sector).
2. Based on the market sector of the company, give a name to each cluster based on the theme it represents. The name needs to be the best summary of all the words in the cluster.
3. Perform a SWOT analysis (Strengths, Weaknesses, Opportunities, and Threats) from an SEO perspective for the company as a whole, taking into account the meta tags, heading tags, top 10 keywords, and the clusters.
Please provide your analysis in a clear and concise manner.
4. Lastly, suggest a list of 10 words and 10 phrases that the company should be using to improve their SEO
""".format(meta_tags=meta_tags, heading_tags_compressed=heading_tags_compressed, top10keywords=top10keywords, table_string=table_string)
print("defined SEO prompt")
def analyse_SEO(SEO_prompt):
response = openai.Completion.create(
model="text-davinci-003",
prompt = SEO_prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
gpt3_response = response.get('choices')[0].text
return gpt3_response,response
seo_analysis = analyse_SEO(SEO_prompt)
print("ran seo analysis")
print(topmetatags, heading_tags_clean,top10keywords,cluster_table.to_html(), cluster_plot, keyword_plot,seo_analysis[0])
return topmetatags, heading_tags_clean, top10keywords, cluster_table.to_html(), cluster_plot, keyword_plot, seo_analysis[0]
gr.Interface(
fn=analyze_website_task,
inputs=[competitor_url_input, full_site_scrape_checkbox],
outputs=[
meta_tags_output,
heading_tags_output,
top10keywords_output,
cluster_table_output,
cluster_plot_output,
keyword_plot_output,
seo_analysis_output,
],
title="SEO Analysis Tool",
description="Enter a competitor URL to perform a SEO analysis (some javascript pages will deny full scrape).",
layout="vertical"
).launch(share=True,debug=True)
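# Hedged sketch of driving the Celery-wrapped task above from another process.
# Assumptions: a worker has been started against this module (module name taken
# from the file name), the same REDIS_URL/OPENAI_API_KEY environment is available,
# and the blocking launch() call at the bottom is skipped in the worker context.
from seo_analysis_tool import analyze_website_task   # assumed module name

async_result = analyze_website_task.delay("example.com", False)   # landing page only
meta, headings, keywords, table_html, cluster_png, keyword_png, seo_text = async_result.get(timeout=600)
print(keywords)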
|
PhiloSolares/seo_analysis
|
seo_analysis_tool.py
|
seo_analysis_tool.py
|
py
| 18,812 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73931877949
|
#!python
"""
The number 145 is well known for the property that the sum of the factorial of its digits is equal to 145:
1! + 4! + 5! = 1 + 24 + 120 = 145
Perhaps less well known is 169, in that it produces the longest chain of numbers that link back to 169; it turns out that there are only three such loops that exist:
169 → 363601 → 1454 → 169
871 → 45361 → 871
872 → 45362 → 872
It is not difficult to prove that EVERY starting number will eventually get stuck in a loop. For example,
69 → 363600 → 1454 → 169 → 363601 (→ 1454)
78 → 45360 → 871 → 45361 (→ 871)
540 → 145 (→ 145)
Starting with 69 produces a chain of five non-repeating terms, but the longest non-repeating chain with a starting number below one million is sixty terms.
How many chains, with a starting number below one million, contain exactly sixty non-repeating terms?
"""
from math import factorial
def next_term(n):
return sum(map(factorial, map(int, list(str(n)))))
def non_repeating_term_count(n):
terms = set([])
while n not in terms:
terms.add(n)
n = next_term(n)
return len(terms)
targets = []
for i in range(1000000):
if non_repeating_term_count(i)==60:
print(i)
targets.append(i)
print(len(targets))
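# Sanity check of next_term against the loops quoted in the problem statement
# (run in the same module; each equality is just a sum of digit factorials).
assert next_term(169) == 363601        # 1! + 6! + 9!
assert next_term(363601) == 1454       # 3! + 6! + 3! + 6! + 0! + 1!
assert next_term(1454) == 169          # 1! + 4! + 5! + 4!
assert non_repeating_term_count(69) == 5   # 69 -> 363600 -> 1454 -> 169 -> 363601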
|
DanMayhem/project_euler
|
074.py
|
074.py
|
py
| 1,242 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15018836415
|
import tkinter as tk
import customtkinter as ctk
from customtkinter import ThemeManager
from View.GUI.CustomWidgets.NotebookCloseableTabs import NotebookCloseableTabs
from View.GUI.Windows.ResultWindow.ComputationStatusBoard import ComputationStatusBoard
from View.Observer import Observer
class ComputationNotebook(NotebookCloseableTabs, Observer):
def __init__(self, parent, computation, controller):
fg_color = ThemeManager.theme["color_scale"]["inner"]
super().__init__(parent, color_notebook_bg=fg_color)
self.add_widget(ComputationStatusBoard(self, computation, controller), "Status Board")
self.results = []
self.handler_id = None
self.subscribed_results = []
Observer.__init__(self, computation)
for result in computation.results:
if result.is_finished:
self.results.append(result)
else:
result.subscribe(self)
self.subscribed_results.append(result)
self.handle_tasks()
def update_(self, updated_component):
if updated_component[1] == "finished_result":
result = updated_component[0]
self.results.append(result)
def start_task_handler(self):
"""
Starts the task handler.
"""
self.handler_id = self.after(2000, self.handle_tasks)
def handle_tasks(self):
"""
Handles received results.
"""
for result in list(self.results):
self.add_result(result)
self.results.remove(result)
self.start_task_handler()
def destroy(self):
self.after_cancel(self.handler_id)
self.observed_subject.unsubscribe(self)
self.observed_subject = None
for result in self.subscribed_results:
result.unsubscribe(self)
self.subscribed_results.clear()
super().destroy()
def add_result(self, result):
"""
Adds results to the notebook.
:param result: result object
"""
fg_color = ThemeManager.theme["color_scale"]["inner"]
textbox = ctk.CTkTextbox(self, fg_color=fg_color, bg_color=fg_color, wrap="word")
textbox.insert(tk.END, result.result_text)
self.add_widget(textbox, result.configuration_name)
|
Moni5656/npba
|
View/GUI/Windows/ResultWindow/ComputationNotebook.py
|
ComputationNotebook.py
|
py
| 2,306 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39287970735
|
def bub(l1: list):
n = len(l1)
for i in range(n):
flag = True
for j in range(n-1-i):
if l1[j] > l1[j+1]:
l1[j], l1[j+1] = l1[j+1], l1[j]
flag = False
if flag:
break
return l1
if __name__ == "__main__":
l = ['12', '34', '5']
t = tuple(map(list, list(set(l))))
print(t)
# print(bub([4, 3, 8, 2, 6]))
a = ["hacker", "earth", "1", "2", "python", "language", "10"]
b = a[0::2]
c = a[1::2]
d = zip(b,c)
#print(dict(d))
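# Worked outputs for the pieces above: bub() compares strings lexicographically,
# and zip() stops at the shorter of the two slices.
print(bub([4, 3, 8, 2, 6]))                    # [2, 3, 4, 6, 8]
a = ["hacker", "earth", "1", "2", "python", "language", "10"]
print(dict(zip(a[0::2], a[1::2])))             # {'hacker': 'earth', '1': '2', 'python': 'language'} -- '10' is dropped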
|
biswajeetpadhi/Data_structure_Algorithm
|
practice.py
|
practice.py
|
py
| 549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20571972816
|
from ply.lex import LexToken
from windyquery.validator import ValidationError
from ._base import Base
TOKEN = 'SCHEMA'
class SchemaToken(LexToken):
def __init__(self, value):
self.type = TOKEN
self.value = value
self.lineno = 0
self.lexpos = 0
class Schema(Base):
def schema(self, s: str):
try:
s = self.validator.validate_schema(s)
except ValidationError as err:
raise UserWarning(f'invalid schema: {err}') from None
self.append(SchemaToken(s))
|
bluerelay/windyquery
|
windyquery/collector/schema.py
|
schema.py
|
py
| 540 |
python
|
en
|
code
| 68 |
github-code
|
6
|
27082652563
|
import time
from datetime import timedelta
import requests
from celery import Celery
from celery.utils.log import get_task_logger
app = Celery('parsing')
app.conf.update(broker_url='amqp://guest:guest@rabbitmq:5672', broker_connection_retry_on_startup=True)
app.conf.beat_schedule = {
'add-every-monday-morning': {
'task': 'parsing',
'schedule': timedelta(seconds=15)
},
}
# crontab(seconds='*/15')
celery_logger = get_task_logger(__name__)
@app.task(name='parsing')
def parse_excel_task():
time.sleep(5)
print('zalupa')
celery_logger.info('parsing')
requests.post('http://api:8000/api/v1/parser/parse-excel')
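# Hedged sketch: the scheduled entry can also be exercised by hand from a Python
# shell with this module imported; the .delay() form additionally assumes a worker
# consuming from the same RabbitMQ broker.
parse_excel_task()          # synchronous: sleeps 5s, logs, then POSTs to the parser endpoint
parse_excel_task.delay()    # or enqueue it through the broker for a running worker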
|
puplishe/testproject
|
fastapi1/celery/celery.py
|
celery.py
|
py
| 654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28797169675
|
"""
Program for determining tautologies
"""
L = ["&","%",">","-","!"] #And, Or, Implies, If and only if (biconditional), Negation
F = []
semF = {"&" : [ [False,True],[True,False], [False,False] ],
"%" : [ [False,False] ],
">" : [ [True,False] ],
"-" : [ [False,True],[True,False] ],
"!" : [ [True] ]
} #Domain of the value pairs that make the five logical connectives false
semV = {"&" : [ [True,True] ],
"%" : [ [True,False], [False, True], [True,True] ],
">" : [ [True,True], [False, True], [False, False] ],
"-" : [ [True,True], [False,False] ],
"!" : [ [False] ]
} #Domain of the value pairs that make the five logical connectives true
lettereEnunciativeF = []
global checkAssegnamenti
checkAssegnamenti = False
def isLenun(p): #Determine whether p is a propositional letter
for i in L: #If there are no connectives, the formula is a propositional letter
if i in p:
return False
return True
def creaAssegnamento(lettere):
#Create an assignment with all the propositional letters, set to True by default
#This function is usually used to create an arbitrary assignment to test whether a formula is always true or always false.
ass = {}
for i in lettere:
ass[i]=True
return ass
def leggiAssegnamento(ass):
#Read an assignment, which is a string of the form a:0 - b:1 - etc // 0 is False and 1 is True
ass = ass.replace(" ", "") #trim the string
assegnamento = dict()
ass = ass.split("-") #turn ass into a list
for i in ass:
i = i.split(":")
assegnamento[i[0]] = bool(int(i[1])) #In Python the bool of a string depends on its length, so it is always True unless the string is empty
return assegnamento
def lettereEnunciative(p):
#Build a list containing all the propositional letters of p
p = p.replace(" ", "") #Trim the string
lenuns = []
start= 0
control = False
for i in range(len(p)):
print("len p=", len(p))
print("ciclo ",i, "control = ", control)
if p[i] in (["(",")"] + L):
print("Trovato simbolo")
if control:
print("lettera enunciativa")
lenuns.append(p[start:i])
start = i+1
control = False
continue
if not control: start = i
control = True
if control: #If the string ends without parentheses, the last propositional letter would otherwise be left out
lenuns.append(p[start:len(p)])
return lenuns
def checkParentesi(p):
#Check that there are no redundant outer parentheses
barra = 0
parSup = True
if not(p.startswith("(") and p.endswith(")") ): return p #if it does not begin and end with parentheses, it is certainly not wrapped in redundant parentheses
test = p[1:len(p)-1] #remove the first and last parentheses and check whether the formula still makes sense
for i in test:
if not i in ["(",")"]: continue
if i == "(":
barra+=1
elif i == ")":
barra-=1
if barra<0:
parSup = False
break
## if parSup: #If the outer parentheses were redundant, return the version without them, otherwise return p
## print("Parentesi superflue. Versione migliorata: ", test)
## else:
## print("Parentesi non superflue. Restituisco ", p)
if parSup: #If the outer parentheses were redundant, return the version without them, otherwise return p
return test
else:
return p
def computa(p): #p is the formula, f the set of formulas
counter=0
start = 0
index=0
par =False
f = []
p = p.replace(" ", "") #Trim the string
if isLenun(p): #If the formula is a propositional letter, return False
f.append(p)
if checkAssegnamenti and not p in lettereEnunciativeF :
lettereEnunciativeF.append(p) #Add the letter to the corresponding list for the final check
return False
p = checkParentesi(p)
if p[0] == "!": #If the first symbol is the negation, everything is resolved immediately
f.append(p[1:])
f.append("!")
return f
for i in p:
index+=1
if i=="(":
if not par: start= index #If this is the first open parenthesis, place the start at that point.
par = True #Record that an open parenthesis has been found
counter+=1
elif i==")":
counter-=1
if counter == 0 and par:
par = False
counter = 0
f.append(p[start:index-1]) #Add the formula to the list of formulas
#If a formula has just been added, an operator must come right after it, otherwise the formula is not well formed
if len(f)==1:
if p[index] in L:
f.append(p[index]) #Add the operator
start = index
else:
print("ERRORE: formula non ben formata")
return None
if len(f)<3:
f.append(p[start+1:index])
return f
def computaricorsiva(f):
#f must be a vector of two or three elements of the form (a, binary operator, b) or (a, negation)
x = computa(f[0]) #First branch
if x != False:
f[0] = x
f[0] = computaricorsiva(f[0])
if len(f)>2: #If it is not a negation, evaluate the second formula as well
x = computa(f[2])
if x != False:
f[2] = x
f[2] = computaricorsiva(f[2])
return f
def isTautologia(f, val): #f is the formula to evaluate, val the value it must take; the domain is the set of required values of the propositional letters
#The function returns True if it is a tautology, or the domain if it is not
if isLenun(f): #If it is a propositional letter, the only assignment is its value
return [ {f : val} ]
assegnamenti = []
condizione = []
if val:
condizione = semV[f[1]]
else:
condizione = semF[f[1]]
print("\nLe condizioni per ", (f,val), " sono: ", condizione)
for cond in condizione: #Cond is a list of ordered pairs of True, False and None. None indicates that the value of that assignment does not matter
print("\nEsamino la condizione: ", cond, " per ", f)
x = isTautologia(f[0], cond[0])
if x == []: #If x is an empty set then f[0] is a tautology or an always-false formula. This means the other formula does not even need to be evaluated. f cannot support this configuration
continue
if len(f) <= 2 or cond[1] == None: #If the formula is just a negation, it has no other branch to evaluate. Even when the branch does not need to be evaluated (cond = None) we move on
assegnamenti += x #Add the assignment directly to the consistent ones
continue
y = isTautologia(f[2], cond[1])
if y == []: #If y is an empty set then f[2] is a tautology or an always-false formula.
continue
print("\nConfronto assegnamenti per", f, "\nGli assegnamenti sono:",x,y)
ass = confrontaAssegnamenti([x,y])
assegnamenti += ass
print("\nAssegnamenti coerenti: ", ass)
print("\nAssegnamenti: ", assegnamenti)
#Finally, the compatibility of the assignments is evaluated
return assegnamenti
def confrontaAssegnamenti(d):
#Given a list of two classes of assignments, d, perform a comparison and return only the assignments that are compatible with each other
# d is made up of partial assignments, which do not cover all the values of the propositional letters
ass = []
if d[0] == []: #It is possible that one of the two branches is completely empty. In that case, return the assignments directly
return d[1]
elif d[1] == []:
return d[0]
for a1 in d[0]:
for a2 in d[1]:
assegnamento = dict()
incompatibile = False
for i in a1:
#print(i, a1[i], a2[i])
if (not(i in a2)) or (a1[i] == a2[i]): #If the propositional letter is not in a2, or if it has the same value
assegnamento[i] = a1[i]
else: #If there is a contradiction between the required values, the two assignments a1 and a2 are incompatible
incompatibile = True
break #Move on to the next dictionary to compare
if not incompatibile: #Also examine the elements of a2, which might not be in the assignment
for i in a2:
if (not(i in a1)): #If the propositional letter is not in a1
assegnamento[i] = a2[i]
if not incompatibile:
ass.append(assegnamento)
return ass
def Valuta(f, ass): #evaluate a formula using the given assignment
if isLenun(f):
return ass[f]
#First branch
A = Valuta(f[0], ass)
if len(f) <=2: #If it is a negation, its value is necessarily the opposite of A
return not A
#Second branch
B = Valuta(f[2], ass)
#Evaluate the final value. f[1] is the connective
if [A,B] in semV[f[1]]:
return True
else:
return False
# TEST FUNCTIONS
def testcomputa():
frase = input("Frase: ")
formule = computaricorsiva(computa(frase))
#formule = computa(frase, [])
print(formule)
def testvalore():
global checkAssegnamenti
checkAssegnamenti = True
frase = input("Frase: ")
formule = computaricorsiva(computa(frase))
print ("L'albero è: ", formule)
print("Le lettere enunciative sono", lettereEnunciativeF)
x = isTautologia(formule, False)
if len(x)>0:
print("I contromodelli sono: ", x, "\nNon è una tautologia")
else:
if Valuta(formule, creaAssegnamento(lettereEnunciativeF)):
print("\nNon ci sono contromodelli: ", x, "\nE' una tautologia")
else:
print("\nNon ci sono modelli che la rendano vera: ", x, "\nE' sempre falsa")
def testparentesi():
frase = input("Frase: ")
checkParentesi(frase)
def testvaluta():
global checkAssegnamenti
checkAssegnamenti = True
frase = input("Frase: ")
ass = input("Scrivere l'assegnamento rispettando la seguente forma: a:1 - b:0 - ecc\nRicorda che 0 è False e 1 è True\n Assegnamento: ")
ass = leggiAssegnamento(ass)
print("L'assegnamento è ", ass)
formule = computaricorsiva(computa(frase))
x = Valuta(formule, ass)
print("\nIl valore finale è ", x)
def main():
while(True):
global checkAssegnamenti
checkAssegnamenti = False
testvalore()
if input("Continua? y/n") !="y": break
main()
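# Worked example of the assignment helpers above (e.g. from an interactive session
# after importing them); the values follow directly from the string parsing.
ass = leggiAssegnamento("a:1 - b:0")
print(ass)                              # {'a': True, 'b': False}
print(creaAssegnamento(["a", "b"]))     # {'a': True, 'b': True}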
|
dariochiaiese/tautologies
|
tautologie 4.0.py
|
tautologie 4.0.py
|
py
| 11,567 |
python
|
it
|
code
| 1 |
github-code
|
6
|
18187153319
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
t = np.linspace(0, 2*np.pi, 20)
p = np.linspace(0, np.pi, 10)
theta,phi = np.meshgrid(t,p)
x = np.cos(theta)*np.sin(phi)
y = np.sin(theta)*np.sin(phi)
z = np.cos(phi)
fig = plt.figure(figsize=(10,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
ax1.plot(theta.flatten(), phi.flatten(), 'o')
ax1.set_xlabel("$\\theta$")
ax1.set_ylabel("$\\phi$")
ax2.plot_surface(x,y,z, edgecolors='0.2')
plt.show()
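# Companion sketch (not in the original): the meshgrid above is uniform in (theta, phi),
# which clusters points near the poles once mapped to the sphere. For an area-uniform
# sample, draw z = cos(phi) uniformly in [-1, 1] instead.
rng = np.random.default_rng(0)
theta_u = rng.uniform(0, 2*np.pi, 200)
z_u = rng.uniform(-1.0, 1.0, 200)            # z = cos(phi) uniform => uniform surface density
r_u = np.sqrt(1 - z_u**2)
x_u, y_u = r_u*np.cos(theta_u), r_u*np.sin(theta_u)   # (x_u, y_u, z_u) lie uniformly on the sphere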
|
notmatthancock/notmatthancock.github.io
|
code/py/sphere-sampling/sphere-uniform-theta-phi.py
|
sphere-uniform-theta-phi.py
|
py
| 524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39184026326
|
# ----------------------------------------------------------------------
# |
# | Setup_custom.py
# |
# | David Brownell <[email protected]>
# | 2022-10-14 12:37:50
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2022
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
# pylint: disable=missing-module-docstring
import copy
import os
import uuid
import sys
import textwrap
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from semantic_version import Version as SemVer # pylint: disable=unused-import
from Common_Foundation.ContextlibEx import ExitStack # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import PathEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Shell.All import CurrentShell # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Shell import Commands # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation.Streams.DoneManager import DoneManager # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import SubprocessEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import TextwrapEx # type: ignore # pylint: disable=import-error,unused-import
from Common_Foundation import Types # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap import Configuration # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap import Constants # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadNSISInstaller import DownloadNSISInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadSevenZipInstaller import DownloadSevenZipInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.DownloadZipInstaller import DownloadZipInstaller # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.Installer import Installer # type: ignore # pylint: disable=import-error,unused-import
from RepositoryBootstrap.SetupAndActivate.Installers.LocalSevenZipInstaller import LocalSevenZipInstaller # type: ignore # pylint: disable=import-error,unused-import
# ----------------------------------------------------------------------
from _install_data import GRCOV_VERSIONS, LLVM_VERSIONS
del sys.modules["_install_data"]
# ----------------------------------------------------------------------
def GetConfigurations() -> Union[
Configuration.Configuration,
Dict[
str, # configuration name
Configuration.Configuration,
],
]:
configurations: Dict[str, Configuration.Configuration] = {}
if CurrentShell.family_name == "Windows":
target_architectures = ["x64", ] # TODO: "x86"
else:
target_architectures = [CurrentShell.current_architecture, ]
common_foundation_dependency = Configuration.Dependency(
uuid.UUID("DD6FCD30-B043-4058-B0D5-A6C8BC0374F4"),
"Common_Foundation",
"python310",
"https://github.com/davidbrownell/v4-Common_Foundation.git",
)
for llvm_version in LLVM_VERSIONS.keys():
version_specs = Configuration.VersionSpecs(
[Configuration.VersionInfo("LLVM", SemVer(llvm_version)), ],
{},
)
if CurrentShell.family_name == "Windows":
for target_architecture in target_architectures:
configurations["{}-mingw-{}".format(llvm_version, target_architecture)] = Configuration.Configuration(
"""Uses LLVM 'v{}' (using mingw (aka "Msys2 MinGW Clang" at https://blog.conan.io/2022/10/13/Different-flavors-Clang-compiler-Windows.html)) targeting '{}'.""".format(llvm_version, target_architecture),
[common_foundation_dependency, ],
version_specs,
)
for msvc_version in [
"17.4",
]:
for target_architecture in target_architectures:
configurations["{}-msvc-{}-{}".format(llvm_version, msvc_version, target_architecture)] = Configuration.Configuration(
"""Uses LLVM 'v{}' (using Microsoft Visual Studio 'v{}' (aka "LLVM/Clang" at https://blog.conan.io/2022/10/13/Different-flavors-Clang-compiler-Windows.html)) targeting '{}'.""".format(
llvm_version,
msvc_version,
target_architecture,
),
[
Configuration.Dependency(
uuid.UUID("6e6cbb2c-6512-470f-ba88-a6e4ad85fed0"),
"Common_cpp_MSVC",
"{}-{}".format(msvc_version, target_architecture),
"https://github.com/davidbrownell/v4-Common_cpp_MSVC.git",
),
],
version_specs,
)
else:
for target_architecture in target_architectures:
configurations["{}-{}".format(llvm_version, target_architecture)] = Configuration.Configuration(
"Uses LLVM 'v{}' (without any external dependencies) targeting '{}'.".format(
llvm_version,
target_architecture,
),
[common_foundation_dependency, ],
version_specs,
)
return configurations
# ----------------------------------------------------------------------
def GetCustomActions(
# Note that it is safe to remove any parameters that are not used
dm: DoneManager,
explicit_configurations: Optional[List[str]],
force: bool,
interactive: Optional[bool],
) -> List[Commands.Command]:
commands: List[Commands.Command] = []
root_dir = Path(__file__).parent
assert root_dir.is_dir(), root_dir
# Create a link to the foundation's .pylintrc file
foundation_root_file = Path(Types.EnsureValid(os.getenv(Constants.DE_FOUNDATION_ROOT_NAME))) / ".pylintrc"
assert foundation_root_file.is_file(), foundation_root_file
commands.append(
Commands.SymbolicLink(
root_dir / foundation_root_file.name,
foundation_root_file,
remove_existing=True,
relative_path=True,
),
)
with dm.Nested("\nProcessing 'Common_LLVM' tools...") as extract_dm:
with extract_dm.Nested("Processing 'grcov'...") as grcov_dm:
for index, (grcov_version, install_data) in enumerate(GRCOV_VERSIONS.items()):
with grcov_dm.Nested("'{}' ({} of {})...".format(grcov_version, index + 1, len(GRCOV_VERSIONS))) as version_dm:
install_data.installer.Install(
version_dm,
force=force,
prompt_for_interactive=install_data.prompt_for_interactive,
interactive=interactive,
)
with extract_dm.Nested("Processing 'LLVM'...") as llvm_dm:
for index, (version, install_data_items) in enumerate(LLVM_VERSIONS.items()):
with llvm_dm.Nested(
"'{}' ({} of {})...".format(
version,
index + 1,
len(LLVM_VERSIONS),
),
) as version_dm:
if explicit_configurations and not any(explicit_configuration.startswith(version) for explicit_configuration in explicit_configurations):
version_dm.WriteVerbose("The version was skipped.\n")
continue
for install_data_item in install_data_items:
with version_dm.Nested("'{}'...".format(install_data_item.name)) as this_dm:
install_data_item.installer.Install(
this_dm,
force=force,
prompt_for_interactive=install_data_item.prompt_for_interactive,
interactive=interactive,
)
if CurrentShell.family_name != "Windows":
# Create a simple test program to ensure that LLVM was installed correctly
with version_dm.Nested("Validating installation...") as validate_dm:
temp_directory = CurrentShell.CreateTempDirectory()
was_successful = False
# ----------------------------------------------------------------------
def OnExit():
if was_successful:
PathEx.RemoveTree(temp_directory)
return
validate_dm.WriteInfo("The temporary directory '{}' has not been deleted.".format(temp_directory))
# ----------------------------------------------------------------------
with ExitStack(OnExit):
source_filename = temp_directory / "test.cpp"
with validate_dm.Nested("Creating source file..."):
with source_filename.open("w") as f:
f.write(
textwrap.dedent(
"""\
#include <iostream>
int main() {
std::cout << "Hello world!\\n";
return 0;
}
""",
),
)
with validate_dm.Nested("Compiling...") as compile_dm:
command_line = 'clang++ "{}"'.format(source_filename.name)
compile_dm.WriteVerbose("Command Line: {}\n\n".format(command_line))
modified_env = copy.deepcopy(os.environ)
modified_env["PATH"] = "{}:{}".format(
modified_env["PATH"],
install_data_item.installer.output_dir / "bin",
)
modified_env["LD_LIBRARY_PATH"] = "{}".format(
install_data_item.installer.output_dir / "lib" / "x86_64-unknown-linux-gnu",
)
result = SubprocessEx.Run(
command_line,
cwd=temp_directory,
env=modified_env, # type: ignore
)
compile_dm.result = result.returncode
if compile_dm.result != 0:
compile_dm.WriteError(
textwrap.dedent(
"""\
Errors here generally indicate that glibc has not been installed (especially if the error is associated with 'features.h').
Visit https://www.gnu.org/software/libc/ for more information.
Please install glibc using your distro's favorite package manager.
Examples:
Ubuntu: `apt-get install -y libc6-dev`
COMPILER ERROR
--------------
{}
""",
).format(
TextwrapEx.Indent(result.output.strip(), 4),
),
)
return []
with compile_dm.YieldVerboseStream() as stream:
stream.write(result.output)
with validate_dm.Nested("Testing...") as testing_dm:
command_line = "./a.out"
testing_dm.WriteVerbose("Command Line: {}\n\n".format(command_line))
result = SubprocessEx.Run(
command_line,
cwd=temp_directory,
)
testing_dm.result = result.returncode
if testing_dm.result == 0:
testing_dm.result = 0 if result.output == "Hello world!\n" else -1
if testing_dm.result != 0:
compile_dm.WriteError(result.output)
return []
with testing_dm.YieldVerboseStream() as stream:
stream.write(result.output)
was_successful = True
return commands
|
davidbrownell/v4-Common_LLVM
|
Setup_custom.py
|
Setup_custom.py
|
py
| 15,433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39672847864
|
import fileinput
from typing import List
from number import Number
def solve(input_file):
n = Number.parse(input_file.readline().strip())
for line in input_file:
n += Number.parse(line.strip())
n.reduce()
return n.magnitude
if __name__ == "__main__":
print(solve(fileinput.FileInput()))
|
cmatsuoka/aoc
|
2021 - submarine/18 - snailfish numbers/solution1.py
|
solution1.py
|
py
| 324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71740452349
|
#!/usr/bin/env python
import yaml
import os, sys, re
import subprocess
import argparse
## Arguments
parser = argparse.ArgumentParser(description='Create a shared cuda.yml for docker-compose')
parser.add_argument('--gpu', '-g',
action='append',
dest='gpus',
default=[])
parser.add_argument('--verbose',
action='store_true',
help='Verbose logging')
parser.add_argument('--out', '-o',
dest='save_directory',
default='shared/',
help='Directory to write the shared docker-compose')
args = parser.parse_args()
cuda_version = '7.0'
nv_device = '/dev/nvidia'
uvm_device = '{0}-uvm'.format(nv_device)
ctl_device = '{0}ctl'.format(nv_device)
cuda_version_label = 'com.nvidia.cuda.version'
nv_bins_volume = '/usr/local/bin'
nv_bins = ['nvidia-cuda-mps-control',
'nvidia-cuda-mps-server',
'nvidia-debugdump',
'nvidia-persistenced',
'nvidia-smi'
]
nv_libs_volume = '/usr/local/nvidia'
nv_libs_cuda = ['cuda', 'nvcuvid', 'nvidia-compiler', 'nvidia-encode', 'nvidia-ml']
def log(msg, **kwargs):
print('DEBUG: {0}'.format(msg));
if kwargs.get('body'):
print(kwargs.get('body', ''))
def no_error(cmds):
try:
for cmd in cmds.split():
subprocess.Popen([cmd], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return False
def grep(cmd, grp):
grep = subprocess.Popen(['grep', grp], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
orig = subprocess.Popen(cmd, stdout=grep.stdin)
output, errs = grep.communicate()
orig.wait()
if output:
return output.decode('ascii')
def query_nvsmi(section, gpu_id=False):
cmd = ['nvidia-smi','-q']
if gpu_id:
cmd.extend(['-i', gpu_id])
res = grep(cmd, section)
return res.split()[-1]
def library_path(lib):
pat = grep(['ldconfig', '-p'], 'lib{0}.so'.format(lib))
if pat:
return pat.split('=>')[-1].strip(' \t\n\r')
else:
print('Could not find library: {0}'.format(lib))
def library_arch(lib):
proc = subprocess.Popen(['file', '-L', lib], stdout=subprocess.PIPE)
out, errs = proc.communicate()
if errs:
print('There was an error with `which {0}`: {1}'.format(b, errs))
elif out:
return re.sub('-bit', '', out.decode('ascii').split()[2])
def which(b):
proc = subprocess.Popen(['which', b], stdout=subprocess.PIPE)
out, errs = proc.communicate()
if errs:
print('There was an error with `which {0}`: {1}'.format(b, errs))
elif out:
return out.decode('ascii').strip(' \n\t\r')
def format_mount(a, b=None):
if not b:
b = a
return '{0}:{1}'.format(a, b)
driver_version = query_nvsmi('Driver Version')
no_error('nvidia-smi')
#no_error('nvidia-smi nvidia-modprobe')
d = {
'devices': [],
'volumes': []
}
## Add devices
devices = [ctl_device, uvm_device]
d['devices'] = [format_mount(dev) for dev in devices]
if args.gpus:
for gpu in args.gpus:
gpu_minor_version = query_nvsmi('Minor Number', gpu)
if gpu_minor_version:
d['devices'].append(format_mount('{0}{1}'.format(nv_device, gpu_minor_version)))
else:
print('Could not find minor version for gpu: {0}'.format(gpu))
library_paths = [library_path(lib) for lib in nv_libs_cuda]
for lib in library_paths:
if lib:
basename = os.path.basename(lib)
arch = library_arch(lib)
if arch:
mount = None
if arch == '32':
mount = format_mount(lib, '{0}/lib/{1}'.format(nv_libs_volume, basename))
if arch == '64':
mount = format_mount(lib, '{0}/lib64/{1}'.format(nv_libs_volume, basename))
if mount:
d['volumes'].append(mount)
for binary in nv_bins:
b = which(binary)
if b:
d['volumes'].append(format_mount(b, '{0}/{1}'.format(nv_bins_volume, binary)))
cuda_dir = '/usr/local/cuda-{0}/lib64'.format(cuda_version)
files = [x for x in os.listdir(cuda_dir) if os.path.isfile(cuda_dir+os.sep+x)]
for lib in files:
local_file = os.path.join(cuda_dir, lib)
remote_volume = '{0}/{1}'.format(nv_libs_volume, lib)
d['volumes'].append(format_mount(local_file, remote_volume))
d['environment'] = {}
d['environment'].update({'LD_LIBRARY_PATH': '$LD_LIBRARY_PATH:{0}:{1}'.format(cuda_dir, nv_libs_volume)})
out = yaml.safe_dump({'cuda_base': d},
indent=4,
allow_unicode=True,
default_flow_style=False)
log('Writing cuda file', body=out)
with open('{0}/cuda.yml'.format(args.save_directory), 'w') as outfile:
outfile.write(out)
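# Worked examples of the mount-string helper above (illustrative paths): these are
# exactly the strings that end up in the devices/volumes lists.
print(format_mount('/dev/nvidiactl'))
# '/dev/nvidiactl:/dev/nvidiactl'
print(format_mount('/usr/lib/libcuda.so', '/usr/local/nvidia/lib64/libcuda.so'))
# '/usr/lib/libcuda.so:/usr/local/nvidia/lib64/libcuda.so'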
|
auser/docker-tensorflow-ipython-celery-rest-server
|
shared/create_cuda.py
|
create_cuda.py
|
py
| 4,297 |
python
|
en
|
code
| 7 |
github-code
|
6
|
16539466565
|
# -*- coding: utf-8 -*-
"""The main and only module in the game.
The game is deliberately written in a minimalist style: I wanted
to implement a text engine as concisely as possible.
By feeding this script different JSON files with metadata, you can
run arbitrary text-based stories.
"""
import json
import os
import textwrap
from collections import defaultdict
from typing import Dict, Any, NewType
Context = NewType('Context', Dict[str, Any])
Location = NewType('Location', Dict[str, Any])
Locations = Dict[str, Location]
BASE_PATH = os.path.join('game', 'locations')
def clear_screen_windows() -> None:
"""Clear the screen contents on Windows.
"""
os.system('cls')
def clear_screen_nix() -> None:
"""Clear the screen contents on *nix.
"""
os.system('clear')
def get_locations() -> Locations:
"""Load locations from the hard drive and return them as a dictionary.
Example of the output data:
{
'start':
{
'title': 'Start location',
'options': ...,
},
...
}
"""
locations = {}
for path, dirs, filenames in os.walk(BASE_PATH):
for filename in filenames:
full_path = os.path.join(path, filename)
with open(full_path, mode='r', encoding='utf-8') as file:
contents = json.load(file)
for name, content in contents.items():
locations[name] = Location(content)
return locations
def get_context() -> Context:
"""Return the set of game variables.
Default parameters can be filled in here.
"""
return Context({
'times_visited': defaultdict(int),
})
def get_header(position: str, location: Location, context: Context) -> str:
"""Extract the location header.
The text depends on whether we have been here before.
"""
if all([context['times_visited'][position] == 0,
'initial_header' in location]):
return location['initial_header']
return location['header']
def is_visible(option: dict, context: Context) -> bool:
"""Return True if we can see this option.
"""
condition = option.get('condition')
if condition is None:
return True
return bool(eval(condition, {}, context))
def get_input_from_user(variants: dict) -> str:
"""Get from the user the answer option they want to choose.
"""
while True:
variant = input('>')
if variant.strip().lower() in variants:
return variant
print('Выберите один из предоставленных вариантов')
def apply_side_effect(chosen_option: dict, context: Context) -> None:
"""Apply the side effect of a choice.
We mutate the context variables to drive the logic.
The decisions themselves are made while authoring the JSON and are
not represented in this script in any way.
"""
side_effect = chosen_option.get('side_effect')
if side_effect:
exec(side_effect, {}, context)
def enter_location(position: str, locations: Locations,
context: Context) -> Location:
"""Perform the operations for entering a new location and return its instance.
"""
clear_screen()
location: Location = locations[position]
output_location(position, location, context)
context['times_visited'][position] += 1
return location
def ask_user(location: Location, context: Context) -> dict:
"""Get feedback from the user and return the chosen option instance.
Example of the output data:
{
'condition': 'True',
'label': 'Option 2',
'goto': 'end',
...
}
A separate number variable has to be used because of the
invisible options in the option menu. Without it the numbering
would not be sequential.
"""
visible_choices = {}
number = 0
for option in location['options']:
if is_visible(option, context):
number += 1
visible_choices[str(number)] = option
print(f'[{number}] {option["label"]}')
user_choice_number = get_input_from_user(visible_choices)
chosen_option = visible_choices[user_choice_number]
return chosen_option
def output_location(position: str, location: Location, context: Context,
terminal_width: int = 80) -> None:
"""Print the location header and a short entry description to the screen.
"""
print('-' * terminal_width)
header = get_header(position, location, context)
for substring in header.split('\n'):
if substring:
lines = textwrap.wrap(text=substring,
width=terminal_width)
for line in lines:
print(line)
print('-' * terminal_width)
def main():
"""The main event loop of the game.
Spins through the JSON files in an infinite loop until the player presses
Ctrl+C or reaches the location named end.
"""
locations = get_locations()
context = get_context()
position = 'start'
while position != 'end':
location = enter_location(position, locations, context)
option = ask_user(location, context)
apply_side_effect(option, context)
position = option['goto']
print('Спасибо за игру!')
if __name__ == '__main__':
if os.name == 'nt':
clear_screen = clear_screen_windows
else:
clear_screen = clear_screen_nix
try:
main()
except KeyboardInterrupt:
print('Выход по команде с клавиатуры')
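# Hedged sketch of a location file under game/locations/, using only the fields this
# engine reads (header/initial_header and options with label/condition/side_effect/goto);
# the story text and file contents here are made up for illustration.
import json

example_location = {
    "start": {
        "initial_header": "You wake up in a small room.",
        "header": "The small room again.",
        "options": [
            {"label": "Look around", "goto": "start"},
            {"label": "Leave", "condition": "times_visited['start'] > 0", "goto": "end"},
        ],
    },
}
print(json.dumps(example_location, ensure_ascii=False, indent=2))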
|
IgorZyktin/BGCGame
|
game/__main__.py
|
__main__.py
|
py
| 6,720 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
38858291652
|
"""
Place to register all blueprints
"""
def customize_social_auth():
"""
Customize certain routes of social auth
"""
from datetime import datetime
from flask import Blueprint, current_app, g
from flask_user import login_required, signals
from social_core.actions import do_auth, do_complete, do_disconnect
from social_flask.routes import do_login as base_do_login
from social_flask.utils import psa
social_auth = Blueprint("social", "social_flask")
def do_login(backend, user, social_user):
ret = base_do_login(backend=backend, user=user, social_user=social_user)
# Set email_confirmed_at if not already set, is assuming that a
# user only have one email, that is, the User is the UserMailClass
if ret and not user.email_confirmed_at:
user.email_confirmed_at = datetime.utcnow()
user.save()
# Send confirmed_email signal
signals.user_confirmed_email.send(
current_app._get_current_object(), user=user
)
return ret
@social_auth.route("/sign-in/<string:backend>", methods=("GET", "POST"))
@psa("social.complete")
def auth(backend):
return do_auth(g.backend)
@social_auth.route("/complete/<string:backend>", methods=("GET", "POST"))
@psa("social.complete")
def complete(backend, *args, **kwargs):
"""Overrided view to auto confirm emails due to being confirmed by
auth provider inside login"""
return do_complete(g.backend, login=do_login, user=g.user, *args, **kwargs)
@social_auth.route("/disconnect/<string:backend>", methods=("POST",))
@social_auth.route(
"/disconnect/<string:backend>/<int:association_id>", methods=("POST",)
)
@social_auth.route(
"/disconnect/<string:backend>/<string:association_id>", methods=("POST",)
)
@login_required
@psa()
def disconnect(backend, association_id=None):
"""Disconnects given backend from current logged in user."""
return do_disconnect(g.backend, g.user, association_id)
return social_auth
def register_blueprint(app):
"""
Given an app register all blueprints for the application
"""
from .apps import api, chat, media, posts, user
# Register Blueprints
app.register_blueprint(customize_social_auth(), uri_prefix="oauth")
app.register_blueprint(api.bp, url_prefix="/api")
app.register_blueprint(chat.bp, url_prefix="/chat")
app.register_blueprint(media.bp, url_prefix="/media")
app.register_blueprint(posts.bp, url_prefix="/post")
app.register_blueprint(user.bp, url_prefix="/user")
if app.config["DEBUG"]:
from .apps import showroom
app.register_blueprint(showroom.bp, url_prefix="/showroom")
|
Vixx-X/ati-project
|
src/backend/blueprints.py
|
blueprints.py
|
py
| 2,801 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17509932483
|
def perm(s, k):
n = len(s)
def perm_helper(a=None, stamploc=0, used=None):
if a is None:
a = [None] * k
if used is None:
used = [False] * n
if stamploc == k-1:
for i in range(n):
if not used[i]:
a[stamploc] = s[i]
res.append(''.join(a))
return
for i in range(n):
if not used[i]:
a[stamploc] = s[i]
used[i] = True
perm_helper(a, stamploc+1, used)
used[i] = False
if k == 0:
return [None]
res = []
perm_helper()
return res
assert perm('abc', 1) == ['a', 'b', 'c']
assert perm('ab', 2) == ['ab', 'ba']
assert len(perm('abcdefg', 3)) == 210
assert perm('abc', 0) == [None]
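# Cross-check against the standard library (run in the same module): for k >= 1 the
# function enumerates exactly the k-permutations of s; only the k == 0 sentinel
# differs ([None] here versus [()] from itertools).
from itertools import permutations

s, k = 'abcd', 2
assert sorted(perm(s, k)) == sorted(''.join(p) for p in permutations(s, k))   # 12 two-letter permutations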
|
soji-omiwade/cs
|
dsa/before_rubrik/perm_nk_notset.py
|
perm_nk_notset.py
|
py
| 817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72478434747
|
import re
def isValidEmail(email):
regex = re.compile(
r"([A-Za-z0-9]+[._-])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\.[A-Za-z]{2,})+"
)
if re.fullmatch(regex, email):
return True
else:
return False
def isValidPhone(phone):
regex = re.compile(r"^\+?[0-9]{10,}$")
if re.fullmatch(regex, phone):
return True
else:
return False
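# Sample calls showing what the two validators accept and reject.
print(isValidEmail("[email protected]"))   # True
print(isValidEmail("not-an-email"))           # False
print(isValidPhone("+5511999998888"))         # True
print(isValidPhone("12345"))                  # False (fewer than 10 digits)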
|
lucas-kaminski/mvp-bot-telegram
|
src/utils/validation.py
|
validation.py
|
py
| 385 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25550775532
|
i=0
Fib = [1,1]
n = int(input("Welche Fibonaccizahl willst du?="))
while i < n:
fib = Fib[i]+Fib[i+1]
Fib.append(fib)
i=i+1
print(f"Die Fibonacci Zahl F_{i+1} ist = {Fib[i]}")
print(Fib)
a=1
b=1
fibo=0
i=0
while i < n:
fibo=a+b
a=b
b=fibo
i=i+1
print(fibo)
def f(p):
if p==1:
return 1
else:
return f(p-1)*p
print(f(10))
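# Companion sketch (not in the original): the same Fibonacci recurrence written
# recursively with memoization, in the style of the recursive factorial f above.
# With F_1 = F_2 = 1, fib(10) = 55; f(10) stays 3628800.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    return 1 if n <= 2 else fib(n - 1) + fib(n - 2)

print(fib(10))   # 55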
|
tobibrosch/mathematischeprogrammierung
|
Gruppenphase/Blatt03/nowakbrosch_Blatt03_rekursion.py
|
nowakbrosch_Blatt03_rekursion.py
|
py
| 381 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9499182379
|
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
# Download the training data from the public dataset.
training_data = datasets.FashionMNIST(
root="Fashion_MNIST_Data",
train=True,
download=True,
transform=ToTensor(),
)
# Download the test data from the public dataset.
test_data = datasets.FashionMNIST(
root="Fashion_MNIST_Data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
# Create the data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print(f"Shape of X [N, C, H, W]: {X.shape}")
print(f"Shape of y: {y.shape} {y.dtype}")
break
# Get the CPU, GPU, or MPS device to use for training.
device = (
"cuda"
if torch.cuda.is_available()
else "mps"
if torch.backends.mps.is_available()
else "cpu"
)
print(f"Using {device} device")
# Define the model.
class NeuralNetwork(nn.Module):
def __init__(self):
super().__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10)
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
        # Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
        # Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), (batch + 1) * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 5
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model, loss_fn)
print("Done!")
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
|
hotsoycandy/learn-pytorch
|
train.py
|
train.py
|
py
| 2,907 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5544798991
|
from common import execute_sh
from retry import retry
import os
import logging
import json
"""
Issue kubectl commands on the running linode cluster
"""
WEATHER_API_TOKEN = os.environ.get("WEATHER_API_TOKEN")
KUBERNETES_NODE_COUNT = "2"
@retry(tries=60, delay=30)
def get_nodes():
# Verify kubectl is communicating with cluster
cmd = ["kubectl", "--output=json", "get", "nodes"]
output = execute_sh(cmd)
json_object = json.loads(output)
nodes = json_object["items"]
if len(nodes) != int(KUBERNETES_NODE_COUNT):
raise Exception(f"kubectl expected {KUBERNETES_NODE_COUNT} nodes but found {len(nodes)}")
logging.info(f"kubectl OK: Retrieved node count: {len(nodes)}")
return
@retry(tries=5, delay=10)
def apply_deployment():
cmd = ["kubectl", "--output=json", "apply", "-f", "resources/deployment.yaml"]
output = None
try:
output = execute_sh(cmd)
except Exception as e:
raise Exception(f"retrying {cmd}")
json_object = json.loads(output)
logging.debug(f"json ==> {json_object}")
logging.info(f"kubectl deployment applied OK")
return
def apply_service():
cmd = ["kubectl", "--output=json", "apply", "-f", "resources/service.yaml"]
output = execute_sh(cmd)
json_object = json.loads(output)
logging.debug(f"json ==> {json_object}")
logging.info(f"kubectl service applied OK")
return
@retry(tries=5, delay=10)
def delete_service():
cmd = ["kubectl", "delete", "svc", "gwa"]
execute_sh(cmd)
def create_secrets():
"""Create k8s secret for the api key etc"""
cmd = [
"kubectl",
"create",
"secret",
"generic",
"gws-secret",
f"--from-literal=WEATHER_API_TOKEN={WEATHER_API_TOKEN}",
]
execute_sh(cmd)
@retry(tries=20, delay=10)
def get_ingress_ip():
cmd = ["kubectl", "--output=json", "get", "svc", "gwa"]
output = execute_sh(cmd)
json_object = json.loads(output)
logging.debug(f"json ==> {json_object}")
ingress_ip = json_object["status"]["loadBalancer"]["ingress"][0]["ip"]
if not ingress_ip:
raise Exception(f"Ingress IP is empty in the returned json")
logging.info(f"Load Balance Ingress is: {ingress_ip}")
return ingress_ip
def apply_argocd():
cmd = ["ls", "-al"]
output = execute_sh(cmd)
cmd = ["kubectl", "create", "namespace", "argocd"]
output = execute_sh(cmd)
# cmd = ["kubectl", "apply", "--namespace=argocd", "--dry-run=server", "-k", "."]
# output = execute_sh(cmd, "./resources")
return
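# Illustrative orchestration sketch (added for clarity; not in the original module).
# It only chains the helpers defined above — the particular ordering is an assumption.
def deploy_sketch():
    get_nodes()
    create_secrets()
    apply_deployment()
    apply_service()
    return get_ingress_ip()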
|
HarrisKirk/blue-green-dreams
|
gwa-deploy/kubectl.py
|
kubectl.py
|
py
| 2,558 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74313290747
|
from .keywords import K_SET_1
class Program:
def __init__(self, inputLines):
self.inputLines = inputLines
self.comment = False
self.indent = 0
self.brackets = []
self.programlines = []
def __repr__(self):
return '\n'.join(self.programlines)
def generate(self, path):
temp = []
for line in self.inputLines:
if line == '\n':
continue
elif '/*' in line:
t = line.split('/*')
temp.extend([t[0], '/*'+t[1]])
elif '//' in line:
if line.strip()[:2] == '//':
temp.append(line)
else:
count = 0
for i in line:
if i == ' ':
count += 1
else:
break
t = line.split('//')
temp.extend([t[0], (' '*count)+'//'+t[1]])
else:
temp.append(line)
self.inputLines = list(filter(lambda x: False if x=='\n' or x == '' else True, temp))
for line in self.inputLines:
if self.comment:
if '*/' in line:
self.comment = False
self.programlines.append(line)
else:
if line == '\n' or '//' in line:
self.programlines.append(line)
elif '/*' in line:
self.programlines.append(line)
self.comment = True
else:
line = line.strip('\n').rstrip()
if line == '':
continue
line = self._indent(line)
line, linetype = self._checkKeywords(line)
if linetype:
line, linetype = self._colon(line)
if linetype:
line = self._semicolon(line)
if self.indent > 0:
self.programlines.extend(['}']*int(self.indent))
with open(path.split('.tr')[0] + '.c', 'w') as outfile:
for line in self.programlines:
outfile.write(line + '\n')
def _indent(self, line):
count = 0
for i in line:
if i == ' ':
count += 1
else:
break
if (count/4).is_integer():
if count/4 < self.indent:
bracketCount = self.indent - count/4
self.programlines.extend([' '*(int(self.indent)*4-1) + '}']*int(bracketCount))
self.indent = count/4
elif count/4 > self.indent:
raise SyntaxError()
return line
def _checkKeywords(self, line):
linetype = True
for words in K_SET_1:
if words in line:
linetype = False
self.programlines.append('#' + line.strip())
return '#' + line.strip(), linetype
return line, linetype
def _semicolon(self, line):
finalline = line.rstrip() + ';'
self.programlines.append(finalline)
return finalline
def _colon(self, line):
if line[-1] == ':':
self.indent += 1
self.programlines.append(line[:-1])
self.programlines.append(' '*(int(self.indent)*4-1) + '{')
return line.replace(':', ''), False
return line, True
|
ShriramShagri/Transpiler
|
src/core/program.py
|
program.py
|
py
| 3,519 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42295145901
|
import geopy
bairros_list = [
"Boa Vista",
"Bom Pastor",
"Centro",
"Granbery",
"Jardim Glória",
"Santa Helena",
"São Mateus",
"Teixeiras",
"Bairu",
"Bonfim",
"Botanágua",
"Centenário",
"Cesário Alvim",
"Grajaú",
"Linhares",
"Manoel Honório",
"Marumbi",
"Nossa Senhora Aparecida",
"Progresso",
"Santa Rita",
"Santa Cândida",
"São Benedito",
"São Bernardo",
"Vitorino Braga",
"Eldorado",
"Granjas Betânea",
"Jardim Bom Clima",
"Mariano Procópio",
"Grama",
"Jardim Emaús",
"Parque Independência",
"Santa Therezinha",
"Filgueiras",
"Vale dos Bandeirantes",
"Barbosa Lage",
"Barreira do Triunfo",
"Benfica",
"Milho Branco",
"Carlos Chagas",
"Cerâmica",
"Esplanada",
"Francisco Bernardino",
"Industrial",
"Jardim Natal",
"Jóquei Clube",
"Nova Era",
"Paula Lima",
"Remonta",
"Represa",
"Santa Cruz",
"São Dimas",
"Vila Esperança",
"Aeroporto",
"Borboleta",
"Cruzeiro Santo Antônio",
"Martelos",
"Morro do Imperador",
"Nova Califórnia",
"Novo Horizonte",
"São Pedro",
"Serro Azul",
"Barão do Retiro",
"Floresta",
"Nossa Senhora de Lourdes",
"Santo Antônio",
"Vila Furtado de Menezes",
"Vila Olavo Costa",
"Niterói",
"Costa Carvalho",
"Bomba de Fogo",
"Cascatinha",
"Graminha",
"Ipiranga",
"Jardim Laranjeiras",
"Sagrado Coração de Jesus",
"Salvaterra",
"Santa Efigênia",
"Santa Luzia",
"São Geraldo",
]
location_dict = {}
for bairro in bairros_list:
geolocator = geopy.geocoders.Nominatim(user_agent="geolocalização")
location = geolocator.geocode(bairro + ', Juiz de Fora - MG')
try:
lat = location.latitude
lon = location.longitude
location_dict[bairro]={
'latitude': lat,
            'longitude': lon,
}
print(bairro + ', Juiz de Fora - MG.')
except:
        print(bairro + ', could not be geocoded.')
print(location_dict)
|
igortitoneli/Api_Vitrine
|
bairro_location.py
|
bairro_location.py
|
py
| 2,455 |
python
|
is
|
code
| 0 |
github-code
|
6
|
8385196931
|
from __future__ import print_function
from __future__ import absolute_import
import socket
import struct
import sys
import warnings
from . import constants as tc
from .exceptions import TraCIException, FatalTraCIError
from .domain import _defaultDomains
from .storage import Storage
from .step import StepManager
_DEBUG = False
_RESULTS = {0x00: "OK", 0x01: "Not implemented", 0xFF: "Error"}
class Connection(StepManager):
"""Contains the socket, the composed message string
together with a list of TraCI commands which are inside.
"""
def __init__(self, host, port, process, traceFile, traceGetters):
StepManager.__init__(self)
if sys.platform.startswith('java'):
# working around jython 2.7.0 bug #2273
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
else:
self._socket = socket.socket()
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
self._socket.connect((host, port))
except socket.error:
self._socket.close()
raise
self._process = process
self._string = bytes()
self._queue = []
self._subscriptionMapping = {}
if traceFile is not None:
self.startTracing(traceFile, traceGetters, _defaultDomains)
for domain in _defaultDomains:
domain._register(self, self._subscriptionMapping)
def _recvExact(self):
try:
result = bytes()
while len(result) < 4:
t = self._socket.recv(4 - len(result))
if not t:
return None
result += t
length = struct.unpack("!i", result)[0] - 4
result = bytes()
while len(result) < length:
t = self._socket.recv(length - len(result))
if not t:
return None
result += t
return Storage(result)
except socket.error:
return None
def _sendExact(self):
if self._socket is None:
raise FatalTraCIError("Connection already closed.")
length = struct.pack("!i", len(self._string) + 4)
if _DEBUG:
print("sending", Storage(length + self._string).getDebugString())
self._socket.send(length + self._string)
result = self._recvExact()
if _DEBUG:
print("receiving", result.getDebugString())
if not result:
self._socket.close()
del self._socket
raise FatalTraCIError("connection closed by SUMO")
for command in self._queue:
prefix = result.read("!BBB")
err = result.readString()
if prefix[2] or err:
self._string = bytes()
self._queue = []
raise TraCIException(err, prefix[1], _RESULTS[prefix[2]])
elif prefix[1] != command:
raise FatalTraCIError("Received answer %s for command %s." % (prefix[1], command))
elif prefix[1] == tc.CMD_STOP:
length = result.read("!B")[0] - 1
result.read("!%sx" % length)
self._string = bytes()
self._queue = []
return result
def _pack(self, format, *values):
packed = bytes()
for f, v in zip(format, values):
if f == "i":
packed += struct.pack("!Bi", tc.TYPE_INTEGER, int(v))
elif f == "I": # raw int for setOrder
packed += struct.pack("!i", int(v))
elif f == "d":
packed += struct.pack("!Bd", tc.TYPE_DOUBLE, float(v))
elif f == "D": # raw double for some base commands like simstep
packed += struct.pack("!d", float(v))
elif f == "b":
packed += struct.pack("!Bb", tc.TYPE_BYTE, int(v))
elif f == "B":
packed += struct.pack("!BB", tc.TYPE_UBYTE, int(v))
elif f == "u": # raw unsigned byte needed for distance command and subscribe
packed += struct.pack("!B", int(v))
elif f == "s":
v = str(v)
packed += struct.pack("!Bi", tc.TYPE_STRING, len(v)) + v.encode("latin1")
elif f == "p": # polygon
if len(v) <= 255:
packed += struct.pack("!BB", tc.TYPE_POLYGON, len(v))
else:
packed += struct.pack("!BBi", tc.TYPE_POLYGON, 0, len(v))
for p in v:
packed += struct.pack("!dd", *p)
elif f == "t": # tuple aka compound
packed += struct.pack("!Bi", tc.TYPE_COMPOUND, v)
elif f == "c": # color
packed += struct.pack("!BBBBB", tc.TYPE_COLOR, int(v[0]), int(v[1]), int(v[2]),
int(v[3]) if len(v) > 3 else 255)
elif f == "l": # string list
packed += struct.pack("!Bi", tc.TYPE_STRINGLIST, len(v))
for s in v:
packed += struct.pack("!i", len(s)) + s.encode("latin1")
elif f == "f": # float list
packed += struct.pack("!Bi", tc.TYPE_DOUBLELIST, len(v))
for x in v:
packed += struct.pack("!d", x)
elif f == "o":
packed += struct.pack("!Bdd", tc.POSITION_2D, *v)
elif f == "O":
packed += struct.pack("!Bddd", tc.POSITION_3D, *v)
elif f == "g":
packed += struct.pack("!Bdd", tc.POSITION_LON_LAT, *v)
elif f == "G":
packed += struct.pack("!Bddd", tc.POSITION_LON_LAT_ALT, *v)
elif f == "r":
packed += struct.pack("!Bi", tc.POSITION_ROADMAP, len(v[0])) + v[0].encode("latin1")
packed += struct.pack("!dB", v[1], v[2])
return packed
def _sendCmd(self, cmdID, varID, objID, format="", *values):
self._queue.append(cmdID)
packed = self._pack(format, *values)
length = len(packed) + 1 + 1 # length and command
if varID is not None:
if isinstance(varID, tuple): # begin and end of a subscription
length += 8 + 8 + 4 + len(objID)
else:
length += 1 + 4 + len(objID)
if length <= 255:
self._string += struct.pack("!BB", length, cmdID)
else:
self._string += struct.pack("!BiB", 0, length + 4, cmdID)
if varID is not None:
if isinstance(varID, tuple):
self._string += struct.pack("!dd", *varID)
else:
self._string += struct.pack("!B", varID)
self._string += struct.pack("!i", len(objID)) + objID.encode("latin1")
self._string += packed
return self._sendExact()
def _readSubscription(self, result):
if _DEBUG:
print("reading subscription", result.getDebugString())
result.readLength()
response = result.read("!B")[0]
isVariableSubscription = ((response >= tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE and
response <= tc.RESPONSE_SUBSCRIBE_BUSSTOP_VARIABLE) or
(response >= tc.RESPONSE_SUBSCRIBE_PARKINGAREA_VARIABLE and
response <= tc.RESPONSE_SUBSCRIBE_OVERHEADWIRE_VARIABLE))
objectID = result.readString()
if not isVariableSubscription:
domain = result.read("!B")[0]
numVars = result.read("!B")[0]
if isVariableSubscription:
while numVars > 0:
varID, status = result.read("!BB")
if status:
print("Error!", result.readTypedString())
elif response in self._subscriptionMapping:
self._subscriptionMapping[response].add(objectID, varID, result)
else:
raise FatalTraCIError(
"Cannot handle subscription response %02x for %s." % (response, objectID))
numVars -= 1
else:
objectNo = result.read("!i")[0]
for _ in range(objectNo):
oid = result.readString()
if numVars == 0:
self._subscriptionMapping[response].addContext(
objectID, self._subscriptionMapping[domain], oid)
for __ in range(numVars):
varID, status = result.read("!BB")
if status:
print("Error!", result.readTypedString())
elif response in self._subscriptionMapping:
self._subscriptionMapping[response].addContext(
objectID, self._subscriptionMapping[domain], oid, varID, result)
else:
raise FatalTraCIError(
"Cannot handle subscription response %02x for %s." % (response, objectID))
return objectID, response
def _subscribe(self, cmdID, begin, end, objID, varIDs, parameters):
format = "u"
args = [len(varIDs)]
for v in varIDs:
format += "u"
args.append(v)
if parameters is not None and v in parameters:
if isinstance(parameters[v], tuple):
format += parameters[v][0]
for a in parameters[v][1:]:
args.append(a)
elif isinstance(parameters[v], int):
format += "i"
args.append(parameters[v])
elif isinstance(parameters[v], float):
format += "d"
args.append(parameters[v])
else:
format += "s"
args.append(parameters[v])
result = self._sendCmd(cmdID, (begin, end), objID, format, *args)
if varIDs:
objectID, response = self._readSubscription(result)
if response - cmdID != 16 or objectID != objID:
raise FatalTraCIError("Received answer %02x,%s for subscription command %02x,%s." % (
response, objectID, cmdID, objID))
def _getSubscriptionResults(self, cmdID):
return self._subscriptionMapping[cmdID]
def _subscribeContext(self, cmdID, begin, end, objID, domain, dist, varIDs, parameters=None):
result = self._sendCmd(cmdID, (begin, end), objID, "uDu" + (len(varIDs) * "u"),
domain, dist, len(varIDs), *varIDs)
if varIDs:
objectID, response = self._readSubscription(result)
if response - cmdID != 16 or objectID != objID:
raise FatalTraCIError("Received answer %02x,%s for context subscription command %02x,%s." % (
response, objectID, cmdID, objID))
def _addSubscriptionFilter(self, filterType, params=None):
if filterType in (tc.FILTER_TYPE_NONE, tc.FILTER_TYPE_NOOPPOSITE,
tc.FILTER_TYPE_LEAD_FOLLOW):
# filter without parameter
assert params is None
self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "u", filterType)
elif filterType in (tc.FILTER_TYPE_DOWNSTREAM_DIST, tc.FILTER_TYPE_UPSTREAM_DIST,
tc.FILTER_TYPE_TURN, tc.FILTER_TYPE_FIELD_OF_VISION,
tc.FILTER_TYPE_LATERAL_DIST):
# filter with float parameter
self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "ud", filterType, params)
elif filterType in (tc.FILTER_TYPE_VCLASS, tc.FILTER_TYPE_VTYPE):
# filter with list(string) parameter
self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None, "ul", filterType, params)
elif filterType == tc.FILTER_TYPE_LANES:
# filter with list(byte) parameter
# check uniqueness of given lanes in list
lanes = set()
for i in params:
lane = int(i)
if lane < 0:
lane += 256
lanes.add(lane)
if len(lanes) < len(list(params)):
warnings.warn("Ignoring duplicate lane specification for subscription filter.")
self._sendCmd(tc.CMD_ADD_SUBSCRIPTION_FILTER, None, None,
(len(lanes) + 2) * "u", filterType, len(lanes), *lanes)
def hasGUI(self):
try:
self.gui.getIDList()
return True
except TraCIException:
return False
def load(self, args):
"""
Load a simulation from the given arguments.
"""
if self._traceFile:
self._traceFile.write("traci.load(%s)\n" % repr(args))
self._sendCmd(tc.CMD_LOAD, None, None, "l", args)
def simulationStep(self, step=0.):
"""
Make a simulation step and simulate up to the given second in sim time.
If the given value is 0 or absent, exactly one step is performed.
Values smaller than or equal to the current sim time result in no action.
"""
if self._traceFile:
args = "" if step == 0 else str(step)
self._traceFile.write("traci.simulationStep(%s)\n" % args)
if type(step) is int and step >= 1000:
warnings.warn("API change now handles step as floating point seconds", stacklevel=2)
result = self._sendCmd(tc.CMD_SIMSTEP, None, None, "D", step)
for subscriptionResults in self._subscriptionMapping.values():
subscriptionResults.reset()
numSubs = result.readInt()
responses = []
while numSubs > 0:
responses.append(self._readSubscription(result))
numSubs -= 1
self.manageStepListeners(step)
return responses
def getVersion(self):
command = tc.CMD_GETVERSION
result = self._sendCmd(command, None, None)
result.readLength()
response = result.read("!B")[0]
if response != command:
raise FatalTraCIError("Received answer %s for command %s." % (response, command))
return result.readInt(), result.readString()
def setOrder(self, order):
self._sendCmd(tc.CMD_SETORDER, None, None, "I", order)
def close(self, wait=True):
StepManager.close(self, True)
for listenerID in list(self._stepListeners.keys()):
self.removeStepListener(listenerID)
if self._socket is not None:
self._sendCmd(tc.CMD_CLOSE, None, None)
self._socket.close()
self._socket = None
if wait and self._process is not None:
self._process.wait()
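def _example_session(sumo_cfg="example.sumocfg"):
    """Illustrative only (added for clarity, not part of the original module): a
    typical TraCI session as driven through the high-level `traci` package, which
    creates a Connection like the one above. The config path is a placeholder."""
    import traci
    traci.start(["sumo", "-c", sumo_cfg])
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep()
    traci.close()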
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/traci/connection.py
|
connection.py
|
py
| 14,818 |
python
|
en
|
code
| 17 |
github-code
|
6
|
25457066690
|
import telegram
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException
import threading
class MessageSender:
def __init__(self, config):
self.lock = threading.Lock()
self.telegram_bot = None
if config["telegram"] is not None:
self.telegram_chat_id = config["telegram"]["chat_id"]
self.telegram_bot = telegram.Bot(token=config["telegram"]["token"])
self.twilio_client = None
if config["twilio"] is not None:
self.toNumber = config["twilio"]["toNumber"]
self.fromNumber = config["twilio"]["fromNumber"]
self.accountSid = config["twilio"]["accountSid"]
self.authToken = config["twilio"]["authToken"]
self.twilio_client = Client(self.accountSid, self.authToken)
def send_message(self, content, sender=None):
with self.lock:
if sender is not None:
content = "Sender: " + sender + ": " + content
if self.telegram_bot is not None:
self.telegram_bot.send_message(chat_id=self.telegram_chat_id, text=content)
if self.twilio_client is not None:
try:
self.twilio_client.messages.create(to=self.toNumber, from_=self.fromNumber,
body=content)
except (NameError, TwilioRestException):
pass
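# Illustrative usage (added for clarity; every value below is a placeholder and must
# be replaced with real credentials before this will actually send anything).
if __name__ == "__main__":
    example_config = {
        "telegram": {"chat_id": "123456789", "token": "123456:REPLACE-WITH-REAL-TOKEN"},
        "twilio": None,  # supply toNumber/fromNumber/accountSid/authToken to enable SMS
    }
    notifier = MessageSender(example_config)
    notifier.send_message("Item back in stock", sender="example-bot")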
|
wanmeihuali/Agressive-Store-Bots
|
MessageSender.py
|
MessageSender.py
|
py
| 1,434 |
python
|
en
|
code
| null |
github-code
|
6
|
73816585466
|
from collections import defaultdict
from typing import ClassVar, Self
from discord import Message, Thread
from discord.errors import HTTPException
from pydis_core.utils import scheduling
from pydis_core.utils.logging import get_logger
import bot
from bot.constants import Channels
from bot.exts.filtering._filter_context import Event, FilterContext
from bot.exts.filtering._settings_types.settings_entry import ActionEntry
from bot.exts.filtering._utils import FakeContext
from bot.utils.messages import send_attachments
log = get_logger(__name__)
SUPERSTAR_REASON = (
"Your nickname was found to be in violation of our code of conduct. "
"If you believe this is a mistake, please let us know."
)
async def upload_messages_attachments(ctx: FilterContext, messages: list[Message]) -> None:
"""Re-upload the messages' attachments for future logging."""
if not messages:
return
destination = messages[0].guild.get_channel(Channels.attachment_log)
for message in messages:
if message.attachments and message.id not in ctx.uploaded_attachments:
ctx.uploaded_attachments[message.id] = await send_attachments(message, destination, link_large=False)
class RemoveContext(ActionEntry):
"""A setting entry which tells whether to delete the offending message(s)."""
name: ClassVar[str] = "remove_context"
description: ClassVar[str] = (
"A boolean field. If True, the filter being triggered will cause the offending context to be removed. "
"An offending message will be deleted, while an offending nickname will be superstarified."
)
remove_context: bool
async def action(self, ctx: FilterContext) -> None:
"""Remove the offending context."""
if not self.remove_context:
return
if ctx.event in (Event.MESSAGE, Event.MESSAGE_EDIT):
await self._handle_messages(ctx)
elif ctx.event == Event.NICKNAME:
await self._handle_nickname(ctx)
elif ctx.event == Event.THREAD_NAME:
await self._handle_thread(ctx)
@staticmethod
async def _handle_messages(ctx: FilterContext) -> None:
"""Delete any messages involved in this context."""
if not ctx.message or not ctx.message.guild:
return
# If deletion somehow fails at least this will allow scheduling for deletion.
ctx.messages_deletion = True
channel_messages = defaultdict(set) # Duplicates will cause batch deletion to fail.
for message in {ctx.message} | ctx.related_messages:
channel_messages[message.channel].add(message)
success = fail = 0
deleted = list()
for channel, messages in channel_messages.items():
try:
await channel.delete_messages(messages)
except HTTPException:
fail += len(messages)
else:
success += len(messages)
deleted.extend(messages)
scheduling.create_task(upload_messages_attachments(ctx, deleted))
if not fail:
if success == 1:
ctx.action_descriptions.append("deleted")
else:
ctx.action_descriptions.append("deleted all")
elif not success:
if fail == 1:
ctx.action_descriptions.append("failed to delete")
else:
ctx.action_descriptions.append("all failed to delete")
else:
ctx.action_descriptions.append(f"{success} deleted, {fail} failed to delete")
@staticmethod
async def _handle_nickname(ctx: FilterContext) -> None:
"""Apply a superstar infraction to remove the user's nickname."""
alerts_channel = bot.instance.get_channel(Channels.mod_alerts)
if not alerts_channel:
log.error(f"Unable to apply superstar as the context channel {alerts_channel} can't be found.")
return
command = bot.instance.get_command("superstar")
if not command:
user = ctx.author
await alerts_channel.send(f":warning: Could not apply superstar to {user.mention}: command not found.")
log.warning(f":warning: Could not apply superstar to {user.mention}: command not found.")
ctx.action_descriptions.append("failed to superstar")
return
await command(FakeContext(ctx.message, alerts_channel, command), ctx.author, None, reason=SUPERSTAR_REASON)
ctx.action_descriptions.append("superstarred")
@staticmethod
async def _handle_thread(ctx: FilterContext) -> None:
"""Delete the context thread."""
if isinstance(ctx.channel, Thread):
try:
await ctx.channel.delete()
except HTTPException:
ctx.action_descriptions.append("failed to delete thread")
else:
ctx.action_descriptions.append("deleted thread")
def union(self, other: Self) -> Self:
"""Combines two actions of the same type. Each type of action is executed once per filter."""
return RemoveContext(remove_context=self.remove_context or other.remove_context)
|
python-discord/bot
|
bot/exts/filtering/_settings_types/actions/remove_context.py
|
remove_context.py
|
py
| 5,173 |
python
|
en
|
code
| 1,206 |
github-code
|
6
|
5187889784
|
def maxProfit(prices):
"""
:type prices: List[int]
:rtype: int
"""
total = 0
for i in range(len(prices) - 1):
total += max(prices[i + 1] - prices[i], 0)
return total
Input = [7,1,5,3,6,4]
print(maxProfit(Input))
# Output: 7
# Explanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.
# Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.
Input = [1,2,3,4,5]
print(maxProfit(Input))
# Output: 4
# Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
# Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
# engaging multiple transactions at the same time. You must sell before buying again.
Input = [7,6,4,3,1]
print(maxProfit(Input))
# Output: 0
# Explanation: In this case, no transaction is done, i.e. max profit = 0.
|
IshGill/DSA-Guides
|
Array Problems/Buy_sell_stock.py
|
Buy_sell_stock.py
|
py
| 936 |
python
|
en
|
code
| 9 |
github-code
|
6
|
32966110161
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 08:45:54 2020
@author: rishav
"""
minPer = 100
marks = {'Krishna': [67, 68, 69],
         'Arjun': [45, 98, 63],
         'Malika': [52, 56, 60]}
for i in marks:
    x = sum(marks[i]) / 3
if(x<minPer):
minPer=x
index = i
print(index)
|
rishavrajj/my-python-codes
|
finding_percent.py
|
finding_percent.py
|
py
| 286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10899943389
|
import cv2
import numpy as np
from keras.models import load_model
image = cv2.imread("Bore2.jpg", 0)
image = cv2.resize(image, (48, 48))
image = image[..., np.newaxis]
# dst = np.expand_dims(image, axis=1)
print(image.shape)
# exit()
model = load_model("model_v6_23.hdf5")
predicted_class = np.argmax(model.predict(image[np.newaxis, ...]))  # add a batch dimension before predicting
print(predicted_class)
exit()
#
# image = cv2.imread("1.jpg")
# cv2.imshow("asli", image)
# cv2.waitKey()
# print(image.shape)
# # image_size=(48,e48)
# width, height = 48, 48
# # face = np.asarray(image).reshape(width, height)
# frame = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
# # face = cv2.resize(image.astype('uint8'),image_size)
# print(frame.shape)
# cv2.imshow("", frame)
# cv2.waitKey()
# from collections import Counter
#
#
# def most_frequent(List):
# occurence_count = Counter(List)
# print(occurence_count)
# print(type(occurence_count))
# charater = occurence_count.most_common(1)[0][0]
# print(charater)
#
# exit()
# # chracter = occurence_count.most_common(1)[0][0]
# # repr(chracter)
#
#
# List =['Cat', 'Cat', 'Dog']
# print(most_frequent(List))
|
hassanahmed95/My_True_Face
|
ML_training/test_file.py
|
test_file.py
|
py
| 1,134 |
python
|
en
|
code
| 1 |
github-code
|
6
|
75061852026
|
import numpy as np
import p3.pad
import p3.config as c
from p3.agent import Agent
from p3.state import ActionState
class Fox:
def __init__(self):
self.agent = 0 # individual agent number
self.agents = [] # list of agents
self.generation = 0 # generation number
def reset(self):
self.agent = 0
self.agents = []
# Add agent to agents list
def add_agent(self, nnet):
self.agents.append(Agent(len(self.agents), nnet))
# Return list of fitness values for all agents
def get_ind_fitness(self):
fits = []
for a in self.agents:
fits.append(a.fitness)
return fits
# Advance frame by 1
def advance(self, state, pad, mm):
a = self.agents[self.agent]
# For every agent in the population
if self.agent < len(self.agents):
if (state.players[1].action_state == ActionState.DeadDown):
a.damage_dealt.append(-1)
else:
a.damage_dealt.append(state.players[1].percent) # append cpu's percent
if (state.players[2].action_state == ActionState.DeadDown):
a.damage_received.append(-1)
else:
a.damage_received.append(state.players[2].percent) # append ai's percent
if (state.players[1].facing == state.players[2].facing):
a.fitness[0] += .1
# Collect fitness and change agent every x frames
if state.frame % 1200 == 0:
a.pause(state, pad)
a.fit(state, pad) # See Agent class for more on fit()
print(a.number, ": [{0:.2f}".format(a.fitness[1]), ", {0:.2f}] ".format(a.fitness[0]))
a.restart(state, pad)
self.agent += 1
# Change agent move
elif state.frame % 2 == 0:
if (state.players[2].pos_x == a.last_pos):
a.fitness[0] += .2
a.last_pos = state.players[2].pos_x
a.advance(state, pad) # See Agent class for more on advance()
return self.agent
|
gabriel-richardson/NSGA-Smash-AI
|
p3/fox.py
|
fox.py
|
py
| 2,122 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42926685316
|
def Solve(ln_0, ln_1, ln_s):
if ln_s == "-":
result = ln_0 - ln_1
if ln_s == "+":
result = ln_0 + ln_1
if ln_s == "*":
result = ln_0 * ln_1
return result
while True:
try:
T = int(input().strip())
#print(n, m)
for i in range(T):
k = int(input().strip())
li2 = input().strip().split()
ln_0 = int(li2[0])
ln_1 = int(li2[1])
ln_s = li2[2]
print(Solve(ln_0, ln_1, ln_s))
except EOFError:
break
except ValueError:
continue
|
ppalantir/axjingWorks
|
algorithm_note/anExams/tencent3exp4.py
|
tencent3exp4.py
|
py
| 588 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25968013406
|
import argparse
import time
from threading import Thread
import requests
class RequestThread(Thread):
def __init__(self, url):
self.url = url
super(RequestThread, self).__init__(target=self.make_request)
def make_request(self):
requests.get(self.url)
class Worker(object):
def __init__(self):
self.thread = None
@property
def busy(self):
if self.thread and not self.thread.is_alive():
self.thread = None
return self.thread is not None
def run_thread(self, thread):
self.thread = thread
self.thread.start()
def join(self):
self.thread.join()
class WorkerGroup(object):
def __init__(self, num_workers):
self.workers = [self._generate_worker() for i in range(num_workers)]
def get_available_worker(self):
for worker in self.workers:
if not worker.busy:
return worker
time.sleep(0.5)
return self.get_available_worker()
def _generate_worker(self):
return Worker()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('workers', type=int)
parser.add_argument('total_requests', type=int)
args = parser.parse_args()
threads = [RequestThread(args.url) for i in range(args.total_requests)]
worker_group = WorkerGroup(args.workers)
while threads:
worker = worker_group.get_available_worker()
worker.run_thread(threads.pop())
for worker in worker_group.workers:
worker.join()
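# Example invocation (illustrative; the URL and counts are placeholders):
#   python threaded_requests.py http://localhost:8000 4 100
# i.e. issue 100 GET requests against the URL using at most 4 concurrent workers.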
|
wgaggioli/elk-example
|
threaded_requests.py
|
threaded_requests.py
|
py
| 1,602 |
python
|
en
|
code
| 6 |
github-code
|
6
|
11047387601
|
from astropy.wcs.utils import wcs_to_celestial_frame as wcs
from astropy.coordinates import SkyCoord
import astropy.units as u
from scipy import ndimage
import numpy as np
import math as m
__author__ = "Norbert Gyenge"
__email__ = "[email protected]"
def Sunspot_coord(photosphere_full, dx, dy, spot):
'''Sunspot coordinate estimation.
Parameters
----------
photosphere_full - Fits image.
dx, dy - Region of interest box coordinate
spot - Masked submap.
Returns
-------
array[0] - x (arcsec)
array[1] - y (arcsec)
array[2] - r (polar)
array[3] - theta (polar)
array[4] - b (Carrington)
array[5] - l (Carrington)
array[6] - lcm (Carrington)
References
----------
Thompson (2006), A&A, 449, 791'''
# The origo of the coordinate system is the left bottom corner.
y_on_cut, x_on_cut = ndimage.measurements.center_of_mass(spot)
# Restore the region of interest box corner coordinate in pixels
#x_on_im = dx[0] / photosphere_full.scale[0]
#y_on_im = dy[0] / photosphere_full.scale[1]
x_on_im = dx[0]
y_on_im = dy[0]
# Estimate the spot's coordinate in pixels
x, y = (x_on_cut * u.pix) + x_on_im, (y_on_cut * u.pix) + y_on_im
# Convert the spot's coordinate in arcsecs
c = photosphere_full.pixel_to_world(x, y)
Solar_X, Solar_Y = c.Tx, c.Ty
# Polar coordinates
r = np.sqrt(pow(Solar_X.value, 2) + pow(Solar_Y.value, 2)) * u.arcsec
theta = m.atan2(Solar_Y.value, Solar_X.value) * (180 / np.pi) * u.deg
# Use SkyCoord for further conversion
c = SkyCoord(Solar_X, Solar_Y, frame=wcs(photosphere_full.wcs))
# Convert to heliographic stonyhurst
d = c.heliographic_stonyhurst
# Extract the latitude and LCM
latitude = d.lat
lcm = d.lon
# Convert to heliographic Carrington for longitude
longitude = lcm + (photosphere_full.meta['crln_obs'] * u.deg)
return [Solar_X, Solar_Y, r, theta, latitude, longitude, lcm]
|
gyengen/SheffieldSolarCatalog
|
engine/ssc/sunspot/coordinates.py
|
coordinates.py
|
py
| 2,040 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4756037188
|
"""This module provides the CLI for the wrangle-ukds-trade-directories app."""
from . import __app_name__
import argparse
from pathlib import Path
def typecast_args(args):
args.input = Path(args.input)
args.output = Path(args.output)
return args
def test_args(args):
if not Path(args.input).is_dir():
raise RuntimeError("The path specified does not exist")
Path(args.output).mkdir(parents=True, exist_ok=True)
if not Path(args.output).is_dir():
raise RuntimeError("The output path specified does not exist")
return True
def get_args():
# Create the parser
p = argparse.ArgumentParser(
prog=__app_name__, description="Wrangle the UKDS Trade Directories data folder."
)
# Add the arguments
p.add_argument(
"input",
metavar="input",
type=str,
help="The input path where the UKDS trade directories can be found.",
)
p.add_argument(
"output",
metavar="output",
type=str,
help="The output path where the consolidated UKDS trade directories should be located.",
)
# Execute the parse_args() method
args = p.parse_args()
# Set types
args = typecast_args(args)
# Test args
test_args(args)
return args
|
Living-with-machines/wrangle-ukds-trade-directories
|
wrangle_ukds_trade_directories/argparse.py
|
argparse.py
|
py
| 1,285 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26423979164
|
import numpy as np
import torch
import time
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import collections
import torch.utils.data as data
class Model(torch.nn.Module):
def __init__(self, input_dim=784, output_dim=10):
super(Model, self).__init__()
self.linear = torch.nn.Linear(input_dim, output_dim)
self.input_dim = input_dim
self.output_dim = output_dim
def forward(self, x):
outputs = self.linear(x)
return outputs
class MyDataset(data.Dataset):
def __init__(self, images, labels):
self.images = images
self.labels = labels
def __getitem__(self, index): # return tensor type
img, target = self.images[index], self.labels[index]
return img, target
def __len__(self):
return len(self.images)
def get_mnist_train_list():
# build-in mnist dataset
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=False)
img_list = [ x for x, y in train_dataset ]
lab_list = [ y for x, y in train_dataset ]
return img_list, lab_list
def exp_data():
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=False)
for (x, y) in train_dataset:
print( type(x), type(y) )
#exp_data()
def transform_train_list(images_list, labels_list, bt_size):
train_images = [ img for img in images_list ]
train_labels = [ lab for lab in labels_list ]
#print("type", type(train_images[0]))
new_dataset = MyDataset(train_images, train_labels)
train_loader = torch.utils.data.DataLoader(dataset=new_dataset, batch_size=bt_size, shuffle=False)
return train_loader
def evaluate(model_name, test_images, test_labels, l_rate=0.001):
test_model = load_model(model_name)
test_loader = transform_train_list(test_images, test_labels, 1)
# init
correct = 0
total = 0
loss = 0
#optimizer = torch.optim.SGD(test_model.parameters(), lr=l_rate)
criterion = torch.nn.CrossEntropyLoss()
# get acc & loss
for i, (img, lab) in enumerate(test_loader):
img = Variable(img.view(-1, 28 * 28))
lab = Variable(lab)
#optimizer.zero_grad()
outputs = test_model(img)
loss += criterion(outputs, lab)
#optimizer.step()
_, predicted = torch.max(outputs.data, 1)
total+= lab.size(0)
correct+= (predicted == lab).sum()
# get average
loss /= len(test_labels)
accuracy = int(correct)/total
return loss, accuracy
# Training function
def train(train_model, train_raw_img, train_raw_lab, E, bt_size=100, epochs=1, lr_rate=0.001): # E means iteration
# get train loader
train_loader = transform_train_list(train_raw_img, train_raw_lab, bt_size)
    # cross-entropy loss computed on top of the softmax distribution
criterion = torch.nn.CrossEntropyLoss()
#SGD
optimizer = torch.optim.SGD(train_model.parameters(), lr=lr_rate)
# train
tms = []
#tic = time.time()
for epoch in range(epochs):
print('epoch {}:'.format(epoch + 1))
for i in range(E):
print("--\titeration {}".format(i+1))
img, lab = next(iter(train_loader))
img = Variable(img.view(-1, 28 * 28))
lab = Variable(lab)
optimizer.zero_grad()
tic = time.time()
outputs = train_model(img)
#print(lab)
loss = criterion(outputs, lab)
loss.backward()
optimizer.step()
toc = time.time()
tms.append(toc-tic)
#print(loss)
#toc = time.time()
return np.sum(tms)
def save_model(model,path):
# torch.save(model, path, _use_new_zipfile_serialization=False)
torch.save(model, path)
def load_model(model_name):
model = torch.load(model_name)
return model
def aggregate(client_select, client_set, model_name):
models_list = []
for i in client_select:
client = client_set[i]
name = 'models/model{}.pkl'.format(client.ID)
model = torch.load(name)
models_list.append(model)
models_dict = [i.state_dict() for i in models_list]
weight_keys = list(models_dict[0].keys())
server_model_dict = collections.OrderedDict()
for key in weight_keys:
key_sum = 0
sumation = 0
for i in range(len(models_list)):
client = client_set[ client_select[i] ]
key_sum += models_dict[i][key] * client.num_traindata
sumation += client.num_traindata
server_model_dict[key] = key_sum / sumation
server_model = torch.load(model_name)
server_model.load_state_dict(server_model_dict)
torch.save(server_model, model_name, _use_new_zipfile_serialization=False)
#print('aggregation done!')
def predict(model_name, img):
img = Variable(img.view(-1, 28 * 28))
model = torch.load(model_name)
lab = model(img)
x = -100
p = 0
#print(lab[0])
print("hh", lab)
for i in range(len(lab[0])):
if lab[0][i].double() > x:
print(lab[0][i].double(), i)
x = lab[0][i].double()
p = i
#print("hhhhhh", lab, p)
return p
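# Illustrative end-to-end sketch (added for clarity; not part of the original file).
# It only chains the helpers defined above; the slice sizes, iteration count and the
# model path are arbitrary placeholder choices, and the MNIST data is assumed to be
# present under ./data already.
if __name__ == "__main__":
    imgs, labs = get_mnist_train_list()
    model = Model()
    elapsed = train(model, imgs[:1000], labs[:1000], E=5, bt_size=100)
    save_model(model, "model_demo.pkl")
    loss, acc = evaluate("model_demo.pkl", imgs[1000:1100], labs[1000:1100])
    print("train time: {:.2f}s, loss: {:.4f}, accuracy: {:.2%}".format(elapsed, float(loss), acc))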
|
WENLIXIAO-CS/FL-IoT-Demo
|
Fed-IoT-demo-lightly/Pytorch_Model.py
|
Pytorch_Model.py
|
py
| 5,470 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34540300851
|
import configparser
import argparse
import json
import boto3
import utility
from collections import OrderedDict
global emr_configuration, emr_applications, cluster_config, optional_instance_config
emr_configuration = "emr_cluster.config"
emr_applications = ["Hadoop", "Spark", "Ganglia"]
cluster_config = "source/cluster_creator/cluster_config.json"
optional_instance_config = {"vpc_subnet": "Ec2SubnetId",
"master_security_group": "EmrManagedMasterSecurityGroup",
"slave_security_group": "EmrManagedSlaveSecurityGroup",
"service_access_security_group": "ServiceAccessSecurityGroup"}
def check_configuration(config):
if not utility.check_config(config, "EMR", ["release_label", "software_installer_location",
"genome_folder_location"]):
return False
if not utility.check_upload_config(config["EMR"], "upload_bootstrap_scripts", "bootstrap_scripts",
"bootstrap_scripts_local_location", "bootstrap_scripts_s3_location"):
return False
if not utility.check_config(config, "EMR_nodes", ["key_name", "service_role", "instance_profile",
"master_instance_type", "master_instance_count",
"core_instance_type", "core_instance_count"]):
return False
release_version = config["EMR"]["release_label"].split("-")[-1].split(".")
major_release_version = int(release_version[0])
minor_release_version = int(release_version[1])
if config["EMR_nodes"].get("custom_ami_id", "").strip() != "" \
and not (major_release_version >= 5 and minor_release_version >= 7):
print("\033[31mERROR: \033[0mCustom AMI can only be used with EMR release >= 5.7")
return False
return True
def build_command(config):
global emr_applications, cluster_config
emr_arguments = OrderedDict()
# EMR configs
if config["EMR"]["name"]:
emr_arguments["Name"] = config["EMR"]["name"]
if config["EMR"]["log_uri"]:
emr_arguments["LogUri"] = config["EMR"]["log_uri"]
emr_arguments["ReleaseLabel"] = config["EMR"]["release_label"]
# Instances config
emr_arguments["Instances"] = OrderedDict()
instance_groups = []
for node_type in ["master", "core"]:
instance_specification = {}
if int(config["EMR_nodes"][node_type + "_instance_count"]) == 0:
continue
instance_specification['Name'] = node_type + "_node"
instance_specification['InstanceRole'] = node_type.upper()
if config["EMR_nodes"].getboolean(node_type + "_instance_spot"):
instance_specification['Market'] = "SPOT"
instance_specification['BidPrice'] = config["EMR_nodes"][node_type + "_instance_bid_price"]
else:
instance_specification['Market'] = "ON_DEMAND"
instance_specification['InstanceType'] = config["EMR_nodes"][node_type + "_instance_type"]
instance_specification['InstanceCount'] = int(config["EMR_nodes"][node_type + "_instance_count"])
instance_groups.append(instance_specification)
emr_arguments["Instances"]["InstanceGroups"] = instance_groups
if config["EMR_nodes"]["key_name"]:
emr_arguments["Instances"]["Ec2KeyName"] = config["EMR_nodes"]["key_name"]
emr_arguments["Instances"]["KeepJobFlowAliveWhenNoSteps"] = True
for instance_config in optional_instance_config:
if instance_config in config["EMR_nodes"] and config["EMR_nodes"][instance_config].strip() != "":
emr_arguments["Instances"][optional_instance_config[instance_config]] = config["EMR_nodes"][instance_config]
emr_arguments["Steps"] = [
{
"Name": "Setup Hadoop Debugging",
"ActionOnFailure": "TERMINATE_CLUSTER",
"HadoopJarStep": {
"Jar": "/var/lib/aws/emr/step-runner/hadoop-jars/command-runner.jar",
"MainClass": "state-pusher-script"
}
}
]
if "bootstrap_scripts" in config["EMR"]:
bootstrap_actions = []
for bootstrap_script in config["EMR"]["bootstrap_scripts"].split(","):
bootstrap_script = bootstrap_script.strip()
bootstrap_action_args = []
if bootstrap_script == "install_software.sh":
bootstrap_action_args = [config["EMR"]["software_installer_location"]]
elif bootstrap_script == "copy_reference.sh":
bootstrap_action_args = [config["EMR"]["genome_folder_location"]]
bootstrap_actions.append({
"Name": bootstrap_script,
"ScriptBootstrapAction": {
"Path": config["EMR"]["bootstrap_scripts_s3_location"].rstrip("/") + "/" + bootstrap_script,
"Args": bootstrap_action_args
}
})
emr_arguments["BootstrapActions"] = bootstrap_actions
emr_arguments["Applications"] = [{'Name': app} for app in emr_applications]
emr_arguments["Configurations"] = json.loads(open(cluster_config).read()) if cluster_config else []
emr_arguments["VisibleToAllUsers"] = True
emr_arguments["JobFlowRole"] = config["EMR_nodes"]["instance_profile"]
emr_arguments["ServiceRole"] = config["EMR_nodes"]["service_role"]
if "custom_ami_id" in config["EMR_nodes"]:
emr_arguments["CustomAmiId"] = config["EMR_nodes"]["custom_ami_id"]
if "ebs_root_volume_size" in config["EMR_nodes"]:
emr_arguments["EbsRootVolumeSize"] = config["EMR_nodes"]["ebs_root_volume_size"]
return emr_arguments
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Cluster launcher for spark-based RNA-seq Pipeline')
parser.add_argument('--config', '-c', action="store", dest="emr_config", help="EMR configuration file")
parser.add_argument('--dry-run', '-d', action="store_true", dest="dry_run",
help="Produce the configurations for the cluster to be created")
parser_result = parser.parse_args()
if parser_result.emr_config and parser_result.emr_config.strip() != "":
emr_configuration = parser_result.emr_config
config = configparser.ConfigParser()
config.read(emr_configuration)
if check_configuration(config):
if config["EMR"].get("upload_bootstrap_scripts", "False") == "True":
utility.upload_files_to_s3(
[(bootstrap_script.strip(), config["EMR"]["bootstrap_scripts_local_location"],
config["EMR"]["bootstrap_scripts_s3_location"])
for bootstrap_script in config["EMR"]["bootstrap_scripts"].split(",")],
parser_result.dry_run)
emr_argument = build_command(config)
if not parser_result.dry_run:
emr_client = boto3.client("emr")
cluster_launch = emr_client.run_job_flow(**emr_argument)
print("Cluster has been launched with ID", cluster_launch["JobFlowId"])
else:
print("\n".join(["{} = {}".format(*emr_arg) for emr_arg in list(emr_argument.items())]))
|
VCCRI/Falco
|
launch_cluster.py
|
launch_cluster.py
|
py
| 7,234 |
python
|
en
|
code
| 37 |
github-code
|
6
|
41744376400
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 14:43:47 2021
For plotting: COM data structure is [RGB,YX,FRAME], so if you want the X coordinate of the
red float on the 69th frame, you type COM[0,1,69]
@author: miles
"""
import glob
import os
"""directories and data info"""
data_dir='switch/cycle/'
file_list=sorted(glob.glob(data_dir+'COMs/*.out'), key=os.path.getmtime)
with open(data_dir+'fullCOMs','w') as outfile:
for fname in file_list:
with open(fname) as infile:
for line in infile:
outfile.write(line)
|
Faaborg/float_tracker
|
COM_concat.py
|
COM_concat.py
|
py
| 601 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72764469628
|
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
import numpy as np
def get_wordcloud(data, stop_words):
vectorizer = TfidfVectorizer(
use_idf=False, stop_words=stop_words, ngram_range=(2, 2))
vectors = vectorizer.fit_transform(data)
counts = np.array(vectors.sum(axis=0))[0]
dico = dict()
words = vectorizer.get_feature_names_out()
for i in range(len(words)):
w = words[i]
dico[w] = counts[i]
return WordCloud(background_color='white', stopwords=stop_words, max_words=100).generate_from_frequencies(dico)
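# Illustrative usage (added for clarity; the review texts and stop-word list are
# placeholders). A WordCloud object can be rendered directly with matplotlib:
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    reviews = [
        "great product really great product",
        "really bad battery life bad battery",
    ]
    cloud = get_wordcloud(reviews, stop_words=["the", "a"])
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis("off")
    plt.show()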
|
Amayas29/review-analysis
|
src/iads/nlp/visualisation.py
|
visualisation.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12168811050
|
# import requests module
import requests
import logging
import time
# Setting up Logging
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger()
# URL
url = "https://google.com"
# Make request method
def make_request(url):
logging.info("Fetching URL")
try:
response = requests.get(url)
# print response
logger.info(f"Response from URL: {str(response).split()[-1]}")
# print elapsed time
logger.info(f"Elapsed time: {response.elapsed.total_seconds()}")
except requests.exceptions.RequestException as e:
logger.critical(f"Unable to fecth URL: {url}")
raise SystemExit(e)
def main():
logging.info("Starting monitoring application")
while True:
# Call make_request method
make_request(url)
# Run every 60 seconds
time.sleep(60)
if __name__ == "__main__":
main()
|
tolkiger/terraform-ecs-fargate-cicd-pipeline
|
monitoring.py
|
monitoring.py
|
py
| 891 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3708389787
|
import re
import redis
import pickle
import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse
import errors
red = redis.StrictRedis(host='redis', port=6379, db=0)
try:
red.get('test')
except redis.ConnectionError:
red = None
_POST_COMMENT_URL = \
'https://telopeapark.managebac.com/groups/{}/messages/{}/comments'
_CACHE_EXPIRE = 5 * 60 # 5min
class Messages(list):
'''
Represents the :class:`Message` s for a given class on managebac
Gets the messages as :class:`LazyMessage` s
Raises:
BadToken, ManageBacCommunicationException
'''
def __init__(self, url, token):
r = requests.get(url + '/archive', cookies=token)
if r.ok and r.status_code == 200:
soup = BeautifulSoup(r.text)
class_name = soup.h1.div.next_sibling.next_sibling.text
for topic in soup.findAll(class_='topic'):
url = topic.a['href']
self.append(LazyMessage(
id_=int(re.search('/messages/([0-9]+)', url).group(1)),
class_id=int(re.search(
'/[classesgroups]+/([0-9]+)/', url).group(1)),
class_name=class_name,
by=re.search('by\n(.+)', topic.label.text).group(1),
title=topic.a.text
))
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
class Message():
'''
Represents a message that you post on managebac
The constructor downloads a message and fill the object
Args:
* `url` (str) - url of the message
* `token` - returned by :func:`managebac.login`
Sets Values:
* `id_` (int)
* `title` (string)
* `by` (string)
* `text` (string) - just a string, no HTML
* `time` (:class:`datetime.datetime`)
* `avatar` (string): a image URL
* `comments` (list of :class:`Comment`)
* `class_name` (string)
* `class_id` (int)
Raises:
BadToken, ManageBacCommunicationException
'''
def __init__(self, url, token):
r = requests.get(url, cookies=token)
if r.ok and r.status_code == 200:
self.id_ = int(re.search('/messages/([0-9]+)', url).group(1))
self.class_id = int(re.search(
'/[classesgroups]+/([0-9]+)/', url).group(1))
soup = BeautifulSoup(r.text)
self.class_name = soup.h1.div.next_sibling.next_sibling.text
message = soup.find(class_='reply_target')
self.avatar = message.img['src']
self.time = parse(message.find(class_='time').text)
self.by = message.strong.text.strip()
self.title = message.a.text
self.text = message.find(class_='content').text
self.comments = []
for el in message.find_next_siblings(class_='topic'):
self.comments.append(Comment(
avatar=el.img['src'],
time=parse(el.find(class_='time').text),
by=el.strong.text.strip(),
text=el.find(class_='content').text
))
self.loaded = True
if red:
cache_id = 'cache:message:{}'.format(self.id_)
red.set(cache_id, pickle.dumps(self))
red.expire(cache_id, _CACHE_EXPIRE)
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
def post_comment(self, text, token):
'''
Post a comment below the message on managebac.
Args:
* `text` (str) - plain text to post
* `token` - the users login from :func:`managebac.login`
'''
r = requests.post(_POST_COMMENT_URL.format(self.class_id, self.id_),
cookies=token, data={'post[body]': text})
if r.ok and r.status_code == 200:
return
elif r.status_code == 302:
raise errors.BadToken
else:
raise errors.ManageBacCommunicationException
def __unicode__(self):
return u'Message({} said "{}":"{}" ({}), at {} in {} ({}), {})'.format(
self.by, self.title, self.text, self.id_, self.time,
self.class_name, self.class_id, map(unicode, self.comments))
class LazyMessage(Message):
'''
A lazy loaded message class
By default, it only includes the following attributes:
* `id_` (int)
* `title` (string)
* `by` (string)
* `class_name` (str)
* `class_id` (int)
It also introduces the `loaded` (bool) attribute
'''
def __init__(self, **kwargs):
self.loaded = False
for k, v in kwargs.iteritems():
setattr(self, k, v)
if red:
old = red.get('cache:message:{}'.format(self.id_))
if old:
self = pickle.loads(old)
self.loaded = True
def load(self, token):
'''
Same as :class:`Message`, but with the URL autogenerated
'''
Message.__init__(self, 'https://telopeapark.managebac.com/groups/{}'
'/messages/{}'.format(self.class_id, self.id_),
token)
def __unicode__(self):
if self.loaded:
return Message.__unicode__(self)
return u'LazyMessage({} said {} ({}), in {} ({}))'.format(
self.by, self.title, self.id_, self.class_name, self.class_id)
class Comment():
'''
A (dumb) object that represents a comment on a :class:`Message`
The constructor makes a new Comment from the kwargs. Expects the
same args as a :class:`Message`, but without the `id_`,
`title` or `class_*`
'''
def __init__(self, **kwargs):
'''
'''
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __unicode__(self):
return u'Comment({} said "{}", at {})'.format(
self.by, self.text, self.time)
|
samdroid-apps/ManageBacToTheFuture
|
lib/message.py
|
message.py
|
py
| 6,140 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36093046858
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import mock
from napixd.exceptions import InternalRequestFailed
from napixd.application import Napixd
from napixd.services.contexts import NapixdContext
from napixd.loader.loader import Loader, Load
from napixd.http.router.router import Router
from napixd.http.request import Request
class MyService(object):
def __init__(self, mgr, alias, conf=None):
self.alias = alias
self.url = self.alias
def setup_bottle(self, app):
app.route('/' + self.url, self.keep)
def keep(self):
pass
def __eq__(self, other):
return self.__class__ == other.__class__ and self.alias == other.alias
class TestReload(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.patch_service = mock.patch('napixd.application.Service', MyService)
def setUp(self):
self.Service = self.patch_service.start()
loader = mock.Mock(spec=Loader)
self.load = load = loader.load.return_value = mock.Mock(spec=Load)
self.m1 = m1 = mock.Mock(alias='m1')
self.m2 = m2 = mock.Mock(alias='m2')
load.managers = [m1, m2]
load.new_managers = []
load.old_managers = []
load.error_managers = []
self.server = server = mock.MagicMock(spec=Router)
self.napixd = Napixd(loader, server)
load.managers = []
def tearDown(self):
self.patch_service.stop()
def test_add_filter(self):
self.server.add_filter.assert_called_once_with(self.napixd)
def test_as_plugin(self):
cb = mock.Mock()
req = mock.Mock()
r = self.napixd(cb, req)
self.assertEqual(r, cb.return_value)
cb.assert_called_once_with(NapixdContext(self.napixd, req))
def test_find_service(self):
s = self.napixd.find_service('m1')
self.assertEqual(s, MyService(None, 'm1'))
def test_find_not_service(self):
self.assertRaises(InternalRequestFailed, self.napixd.find_service, 'm3')
def test_zero(self):
assert not self.server.route.assert_has_calls([
mock.call('/', self.napixd.slash),
mock.call('/m1', mock.ANY),
mock.call('/m2', mock.ANY),
])
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2'])
def test_reload_new(self):
assert not self.server.route.reset_mock()
m3 = mock.Mock(alias='m3')
self.load.new_managers = [m3]
self.napixd.reload()
self.server.route.assert_called_once_with('/m3', mock.ANY)
self.assertEqual(self.server.unroute.call_count, 0)
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2', '/m3'])
def test_reload_old(self):
self.server.route.reset_mock()
self.load.old_managers = [mock.Mock(alias='m2')]
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.assertEqual(self.server.route.call_count, 0)
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1'])
def test_reload_error(self):
self.server.route.reset_mock()
error = mock.Mock(alias='m2')
self.load.old_managers = [mock.Mock(alias='m2')]
self.load.error_managers = [error]
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.server.route.assert_has_calls([
mock.call('/m2', mock.ANY),
mock.call('/m2/', mock.ANY, catchall=True),
])
self.assertEqual(self.napixd.slash(mock.Mock(spec=Request)),
['/m1', '/m2'])
def test_reload_error_and_error(self):
self.load.old_managers = [mock.Mock(alias='m2')]
self.load.error_managers = [mock.Mock(alias='m2')]
self.napixd.reload()
self.server.reset_mock()
error = mock.Mock(alias='m2')
self.load.old_managers = []
self.load.error_managers = [error]
self.server.__contains__.return_value = True
self.napixd.reload()
self.server.unroute.assert_called_once_with('/m2', all=True)
self.server.route.assert_has_calls([
mock.call('/m2', mock.ANY),
mock.call('/m2/', mock.ANY, catchall=True),
])
|
napix/NapixServer
|
tests/test_application.py
|
test_application.py
|
py
| 4,444 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3728793373
|
groupA=0
groupB=0
groupC=0
groupD=0
n=int(input("ponga # estudiantes: "))
i=0
while i < n:
alt=int(input("altura estudiantes: "))
if alt<=150:
groupA +=1
print("Hay", groupA,"alumnos en grupoA")
elif 150<alt<=165:
groupB +=1
print("Hay", groupB,"alumnos en grupoB")
elif 165<alt<=180:
groupC +=1
print("Hay", groupC,"alumnos en grupoC")
elif 180 < alt:
groupD +=1
print("Hay", groupD,"alumnos en grupoD")
else :
print ("invalid values")
i+=1
|
caroortegaa/Semestre-1
|
Ejercicios/Estudiantes.py
|
Estudiantes.py
|
py
| 545 |
python
|
es
|
code
| 0 |
github-code
|
6
|
31994054521
|
#coding: utf-8
import json, random, os
import hashlib
import requests
from flask_babel import _
from webapp import app
# Text translation backed by the Baidu Translate API
def translate(text, source_language, dest_language):
if 'BD_TRANSLATOR_KEY' not in app.config or not app.config['BD_TRANSLATOR_KEY']:
return _('Error: the translation service is not configured.')
url = "http://api.fanyi.baidu.com/api/trans/vip/translate"
appid = '20200321000402156'
    salt = random.randint(32768, 65536)  # generate a random salt
sign = appid + text +str(salt) + app.config['BD_TRANSLATOR_KEY']
m = hashlib.new('md5')
m.update(sign.encode(encoding='utf-8'))
    msign = m.hexdigest()  # MD5 digest of the raw signature string
    if dest_language == 'es':  # pybabel and the Baidu API use different codes for Spanish
dest_language = 'spa'
data= {
'q': text,
'from':source_language or 'auto',
'to':dest_language,
'appid':appid,
'salt':salt,
'sign':msign
}
r = requests.get(url, params=data)
if r.status_code != 200:
return _('Error: the translation service failed.')
# print(json.loads(r.content.decode('utf-8')))
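    # the Baidu API replies with JSON; the translated text is read from trans_result[0]['dst']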
return json.loads(r.content.decode('utf-8'))['trans_result'][0]['dst']
if __name__ == '__main__':
result = translate('我命由我不由天','', 'spa')
print(result)
print(type(result))
|
huawenjin1995/Microblog
|
webapp/translate.py
|
translate.py
|
py
| 1,421 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8779322237
|
class Node:
def __init__(self,value) :
self.value = value
self.next = None
class LinkedList:
def __init__(self) :
self.head = None
def add_node(self,value):
node = Node(value)
if self.head is None:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
def add_node_begening(self,value,target):
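        # insert a node holding `value` immediately before the first node whose value
        # equals `target` (despite the name, this is "insert before target", not a push to the head)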
new_node = Node(value)
if self.head.value == target:
new_node.next = self.head
self.head = new_node
else:
current = self.head
while current.next:
if current.next.value == target:
new_node.next = current.next
current.next = new_node
current = current.next
current = current.next
def delete_node(self,value):
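        # unlink the first node whose value matches; assumes the value is present
        # (otherwise current.next becomes None and an AttributeError is raised)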
if self.head.value == value:
self.head = self.head.next
else:
current = self.head
while current.next.value != value:
current = current.next
current.next = current.next.next
def delete_before(self,value):
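        # remove the node immediately preceding the first node whose value equals `value`;
        # assumes such a node exists and that `value` is not stored at the head itself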
if self.head.next.value == value:
self.head = self.head.next
else:
current = self.head
while current.next.next.value != value:
current = current.next
current.next = current.next.next
def add_node_after(self,value,target):
newnode = Node(value)
if self.head.value == target:
newnode.next = self.head.next
self.head.next = newnode
else:
current = self.head
while current.value != target:
current = current.next
newnode.next = current.next
current.next = newnode
def array_to_linkedlist(self,arr):
for i in arr:
node = Node(i)
if self.head is None:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
def print_node(self):
current = self.head
while current:
print(current.value,end=' ')
current = current.next
obj = LinkedList()
arr=[8,5,9,6,3,4,7]
obj.array_to_linkedlist(arr)
obj.add_node_after(89,6)
obj.print_node()
obj.add_node_begening(90,89)
print()
obj.print_node()
print()
obj.delete_node(90)
obj.print_node()
print()
obj.delete_before(7)
obj.print_node()
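
# Hand-traced expected output of the calls above (initial list: 8 5 9 6 3 4 7):
#   8 5 9 6 89 3 4 7     after add_node_after(89, 6)
#   8 5 9 6 90 89 3 4 7  after add_node_begening(90, 89)
#   8 5 9 6 89 3 4 7     after delete_node(90)
#   8 5 9 6 89 3 7       after delete_before(7), which drops the 4 preceding 7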
|
ashwin275/DSA
|
linkedList/linkedlist.py
|
linkedlist.py
|
py
| 2,807 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21341146103
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: KrianJ
# datetime:2022/5/8 21:32
# software: PyCharm-WideDeep
import tensorflow as tf
from tensorflow.keras.layers import Embedding
from tensorflow.keras import Model
from models_tf2.base_layer import LR_Layer, DenseLayer
class WideDeep(Model):
def __init__(self, dense_features, sparse_features, sparse_one_hot_dim, sparse_embed_dim,
hidden_units, output_dim, activation):
"""
        WideDeep model
        :param dense_features: names of the dense numeric features
        :param sparse_features: names of the sparse categorical features
        :param sparse_one_hot_dim: one-hot dimension of each categorical feature
        :param sparse_embed_dim: embedding dimension for each one-hot categorical feature
        :param hidden_units: hidden layer sizes of the deep side
        :param output_dim: output dimension of the deep side
        :param activation: activation function of the deep side
"""
super(WideDeep, self).__init__()
        self.dense_features = dense_features  # numeric features
        self.sparse_features = sparse_features  # categorical features
self.n_dense = len(self.dense_features)
self.n_sparse = len(self.sparse_features)
        # Wide part
self.wide = LR_Layer()
        # Deep part: embedding layers for the sparse features (high-dim one-hot -> low-dim embedding)
self.embedding_layers = {
'embed_{}'.format(i): Embedding(input_dim=sparse_one_hot_dim[i],
output_dim=sparse_embed_dim[i])
for i, fea in enumerate(self.sparse_features)
}
self.deep = DenseLayer(hidden_units, output_dim, activation)
def call(self, inputs, training=None, mask=None):
"""inputs = [数值特征向量 label_encoding类别特征向量 one_hot类别特征向量]"""
dense_input = inputs[:, :self.n_dense]
sparse_category_input = inputs[:, self.n_dense: (self.n_dense + self.n_sparse)]
sparse_onehot_input = inputs[:, (self.n_dense + self.n_sparse):]
        # wide part
wide_input = tf.concat([dense_input, sparse_onehot_input], axis=-1)
wide_output = self.wide(wide_input)
        # deep part
        sparse_embeds = []  # embedding of each categorical feature
for i in range(sparse_category_input.shape[-1]):
embed_layer = self.embedding_layers['embed_{}'.format(i)]
sparse_embeds.append(embed_layer(sparse_category_input[:, i]))
sparse_embeds = tf.concat(sparse_embeds, axis=-1)
deep_output = self.deep(sparse_embeds)
# wide+deep
output = tf.nn.sigmoid(0.5 * (wide_output + deep_output))
return output
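

# --- Hedged usage sketch (not part of the original file) ---
# Assumes models_tf2.base_layer is importable and that DenseLayer accepts
# (hidden_units, output_dim, activation) as used in __init__ above. The feature
# names, dimensions and random batch below are purely illustrative.
if __name__ == '__main__':
    import numpy as np

    dense_features = ['price', 'age']            # hypothetical numeric columns
    sparse_features = ['city', 'category']       # hypothetical categorical columns
    sparse_one_hot_dim = [10, 5]                 # one-hot width per categorical column
    sparse_embed_dim = [4, 4]                    # embedding size per categorical column

    model = WideDeep(dense_features, sparse_features, sparse_one_hot_dim, sparse_embed_dim,
                     hidden_units=[32, 16], output_dim=1, activation='relu')

    # build a batch laid out as call() expects: [dense | label-encoded sparse | one-hot sparse]
    batch = 8
    dense_part = np.random.rand(batch, len(dense_features))
    label_part = np.stack([np.random.randint(0, d, size=batch) for d in sparse_one_hot_dim], axis=1)
    one_hot_part = np.concatenate([np.eye(d)[label_part[:, i]]
                                   for i, d in enumerate(sparse_one_hot_dim)], axis=1)
    x = np.concatenate([dense_part, label_part, one_hot_part], axis=1).astype('float32')

    y = model(tf.constant(x))
    print(y.shape)  # likely (8, 1), depending on LR_Layer / DenseLayer output shapes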
|
KrianJ/CtrEstimate
|
models_tf2/WideDeep.py
|
WideDeep.py
|
py
| 2,715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22218976546
|
import os
import math
from director import robotsystem
from director.consoleapp import ConsoleApp
from director import ioUtils
from director import segmentation
from director import applogic
from director import visualization as vis
from director import continuouswalkingdemo
from director import objectmodel as om
from director import ikplanner
from director import navigationpanel
from director import cameraview
from director import playbackpanel
app = ConsoleApp()
dataDir = app.getTestingDataDirectory()
# create a view
view = app.createView()
segmentation._defaultSegmentationView = view
#footstepsPanel = footstepsdriverpanel.init(footstepsDriver, robotStateModel, robotStateJointController, mapServerSource)
footstepsPanel = None
robotsystem.create(view, globals())
def processSingleBlock(robotStateModel, whichFile=0):
if (whichFile == 0):
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'tabletop/table_top_45.vtp'))
else:
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_top.vtp'))
standingFootName = cwdemo.ikPlanner.leftFootLink
standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
segmentation.findMinimumBoundingRectangle(polyData, standingFootFrame)
def processSnippet():
obj = om.getOrCreateContainer('continuous')
om.getOrCreateContainer('cont debug', obj)
if (continuouswalkingDemo.processContinuousStereo):
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet_stereo.vtp'))
polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
else:
polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet.vtp'))
vis.updatePolyData( polyData, 'walking snapshot trimmed', parent='continuous')
standingFootName = cwdemo.ikPlanner.leftFootLink
standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
vis.updateFrame(standingFootFrame, standingFootName, parent='continuous', visible=False)
# Step 2: find all the surfaces in front of the robot (about 0.75sec)
clusters = segmentation.findHorizontalSurfaces(polyData)
if (clusters is None):
print("No cluster found, stop walking now!")
return
# Step 3: find the corners of the minimum bounding rectangles
blocks,match_idx,groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)
footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)
cwdemo.drawFittedSteps(footsteps)
# cwdemo.sendPlanningRequest(footsteps)
#navigationPanel = navigationpanel.init(robotStateJointController, footstepsDriver)
navigationPanel = None
continuouswalkingDemo = continuouswalkingdemo.ContinousWalkingDemo(robotStateModel, footstepsPanel, footstepsDriver, playbackpanel, robotStateJointController, ikPlanner,
teleopJointController, navigationPanel, cameraview)
cwdemo = continuouswalkingDemo
# test 1
processSingleBlock(robotStateModel, 1)
# test 2 - Table:
processSingleBlock(robotStateModel, 0)
# test 3
processSnippet()
# test 4
continuouswalkingDemo.processContinuousStereo = True
processSnippet()
if app.getTestingInteractiveEnabled():
view.show()
app.showObjectModel()
app.start()
|
RobotLocomotion/director
|
src/python/tests/testContinuousWalking.py
|
testContinuousWalking.py
|
py
| 3,324 |
python
|
en
|
code
| 176 |
github-code
|
6
|
71082571387
|
from ride import Ride
def read_input(file):
input = open(file, "r")
content = input.readlines()
input.close()
#first line
elems = content[0].strip().split(" ")
R = int(elems[0])
C = int(elems[1])
F = int(elems[2])
N = int(elems[3])
B = int(elems[4])
T = int(elems[5])
rides = []
#read lines
for i in range(1,len(content)):
line = content[i].strip().split(" ")
num = i-1
x0 = int(line[0])
y0 = int(line[1])
x1 = int(line[2])
y1 = int(line[3])
es = int(line[4])
lf = int(line[5])
rides.append(Ride(x0,y0,x1,y1,es,lf,num))
return R,C,F,N,B,T,rides
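
# Hedged usage sketch ("a_example.in" is a placeholder file name): the first line of the
# input holds "R C F N B T" and each of the N following lines holds "x0 y0 x1 y1 es lf".
if __name__ == "__main__":
    R, C, F, N, B, T, rides = read_input("a_example.in")
    print(len(rides), "rides on a", R, "x", C, "grid with", F, "vehicles")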
|
bjoukovs/HashCode2K18
|
read.py
|
read.py
|
py
| 683 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70945127228
|
# remote DB connection
import MySQLdb
import pandas as pd
import numpy as np
import ast
import csv
"""
config = {
'host':'127.0.0.1',
'user':'root',
'password':'123',
'database':'test',
'port':3306,
'charset':'utf8',
'use_unicode':True
}
"""
try:  # read the DB connection settings from a file
    with open("mariadb_connect.txt", "r") as fr:
        config = fr.read()  # comes back as a string
except Exception as e:
print('read err : ' + str(e))
print(config)
config = ast.literal_eval(config)
print(type(config))  # parsed into a dict
#############################################
# SQL commands
try:
    conn = MySQLdb.connect(**config)  # connect with the dict config (a plain string raises an error)
cursor = conn.cursor()
sql = """
select jikwon_no, jikwon_name, buser_name, jikwon_jik, jikwon_gen, jikwon_pay
from jikwon inner join buser
on buser.buser_no = jikwon.buser_num
"""
cursor.execute(sql)
# for (a,b,c,d,e,f) in cursor:
# print(a,b,c,d,e,f)
with open("jikwon_datas.csv", "w", encoding="utf-8") as fw:
writer = csv.writer(fw)
for row in cursor:
writer.writerow(row)
print('저장 성공')
    # Read 1: from the csv file
df = pd.read_csv("jikwon_datas.csv", header=None, names=('번호','이름','부서','직급','성별','연봉'))
print(df[:3])
    # Read 2: directly via SQL
df2 = pd.read_sql(sql, conn)
    #df2.columns = '번호','이름','부서','직급','성별','연봉' # tuple of column labels
    df2.columns = ('번호','이름','부서','직급','성별','연봉')  # tuple of column labels
print(df2.head(3))
print(len(df2))
print('\n *직급 : \n',df2['직급'].value_counts())
print('\n *부서 : \n',df2['부서'].value_counts())
    print('\n *연봉 합계 : \n', df2.loc[:,'연봉'].sum())
print('\n *연봉 평균 : \n',df2.loc[:,'연봉'].mean())
print('\n *연봉 상세 : \n',df2.loc[:,['연봉']].describe())
print('\n *연봉 5000 이상 : \n',df2.loc[df2.loc[:,'연봉']>=5000])
print('\n *연봉 5000 이상 , 부서는 영업부: \n',df2.loc[(df2.loc[:,'연봉']>=5000) & (df2['부서']=='영업부')])
print('\n 교차표----------- \n')
ctab = pd.crosstab(df2['성별'], df2['직급'], margins=True)
print('\n 교차표 : \n',ctab)
import matplotlib.pyplot as plt
plt.rc('font', family='malgun gothic')
    # average salary by job title
jik_ypay = df2.groupby(['직급'])['연봉'].mean()
print('\n 직급별 연봉 평균 : \n',jik_ypay)
print('\n 직급별 연봉 평균 : \n',jik_ypay.index)
print('\n 직급별 연봉 평균 : \n',jik_ypay.values)
plt.pie(jik_ypay,
labels=jik_ypay.index,
labeldistance=0.5,
            counterclock=False,  # counterclock=False draws the wedges clockwise
shadow=True,
explode=(0.2,0,0,0.2,0))
plt.show()
#############################################
except Exception as e:
print('err : ', e)
finally:
cursor.close()
conn.close()
#############################################
|
kangmihee/EX_python
|
py_pandas_db/pack/pandas_db2.py
|
pandas_db2.py
|
py
| 3,238 |
python
|
en
|
code
| 0 |
github-code
|
6
|