seq_id (string, lengths 7–11) | text (string, lengths 156–1.7M) | repo_name (string, lengths 7–125) | sub_path (string, lengths 4–132) | file_name (string, lengths 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|
18705309300
|
from time import sleep
import csv
import requests
from lxml import etree
from datetime import datetime
import pytz
urls = ['https://www.eclatparis.com/produits',
'https://www.eclatparis.com/produits?offset=200',
'https://www.eclatparis.com/produits?offset=400',
'https://www.eclatparis.com/produits?offset=600'
]
url_header = 'https://www.eclatparis.com'
# List that stores the URLs of all products
product_urls = []
# Since this is deployed to the cloud and pythonanywhere places too many restrictions on selenium,
# deploying selenium directly would keep the program from running.
# The cookies here were obtained with selenium during a local run;
# using the cookies directly lets us bypass the login page.
cookies= {'hasCart': 'true',
'_ga_ZMDKD43H01': 'GS1.1.1677141224.1.1.1677141248.0.0.0',
'siteUserCrumb': '91m8yMQracXHvtn3hp_zqJZ29UAbVo6aaNclbA8xsq_qVyEboCKRsEBv3EqT4dQmzImPIrdRSieZfpx1drxkGFQAvslwA5temqFq29j_XcmIbFuE51bxgA1TRcZFYz1o',
'SiteUserInfo': '%7B%22authenticated%22%3Atrue%2C%22lastAuthenticatedOn%22%3A%222023-02-23T08%3A34%3A03.898Z%22%2C%22siteUserId%22%3A%2263dc38e82b1d5869bf4988e2%22%2C%22firstName%22%3A%22Wenjie%22%7D',
'crumb': 'BaRQABCJ44v5MTBhMjM1YWRhODA1ZDUxMWU5Y2JhYjY3MmYyNjU5',
'ss_cvt': '1677141232051',
'ss_cvr': 'cb592531-5038-4c16-bff7-3c4d1ae0cf97|1677141232051|1677141232051|1677141232051|1',
'CART': '-B8ztComuxoy8mZfh6NOEGGjxgnUzIdW4JGacILa',
'_ga': 'GA1.1.1986290368.1677141224',
'SiteUserSecureAuthToken': 'MXw1OTBmNTNlNy0xZWMyLTQzODctYWMzZS01NjAzZTIwYjEzOWJ8V0w0UE1BQlN3SFYwOFY0WWQyRmtsQmVDR1ktSVV1SVltTVVkZkdGcy1oel9yT21odDY4OXFZUm1IeElMWkRWQg',
'_fbp': 'fb.1.1677141224123.561848947'}
for url in urls:
# The cookies argument of get() is a dict.
# get() blocks the thread: the code below only runs after all of the page's data has been fetched.
response = requests.get(url, cookies=cookies)
sleep(2)  # sleep is not here to wait for the page to load, but to keep the crawl slow enough that the site does not suspend our IP address
body = etree.HTML(response.content)
links = body.xpath( '//a[contains(@class, "grid-item-link")]')
for link in links:
product_urls.append(url_header + link.get('href'))
sleep(2)
francetime = pytz.timezone("Europe/Paris")
dt = datetime.now(francetime)
### English-style date format
timenow = str(dt.year)+' '+str(dt.month)+' '+str(dt.day)
filename = timenow + 'product_urls_1.csv'
# Write all product URLs to a CSV file
headers = ['URL', 'Sold out']
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(headers)
for url in product_urls:
writer.writerow([url, ''])
# Check whether each product is sold out and add the result to the CSV file
with open(filename, mode='r', newline='') as file:
reader = csv.reader(file)
next(reader) # skip the header row
rows = []
for row in reader:
try:
response = requests.get(row[0], cookies=cookies)
html = etree.HTML(response.content)
sold_out = 'Yes' if html.xpath('//div[@class="ProductItem-details-checkout"]//div[@class="product-mark sold-out"]') else ''
row[1] = sold_out
except Exception as e:
print(f"Failed to check product {row[0]}: {e}")
rows.append(row)
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(headers)
for row in rows:
writer.writerow(row)
print('done')
|
qingyi-er-san/aprizo_codes
|
eclat数据爬取/eclat_云端版.py
|
eclat_云端版.py
|
py
| 3,460 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27143309324
|
#!/usr/bin/env python3
from PIL import Image
import os
path = os.getenv('HOME') + '/supplier-data/images/'
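# Convert every .tiff image in the supplier images directory into a 600x400 RGB JPEG with the same base name.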
for file in os.listdir(path):
if (file.endswith('.tiff')):
shortFileName = os.path.splitext(file)[0]  # rstrip('.tiff') would also strip trailing 't'/'i'/'f' characters from the name
with Image.open(path + file) as im:
im.resize((600, 400)).convert('RGB').save(path + shortFileName + ".jpeg", "JPEG")
|
Mark-C-Hall/Google-IT-Automate-Final
|
changeImage.py
|
changeImage.py
|
py
| 357 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3848261767
|
#!/bin/python3
import sys
def staircase(n):
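# Print a staircase of '#' of height n, right-aligning each row with leading spaces.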
for x in range(1, n + 1):
if x < n:
remain = n - x
print(remain * " " + x * "#")
else:
print(x * "#")
if __name__ == "__main__":
n = int(input().strip())
staircase(n)
|
pedroinc/hackerhank
|
staircase.py
|
staircase.py
|
py
| 280 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35145658447
|
import pytest
from .task import in_component
class Case:
def __init__(self, name: str, n: int, vertices: list, edges: list,
answer: bool):
self._name = name
self.n = n
self.vertices = vertices
self.edges = edges
self.answer = answer
def __str__(self) -> str:
return 'task4_test_{}'.format(self._name)
TEST_CASES = [
Case(
name='base1',
n=4,
vertices=[1, 2, 3],
edges=[
(1, 2),
(2, 3),
(1, 3),
],
answer=True,
),
Case(
name='base2',
n=4,
vertices=[1, 2, 3],
edges=[
(1, 2),
(3, 4),
],
answer=False,
),
Case(
name='base3',
n=4,
vertices=[4, 2, 3, 1],
edges=[
(1, 2),
],
answer=True,
),
Case(
name='base4',
n=2,
vertices=[1],
edges=[
(1, 2),
],
answer=False,
),
]
@pytest.mark.parametrize('case', TEST_CASES, ids=str)
def test_task3(case: Case) -> None:
answer = in_component(
n=case.n,
vertices=case.vertices,
edges=case.edges,
)
assert answer == case.answer
|
renesat/Base-Graph-Contest
|
tasks/task4/test_public.py
|
test_public.py
|
py
| 1,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18483249694
|
import pygame
import random
import sys
from pygame.locals import *
from config import (
FPS,
MODIFIER,
WIDTH,
HEIGHT,
LINETHICKNESS,
PADDLESIZE,
PADDLEOFFSET,
BLACK,
GREY,
ORIGIN_X,
ORIGIN_Y,
DIFFICULTY,
MAX_SCORE
)
def drawArena():
DISPLAYSURF.fill((0, 0, 0))
# draw the outline of arena
pygame.draw.rect(DISPLAYSURF, GREY, ((0 ,0), (WIDTH, HEIGHT)), LINETHICKNESS*2)
# draw the middle line
pygame.draw.line(DISPLAYSURF, GREY, (int(WIDTH/2), 0), (int(WIDTH/2), HEIGHT), int(LINETHICKNESS/4))
def drawPaddle(paddle):
# checks boundaries
if paddle.bottom > HEIGHT - LINETHICKNESS:
paddle.bottom = HEIGHT - LINETHICKNESS
elif paddle.top < LINETHICKNESS:
paddle.top = LINETHICKNESS
# draws the paddle
pygame.draw.rect(DISPLAYSURF, GREY, paddle)
def drawBall(ball):
pygame.draw.rect(DISPLAYSURF, GREY, ball)
# moves the ball, returns new position
def moveBall(ball, ballDirX, ballDirY):
ball.x += (ballDirX * MODIFIER)
ball.y += (ballDirY * MODIFIER)
return ball
# checks for a collision with a wall, and 'bounces' off it.
def checkEdgeCollision(ball, ballDirX, ballDirY):
if ball.top == (LINETHICKNESS) or ball.bottom == (HEIGHT - LINETHICKNESS):
ballDirY = ballDirY * -1
if ball.left == (LINETHICKNESS) or ball.right == (WIDTH - LINETHICKNESS):
ballDirX = ballDirX * -1
return ballDirX, ballDirY
# checks if the ball has hit a paddle, and 'bounces' off it.
def checkPaddleCollision(ball, paddle1, paddle2, ballDirX):
if ballDirX == -1 and paddle1.right == ball.left and paddle1.top < ball.top and paddle1.bottom > ball.bottom:
return -1
elif ballDirX == 1 and paddle2.left == ball.right and paddle2.top < ball.top and paddle2.bottom > ball.bottom:
return -1
else:
return 1
# computer "ai"
def computerMove(ball, ballDirX, paddle2):
# if the ball is moving away from the paddle, center
if ballDirX == -1:
if paddle2.centery < (HEIGHT/2):
paddle2.y += MODIFIER - random.choice(DIFFICULTY)
elif paddle2.centery > (HEIGHT/2):
paddle2.y -= MODIFIER - random.choice(DIFFICULTY)
# if the ball moving towards the paddle, track its movement.
elif ballDirX == 1:
if paddle2.centery < ball.centery:
paddle2.y += MODIFIER - random.choice(DIFFICULTY)
else:
paddle2.y -= MODIFIER - random.choice(DIFFICULTY)
return paddle2
# checks to see if a point has been scored, returns new score
def checkScore(ball, p1_score, p2_score):
hit = False
# awards 1 point to the computer if the left wall is hit
if ball.left == LINETHICKNESS:
p2_score += 1
hit = True
# awards 1 point to the player if the right wall is hit
elif ball.right == WIDTH - LINETHICKNESS:
p1_score += 1
hit = True
# if no points scored, return score unchanged
return p1_score, p2_score, hit
# displays the current score on the screen
def displayScore(p1_score, p2_score):
# player
resultP1Surf = BASICFONT.render('Player %s' %(p1_score), True, GREY)
resultP1Rect = resultP1Surf.get_rect()
resultP1Rect.topright = (100, 25)
DISPLAYSURF.blit(resultP1Surf, resultP1Rect)
# computer
resultP2Surf = BASICFONT.render('Computer %s' %(p2_score), True, GREY)
resultP2Rect = resultP2Surf.get_rect()
resultP2Rect.topleft = (WIDTH - 150, 25)
DISPLAYSURF.blit(resultP2Surf, resultP2Rect)
# displays the end of the game
def gameOver():
finalSurf = BASICFONT.render('GAME OVER', True, GREY)
finalSurfRect = finalSurf.get_rect()
finalSurfRect.topright = (WIDTH/2 + 59, HEIGHT/2 - 50)
DISPLAYSURF.blit(finalSurf, finalSurfRect)
# main function
def main():
pygame.init()
global DISPLAYSURF
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Pongame')
# font information
global BASICFONT, BASICFONTSIZE
BASICFONTSIZE = 20
BASICFONT = pygame.font.Font('freesansbold.ttf', BASICFONTSIZE)
# initiate variables and set starting positions
# for any future changes made within rectangles
ballX = ORIGIN_X
ballY = ORIGIN_Y
playerOnePosition = playerTwoPosition = int((HEIGHT - PADDLESIZE) /2)
p1_score = p2_score = 0
game_over = False
# keeps track of the ball's direction
ballDirX = -1 # -1 = left 1 = right
ballDirY = -1 # -1 = up 1 = down
# creates Rectangles for ball and paddles
paddle1 = pygame.Rect(PADDLEOFFSET, playerOnePosition, LINETHICKNESS, PADDLESIZE)
paddle2 = pygame.Rect(WIDTH - PADDLEOFFSET - LINETHICKNESS, playerTwoPosition, LINETHICKNESS, PADDLESIZE)
ball = pygame.Rect(ballX, ballY, LINETHICKNESS, LINETHICKNESS)
# draws the starting position of the Arena
drawArena()
drawPaddle(paddle1)
drawPaddle(paddle2)
drawBall(ball)
pygame.mouse.set_visible(0)
while True:
# main game loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# mouse movement commands
elif event.type == pygame.MOUSEMOTION and not game_over:
mousex, mousey = event.pos
paddle1.y = mousey
if not game_over:
drawArena()
drawPaddle(paddle1)
drawPaddle(paddle2)
drawBall(ball)
ball = moveBall(ball, ballDirX, ballDirY)
ballDirX, ballDirY = checkEdgeCollision(ball, ballDirX, ballDirY)
ballDirX = ballDirX * checkPaddleCollision(ball, paddle1, paddle2, ballDirX)
p1_score, p2_score, hit = checkScore(ball, p1_score, p2_score)
paddle2 = computerMove (ball, ballDirX, paddle2)
displayScore(p1_score, p2_score)
game_over = p1_score + p2_score == MAX_SCORE
if hit:
ball.x = ballX = ORIGIN_X
ball.y = ballY = ORIGIN_Y
hit = False
pygame.time.wait(1000)
else:
gameOver()
pygame.display.update()
FPSCLOCK.tick(FPS)
if __name__=='__main__':
main()
|
samuele-mattiuzzo/pongame
|
pongame.py
|
pongame.py
|
py
| 6,281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13770141152
|
import time
def convert_file(input_file_name, output_file_name, show_xml = False):
input_file = open(input_file_name + ".json", 'r', encoding='utf-8')
output_file = open(output_file_name + ".xml", 'w', encoding='utf-8')
xml = file_to_xml(input_file)
output_file.write(xml)
input_file.close()
output_file.close()
# if show_xml:
# print(xml)
def file_to_xml(file):
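# Ad-hoc JSON-to-XML conversion: the JSON file is processed line by line as plain text
# (braces, quotes and trailing commas are stripped) and one level of nested objects is
# turned into nested tags inside a <timetable> root element.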
lines = file.readlines()
lines.pop(0)
lines.pop(0)
lines.pop(-1)
lines.pop(-1)
tf = True
for i in range(len(lines)):
lines[i] = lines[i].replace('\n', '').replace('\"', '')
if lines[i][-1] == ',':
lines[i] = lines[i][:-1]
while "%" in lines:
lines.remove("%")
for i in range(len(lines)):
lines[i] = lines[i].replace("\t", "", 2)
lines[i] = lines[i].split(": ")
for elem in lines:
print(elem)
xml = "<timetable>\n"
for i in range(len(lines)):
row = lines[i]
if "}" in row[0]:
continue
if row[0][0] != "\t" and row[1] != '{':
xml += "\t" + f"<{row[0]}>{row[1]}</{row[0]}>\n"
elif row[0][0] != "\t" and row[1] == '{':
xml += "\t" + f"<{row[0]}>\n"
j = i + 1
while "}" not in lines[j][0]:
key = lines[j][0].replace("\t", "")
value = lines[j][1]
xml += "\t" * 2 + f"<{key}>{value}</{key}>\n"
j += 1
xml += "\t" + f"</{row[0]}>\n"
xml += "</timetable>"
return xml
start_time = time.perf_counter()
# convert_file('1', '1')
for n in range(10):
convert_file("1","1")
print(time.perf_counter() - start_time)
|
Mekek/informatics_lab4
|
main_task.py
|
main_task.py
|
py
| 1,725 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72453926907
|
# -*- coding: utf-8 -*-
import pymysql
from pymongo import MongoClient
class MiddleTable(object):
def __init__(self):
self.mysql_host = "192.168.10.121"
self.mysql_user = 'hzyg'
self.mysql_password = '@hzyq20180426..'
self.MONGO_HOST = '127.0.0.1'
self.MONGO_PORT = 27017
# self.MONGO_USER = ''
# self.PSW = ''
def open_sql(self, ms_db, mo_db, mo_coll):
self.link = pymysql.connect(self.mysql_host, self.mysql_user, self.mysql_password, ms_db)
self.link.set_charset('utf8')
self.cursor = self.link.cursor()
self.client = MongoClient(host=self.MONGO_HOST, port=self.MONGO_PORT)
self.mo_db = self.client[mo_db]
self.coll = self.mo_db[mo_coll]
def input_sql(self):
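# Copy inspection records from MongoDB into MySQL: resolve the producer and seller
# organization ids by name, look up the seller's supervising organization, map the
# inspection result and source text to numeric codes, and insert one row per record
# into organization_inspection_relation.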
producer_list = self.coll.distinct('corpName', {})
seller_list = self.coll.distinct('corpNameBy', {})
for name in producer_list or seller_list:
if name != '/':
detail_list = self.coll.find({'$or': [{'corpName': name, 'corpNameBy': name}]})
for detail in detail_list:
inspection_id = detail['_id']
produce_name = detail['corpName']
if produce_name != '/':
sql = "select id from sys_organization where name='%s'" % produce_name
self.cursor.execute(sql)
produce_id = self.cursor.fetchone()
if not produce_id:
break
else:
produce_id = produce_id[0]
else:
produce_id = None
seller_name = detail['corpNameBy']
if seller_name != '/':
sql = "select id from sys_organization where name='%s'" % seller_name
self.cursor.execute(sql)
seller_id = self.cursor.fetchone()
if not seller_id:
break
else:
seller_id = seller_id[0]
sql = "select supervise_id from sys_organization_ascription where organization_id='%s'" % seller_id
self.cursor.execute(sql)
supervise_id = self.cursor.fetchone()
supervise_id = supervise_id[0]
else:
seller_id = None
supervise_id = None
security_results = detail['newsDetailType']
if security_results >= 54 and security_results <= 76 or security_results == 100:
security_results = 1
elif security_results >= 77 and security_results <= 99 or security_results ==101:
security_results = 2
data_type = detail['rwly']
if '省抽' in data_type:
data_type = 521
elif '国抽' in data_type:
data_type = 520
else:
data_type = 526
status = detail['status']
notice_date = detail['ggrq']
sql = """INSERT INTO organization_inspection_relation(inspection_id, producer_id, seller_id, security_results, source, data_type, status, notice_date) VALUES("%s","%d", "%d", "%s", "%d", "%d", "%d", "%s")""" % (inspection_id, produce_id, seller_id, security_results, supervise_id, data_type, status, notice_date)
self.cursor.execute(sql)
self.link.commit()
def close_sql(self):
self.link.close()
mt = MiddleTable()
mt.open_sql('yfhunt', 'zhejiang', 'sheng')
mt.input_sql()
mt.close_sql()
|
cyndi088/MiddleTables
|
mongo_to_mysql.py
|
mongo_to_mysql.py
|
py
| 3,882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1073007549
|
import traceback
from selenium.webdriver.common.by import By
from traceback import print_stack
import utilities.logger as log
import logging
class SeleniumDriver():
log = log.myLogger(logging.DEBUG)
def __init__(self, driver):
self.driver = driver
def getByType(self, locatorType):
locatorType = locatorType.lower()
if locatorType == "id":
return By.ID
elif locatorType == "name":
return By.NAME
elif locatorType == "xpath":
return By.XPATH
elif locatorType == "css":
return By.CSS_SELECTOR
elif locatorType == "class":
return By.CLASS_NAME
elif locatorType == "link":
return By.LINK_TEXT
else:
self.log.info("Locator type " + locatorType +
" not correct/supported")
return False
def getElement(self, locator, locatorType="id"):
element = None
try:
locatorType = locatorType.lower()
byType = self.getByType(locatorType)
element = self.driver.find_element(byType, locator)
self.log.info("Element found with locator: " + locator +
" and locatorType: " + locatorType)
except:
self.log.info("Element not found with locator: " + locator +
" and locatorType: " + locatorType)
self.log.error("Exception Caught: {}".format(traceback.format_exc()))
self.log.error("".join(traceback.format_stack()))
return element
def elementClick(self, locator="", locatorType = "xpath", element=None):
try:
if locator: # This means if locator is not empty
element = self.getElement(locator, locatorType)
element.click()
self.log.info("Clicked on element with locator: " + locator +
" locatorType: " + locatorType)
except:
self.log.info("Cannot click on the element with locator: " + locator +
" locatorType: " + locatorType)
print_stack()
def getText(self, locator="", locatorType = "xpath", element=None, info=""):
try:
if locator:
element = self.getElement(locator, locatorType)
text = element.text
if len(text) != 0:
self.log.info("Getting text on element :: " + info)
self.log.info("The text is :: '" + text + "'")
text = text.strip()
except:
self.log.error("Failed to get text on element " + info)
print_stack()
text = None
return text
def isElementPresent(self, locator="", locatorType = "xpath", element=None):
try:
if locator:
element = self.getElement(locator, locatorType)
if element is not None:  # find_element returns a single WebElement (or None on failure), so len() would raise
self.log.info("Element present with locator: " + locator +
" locatorType: " + locatorType)
return True
else:
self.log.info("Element not present with locator: " + locator +
" locatorType: " + locatorType)
return False
except:
print("Element not found")
return False
|
rchroy/SamsungPhoneTest
|
base/my_selenium_driver.py
|
my_selenium_driver.py
|
py
| 3,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38255085940
|
from django.shortcuts import reverse
from django.views.generic import TemplateView
from django.utils import timezone
from hknweb.utils import (
method_login_and_permission,
get_semester_bounds,
)
from hknweb.events.constants import ATTR
from hknweb.events.models import Event, EventType
from hknweb.events.utils import format_url
from hknweb.utils import get_access_level
@method_login_and_permission("events.add_rsvp")
class AllRsvpsView(TemplateView):
"""List of rsvp'd and not rsvp'd events."""
template_name = "events/all_rsvps.html"
def get_context_data(self):
# Get the start and end time for event filtering
start_time, end_time = get_semester_bounds(timezone.now())
if self.request.GET.get("option") == "upcoming":
start_time = timezone.now()
# Get the current event type
event_types = EventType.objects.order_by("type").all()
event_types = sorted(event_types, key=lambda e: not (e.type == ATTR.MANDATORY))
event_type = self.request.GET.get("event_type", event_types[0].type)
event_type = EventType.objects.filter(type=event_type).first()
# Get all events
all_events = Event.objects.filter(
start_time__gte=start_time,
start_time__lte=end_time,
access_level__gte=get_access_level(self.request.user),
event_type=event_type,
).order_by("start_time")
rsvpd_data, not_rsvpd_data = [], []
for event in all_events:
if event.rsvp_set.filter(user=self.request.user):
data, url = rsvpd_data, "events:unrsvp"
waitlisted = event.on_waitlist(self.request.user)
else:
data, url = not_rsvpd_data, "events:rsvp"
waitlisted = False
data.append(
{
"event": event,
"action": reverse(url, args=[event.id]),
"location": format_url(event.location),
"waitlisted": waitlisted,
}
)
data = [
{
ATTR.CLASS: "right-half",
ATTR.TITLE: "RSVP'd / Waitlist",
ATTR.EVENTS: rsvpd_data,
ATTR.DISPLAY_VALUE: "un-RSVP",
},
{
ATTR.CLASS: "left-half",
ATTR.TITLE: "Not RSVP'd",
ATTR.EVENTS: not_rsvpd_data,
ATTR.DISPLAY_VALUE: "RSVP",
},
]
context = {
"data": data,
"event_types": event_types,
"event_type": event_type,
}
return context
|
Gabe-Mitnick/hknweb
|
hknweb/events/views/aggregate_displays/tabular.py
|
tabular.py
|
py
| 2,679 |
python
|
en
|
code
| null |
github-code
|
6
|
43627131674
|
import heapq
import collections
class Solution:
def assignBikes(self, workers, bikes):
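# Bucket every (worker, bike) index pair into a heap keyed by Manhattan distance, then walk
# the distances in increasing order and greedily assign each popped pair whose worker and bike
# are both still free (heap order breaks ties by worker index, then bike index).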
dist_map = collections.defaultdict(list)
m, n = len(workers), len(bikes)
for i in range(m):
for j in range(n):
w = workers[i]
b = bikes[j]
dist = abs(w[0]-b[0]) + abs(w[1]-b[1])
heap = dist_map[dist]
heapq.heappush(heap, (i, j))
dist_map[dist] = heap
assigned_workers = set()
assigned_bikes = set()
res = [0]*m
distances = sorted(list(dist_map.keys()))
for d in distances:
heap = dist_map[d]
while heap:
pair = heapq.heappop(heap)
if pair[0] not in assigned_workers and pair[1] not in assigned_bikes:
res[pair[0]] = pair[1]
assigned_workers.add(pair[0])
assigned_bikes.add(pair[1])
return res
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1051_1100/LeetCode1057_CampusBikes.py
|
LeetCode1057_CampusBikes.py
|
py
| 986 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71477785468
|
import sys
sys.stdin = open('input.txt')
'''
#1 5
#2 8
#3 9
'''
# shouldn't be solved with plain DP ;;
T = int(input())
for tc in range(1, T+1):
N = int(input())
# print(tc)
# data = [[1000]*(N+1)]+ [[1000]+list(map(int, input().split())) for _ in range(N)]
data = [list(map(int, input().split())) for _ in range(N)]
new_data = [[0]*N for _ in range(N)]
for i in data:
print(i)
print()
for i in range(N):
for j in range(N):
if i == 0 and j == 0:
new_data[i][j] = data[i][j]
elif i == 0:
new_data[i][j] = max(data[i][j]-data[i][j-1],0) + 1 + new_data[i][j-1]
elif j == 0:
new_data[i][j] = max(data[i][j]-data[i-1][j],0) + 1 + new_data[i-1][j]
else:
route1 = max(data[i][j] - data[i-1][j], 0) + new_data[i-1][j]
route2 = max(data[i][j] - data[i][j-1], 0) + new_data[i][j-1]
# route3 = max(data[i][j] - data[i][j+1], 0) + new_data[i-1][j+1]
new_data[i][j] = min(route1, route2) + 1
for i in new_data:
print(i)
print()
print(f'#{tc}', new_data[-1][-1])
|
YOONJAHYUN/Python
|
SWEA/5250_최소비용/sol.py
|
sol.py
|
py
| 1,170 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22837988470
|
import pandas as pd
import networkx as nx
import pickle
import ast
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
df1 = pd.read_csv('../reading_and_cleaning/guest_host_cleaned_podcasts.csv', sep='\t', index_col=0)
split_hosts = pd.read_csv('../reading_and_cleaning/split_hosts.csv', sep='\t', index_col=0)
guest_durations = pd.read_csv('../reading_and_cleaning/guest_durations.csv', sep='\t', index_col=0)
G2 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.Graph())
podcast_info = pd.read_csv('../reading_and_cleaning/meta_podcast_info.csv', sep='\t', index_col=0)
host_list = []
for index1, row1 in podcast_info.iterrows():
hosts = ast.literal_eval(row1['Hosts'])
for host in hosts:
host_list.append(host)
host_list = set(host_list)
top_host_podcast = {}
top_guest_podcast = {}
host_podcasts = {}
guest_podcasts = {}
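# For every host appearing in the guest-host graph, rank the podcasts they host and the
# podcasts they appear on as a guest by total duration, keeping the top podcast and the
# full ranking for each role.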
for node in G2.nodes():
if node in host_list:
#print(node)
df = split_hosts[split_hosts['hosts']==node]
host_durations = df.groupby(['podcast'])['duration'].sum()
host_durations = host_durations.reset_index()
host_durations = host_durations.sort_values(by='duration', ascending=False)
#print(host_durations['podcast'])
#top_podcast = host_durations['podcast'][0]
top_host_podcast[node] = host_durations['podcast'][0]
host_podcasts[node] = host_durations['podcast'].values
#print(host_durations['podcast'].values)
# for index, row in podcast_info.iterrows():
# if(row['Podcast Name']==top_podcast):
# top_cat = ast.literal_eval(row['categories'])[0]
# top_category[node] = top_cat
#print(node, top_cat)
df = df1[df1['guests']==node]
guest_durations = df.groupby(['podcast'])['duration'].sum()
guest_durations = guest_durations.reset_index()
guest_durations = guest_durations.sort_values(by='duration', ascending=False)
#top_podcast = guest_durations['podcast'][0]
if(len(guest_durations)==0):
continue
top_guest_podcast[node] = guest_durations['podcast'].iloc[0]
guest_podcasts[node] = guest_durations['podcast'].values
# for index, row in podcast_info.iterrows():
# if(row['Podcast Name']==top_podcast):
# top_cat = ast.literal_eval(row['categories'])[0]
# top_category[node] = top_cat
save_obj(top_host_podcast, 'top_host_podcast')
save_obj(top_guest_podcast, 'top_guest_podcast')
save_obj(host_podcasts, 'host_podcasts')
save_obj(guest_podcasts, 'guest_podcasts')
|
brooksjaredc/podcast_network_analysis
|
analyzing_functions/set_top_podcast.py
|
set_top_podcast.py
|
py
| 2,778 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17541475306
|
#https://www.hackerrank.com/challenges/incorrect-regex/problem
#Solution
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
T= int(input())
for i in range(T):
S=input()
try:
res = re.compile(S)
print("True")
except Exception:
print("False")
|
AbdullaElshourbagy/Hacker-Rank-Solutions
|
Python/09 - Errors and Exceptions/02_Incorrect_Regex.py
|
02_Incorrect_Regex.py
|
py
| 330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21154911963
|
from layers.layer import Layer
import os
import hashlib
import csv
class FileOnlineOutputLayer(Layer):
def __init__(self, log_messages, results: dict, filename: str, templates: list, message_headers: list):
self.log_messages = log_messages
self.filename = filename
self.results = results
self.templates = templates
self.message_headers = message_headers
def output_csv(self, filename, messages, headers):
with open(filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
for key, row in messages.items():
writer.writerow(row)
def outputResult(self):
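# Assign each distinct parsed template an 8-character md5 EventId, attach the template and
# id to every structured log message, then write the structured messages and the template
# table to separate CSV files.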
# import pdb; pdb.set_trace()
log_events = dict()
eids = dict()
for idx, val in self.results.items():
temp = ' '.join(self.templates[val])
if temp not in eids:
eids[temp] = hashlib.md5(temp.encode('utf-8')).hexdigest()[0:8]
self.log_messages[idx]['EventTemplate'] = temp
self.log_messages[idx]['EventId'] = eids[temp]
tot = 0
for temp, eid in eids.items():
log_events[tot] = dict(EventId=eid, EventTemplate=temp)
tot += 1
self.message_headers += ['EventId', 'EventTemplate']
event_headers = ['EventId', 'EventTemplate']
self.output_csv(self.filename+'_structured.csv', self.log_messages, self.message_headers)
self.output_csv(self.filename+'_templates.csv', log_events, event_headers)
def run(self):
dirname = os.path.dirname(self.filename)
os.makedirs(dirname, exist_ok=True)
self.outputResult()
|
kashanahmed867/ADAL-NN
|
log_parser/online_logparser/layers/fileonline_output_layer.py
|
fileonline_output_layer.py
|
py
| 1,784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70488684987
|
# accepted on coderun
def cards_lost(): # 36.6
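# Recover the three lost cards x, y, z: a, b_ and c_ are the elementary symmetric polynomials
# of x, y, z (built from the differences between the full sums of i, i^2, i^3 over 1..n and
# the remaining sums), so z is an integer root of t^3 - a*t^2 + b_*t - c_ = 0 and x, y follow
# from the remaining quadratic.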
n, linears, squares, cubes = get_pars()
# pre-calculations:
whole_linears = (n * (n + 1)) // 2
whole_squares = (n * (n + 1) * (2 * n + 1)) // 6
whole_cubes = ((n * (n + 1)) // 2) ** 2
# constants:
a = whole_linears - linears
b = whole_squares - squares
c = whole_cubes - cubes
print(f'a, b, c: {a, b, c}')
# aux pars:
b_ = (a ** 2 - b) // 2
c_ = (a ** 3 + 2 * c) // 6 - a * b // 2
print(f'a, b_, c_: {a, b_, c_}')
# solving the cubic equation with z variable:
# for a start lets do a factorization of the free term:
factors = factorize(abs(c_), n)
print(f'factors: {factors}')
z = -1
for z_ in factors:
eq = z_ ** 3 - a * z_ ** 2 + b_ * z_ - c_
# print(f'z: {z_}, eq: {eq}')
if eq == 0:
# print(f'z: {z_} is root!')
z = z_
break
print(f'z: {z}')
# now let us solve the quadratic equation with x var:
d = (a - z) ** 2 - 4 * c_ // z
x = (a - z + int(d ** .5)) // 2
print(f'x: {x}')
y = a - z - x
print(f'y: {y}')
return f'{x} {y} {z}'
def factorize(num: int, n: int) -> list[int]:
factors = set()
f_ = 2
while f_ <= n and f_ ** 2 <= num:
if num % f_ == 0:
factors.add(f_)
factors.add(num // f_)
f_ += 1
factors.add(num)
return sorted(factors)
def get_pars():
n = int(input())
linears, squares, cubes = [int(i) for i in input().split()]
return n, linears, squares, cubes
cards_lost()
|
LocusLontrime/Python
|
Yandex_fast_recruit_days/Medium/Maps.py
|
Maps.py
|
py
| 1,582 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4311447480
|
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
###################################
#### Constants ###################
##################################
def run(X, Y, Xtest = None):
_,img_width, img_height,_ = X.shape
_, classes = Y.shape
#validation_data_dir = 'data/validation'
#nb_train_samples = 2000
#nb_validation_samples = 800
epochs = 10
batch_size = 16
#if K.image_data_format() == 'channels_first':
# input_shape = (3, img_width, img_height)
#else:
input_shape = (img_width, img_height, 3)
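# Small CNN: three Conv2D/ReLU/MaxPooling blocks, a 64-unit dense layer with dropout,
# and a sigmoid output per class, trained with binary cross-entropy and rmsprop.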
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator()
# this is the augmentation configuration we will use for testing:
# only rescaling
model.fit(X, Y, epochs=10, verbose=1, validation_split=0.2, shuffle=True)
#model.fit(X,Y,epochs=25)
#a = model.predict(X)
#exp_scores = np.exp(a)
#probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
#ypred = np.argmax(probs, axis = 1)
#Y = np.argmax(Y, axis = 1)
#from sklearn.metrics import confusion_matrix, accuracy_score
#acc = accuracy_score(Y, ypred)
#print acc
#xval = X[:int(0.2 * len(X))]
#yval = model.predict(xval)
#ytrue = Y[:int(0.2 * len(X))]
return model.predict(Xtest)
|
psrikanthm/satellite-image-classification
|
src/simple_arch.py
|
simple_arch.py
|
py
| 2,151 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3715152381
|
"""
A simple event-sourced user service
"""
import datetime
import functools
import logging
import typing
import aiohttp.web
import faust
import strawberry
import strawberry.asgi.http
import strawberry.asgi.utils
import strawberry.graphql
import strawberry.types.datetime
from faust_avro import App, Record
class UserExistsError(Exception):
pass
class UserDoesNotExistError(Exception):
pass
##############################################################################
# Models
##############################################################################
class User(Record, coerce=True):
"""The main data model for a user, stored in our table."""
email: str
name: str
joined: datetime.datetime
def __str__(self):
return f"{name} <{email}>"
class UserKey(Record):
key: str
class UserCreated(Record, coerce=True):
user: User
class NameChanged(Record, coerce=True):
email: str
name: str
class UpdatedEmail(Record, coerce=True):
old_email: str
new_email: str
class UserDeleted(Record, coerce=True):
email: str
class UserRequest(Record, coerce=True):
update: typing.Union[UserCreated, NameChanged, UpdatedEmail, UserDeleted]
##############################################################################
# App
##############################################################################
app = App(
"users", broker="kafka://localhost", reply_create_topic=True, topic_partitions=1
)
users_requests = app.topic(
"_users_requests", key_type=UserKey, value_type=UserRequest, internal=True
)
cleaned_users_requests = app.topic(
"users", key_type=UserKey, value_type=UserRequest, internal=True
)
users_table = app.Table("users_table", partitions=1)
##############################################################################
# Business logic
##############################################################################
@functools.singledispatch
async def update_handler(msg: typing.Any):
raise NotImplementedError(f"No handler for {msg}")
@update_handler.register
async def user_created(msg: UserCreated):
email = msg.user.email
if email in users_table:
raise UserExistsError(f"User with {email} already exists.")
users_table[email] = msg.user
@update_handler.register
async def name_changed(msg: NameChanged):
user = users_table[msg.email]
user.name = msg.name
users_table[msg.email] = user
@update_handler.register
async def updated_email(msg: UpdatedEmail):
if msg.old_email == msg.new_email:
pass
if msg.old_email not in users_table:
raise UserDoesNotExistError(f"User with {msg.old_email} does not exist.")
if msg.new_email in users_table:
raise UserExistsError(f"User with {msg.new_email} already exists.")
user = users_table[msg.old_email]
user.email = msg.new_email
users_table[msg.new_email] = user
# This is subtle. We jump from the agent for partition new_email over to
# the agent for partition old_email and request a delete there. For a
# short time, the user will exist under both email addresses.
await users_requests.send(
key=UserKey(msg.old_email), value=UserRequest(UserDeleted(msg.old_email))
)
@update_handler.register
async def deleted_email(msg: UserDeleted):
if msg.email not in users_table:
raise UserDoesNotExistError(f"User with {msg.email} does not exist.")
del users_table[msg.email]
##############################################################################
# Agent
##############################################################################
@app.agent(users_requests)
async def users_svc(requests):
async for key, value in requests.items():
try:
await update_handler(value.update)
await cleaned_users_requests.send(key=key, value=value)
yield 200 # OK
except UserExistsError:
yield 409 # Conflict
except UserDoesNotExistError:
yield 404 # Not Found
except NotImplementedError as e:
logging.error(e)
yield 501 # Not Implemented
except Exception as e:
logging.error(e)
yield 500 # Internal Server Error
@app.agent(cleaned_users_requests)
async def cleaned_users_requests(requests):
async for value in requests:
# Silly, but faust-avro uses the agent to do topic-schema registration
pass
##############################################################################
# RESTish
##############################################################################
@app.page("/users")
class users(faust.web.View):
async def get(self, request: faust.web.Request) -> faust.web.Response:
"""List all users"""
return self.json(dict(users=dict(users_table.items())))
async def post(self, request: faust.web.Request) -> faust.web.Response:
"""Create a new user"""
data = await request.json()
key = UserKey(data["email"])
user = User(**data, joined=datetime.datetime.now())
value = UserRequest(UserCreated(user))
response = await users_svc.ask(key=key, value=value)
if response == 200:
return self.json(dict(user=user.asdict()))
elif response == 409:
raise aiohttp.web.HTTPConflict()
else:
raise aiohttp.web.HTTPInternalServerError()
@app.page("/users/{email}")
class users_update(faust.web.View):
@app.table_route(table=users_table, match_info="email")
async def get(
self, request: faust.web.Request, *, email: str
) -> faust.web.Response:
"""Get a specific user"""
try:
return self.json(dict(user=users_table[email].asdict()))
except KeyError:
raise aiohttp.web.HTTPNotFound()
@app.table_route(table=users_table, match_info="email")
async def patch(
self, request: faust.web.Request, *, email: str = None
) -> faust.web.Response:
"""Update a specific user"""
data = await request.json()
if "name" in data:
update = NameChanged(email, data["name"])
elif "new_email" in data:
update = UpdatedEmail(email, data["new_email"])
# Note this re-routes what partition we'll send on
email = data["new_email"]
else:
raise aiohttp.web.HTTPBadRequest()
response = await users_svc.ask(key=UserKey(email), value=UserRequest(update))
if response == 200:
return self.json(dict(user=users_table[email].asdict()))
elif response == 404:
raise aiohttp.web.HTTPNotFound()
elif response == 409:
raise aiohttp.web.HTTPConflict()
else:
raise aiohttp.web.HTTPInternalServerError()
##############################################################################
# GraphQLish
##############################################################################
@strawberry.type
class UserType:
email: str
name: str
joined: strawberry.types.datetime.DateTime
@strawberry.type
class Query:
@strawberry.field
def users(self, info, email: str = None) -> typing.List[UserType]:
if email is not None:
return [users_table[email]]
else:
return list(users_table.values())
@strawberry.input
class CreateUserInput:
email: str
name: str
@strawberry.input
class ChangeUserNameInput:
email: str
name: str
@strawberry.input
class ChangeUserEmailInput:
old_email: str
new_email: str
@strawberry.type
class Mutation:
@staticmethod
async def ask(email, message):
response = await users_svc.ask(key=UserKey(email), value=UserRequest(message))
if response == 200:
return
else:
raise Exception("Failure")
@strawberry.mutation
async def create_user(self, info, input: CreateUserInput) -> UserType:
user = User(email=input.email, name=input.name, joined=datetime.datetime.now())
await Mutation.ask(input.email, UserCreated(user))
return user
@strawberry.mutation
async def change_user_name(self, info, input: ChangeUserNameInput) -> UserType:
await Mutation.ask(input.email, NameChanged(input.email, input.name))
return users_table[input.email]
@strawberry.mutation
async def change_user_email(self, info, input: ChangeUserEmailInput) -> UserType:
await Mutation.ask(
input.new_email, UpdatedEmail(input.old_email, input.new_email)
)
return users_table[input.new_email]
schema = strawberry.Schema(query=Query, mutation=Mutation)
# TODO -- routing! Currently this abuses partitions=1 and workers=1 to have consistency.
#
# Routing is a lot harder in graphql. It potentially needs to happen at the mutation level?
# It'd be worth investigating if the response could be the user object itself and/or an
# exception object. Serializing them with pickle would be okay since it is python/faust
# internal and not intended for outside consumption.
@app.page("/graphql")
class graphql(faust.web.View):
async def get(self, request: faust.web.Request) -> faust.web.Response:
html = strawberry.asgi.utils.get_playground_html(
"http://localhost:6066/graphql"
)
return aiohttp.web.Response(body=html, content_type="text/html")
async def execute(self, query, variables=None, context=None, operation_name=None):
return await strawberry.graphql.execute(
schema,
query,
variable_values=variables,
operation_name=operation_name,
context_value=context,
)
async def post(self, request: faust.web.Request) -> faust.web.Response:
response = await strawberry.asgi.http.get_http_response(request, self.execute)
return aiohttp.web.Response(
body=response.body, content_type=response.media_type
)
|
trauter/faust-avro
|
examples/event_sourced_user.py
|
event_sourced_user.py
|
py
| 9,993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74416691389
|
# This program shows an example use of the range_sum() function.
def main():
# Create a list of numbers.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Sum the elements
# with indexes 2 through 5.
my_sum = range_sum(numbers, 2, 5)
# Display the computed sum.
print('Suma elementów o indeksach od 2 do 5 wynosi', my_sum)
# The range_sum() function returns the sum of the given range
# of elements of the list num_list. The start parameter gives the index
# of the first element to sum. The end parameter
# gives the index of the last element to sum.
def range_sum(num_list, start, end):
if start > end:
return 0
else:
return num_list[start] + range_sum(num_list, start + 1, end)
# Call the main() function.
main()
|
JeanneBM/Python
|
Owoce Programowania/R12/04. Recursive3.py
|
04. Recursive3.py
|
py
| 747 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
40126048193
|
from openeye import oechem
import pickle
import os
#ifs = oechem.oemolistream(oechem.OEFormat_SDF)
#ofs = oechem.oemolostream(oechem.OEFormat_PDB)
DRUG_DIR = 'Drugs/Lib'
DRUG_DB_OUT = 'Drugs/DB/DB.oeb.gz'
DRUG_TITLE_FILE = 'drug_titles.pickle'
MAX_TAUTOMERS = 100
def count_molecules():
ifs = oechem.oemolistream()
num_molecules = 0
if ifs.open(DRUG_DB_OUT):
num_molecules = len(list(ifs.GetOEGraphMols()))
return num_molecules
def preprocess_titles():
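# Merge the per-drug tautomer libraries into a single OEB database, retitling each molecule
# as "<drug name>-<original title>", keeping at most MAX_TAUTOMERS per drug, and pickling the
# mapping from drug name to molecule titles.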
total_molecules = 0
drug_list = os.listdir(DRUG_DIR)
drug_titles = {}
non_zero_drugs = 0
ifs = oechem.oemolistream()
ofs = oechem.oemolostream()
if ofs.open(DRUG_DB_OUT):
for drug in drug_list:
if '.oeb.gz' not in drug:
continue
drug_name = drug.split('.')[0]
drug_path = os.path.join(DRUG_DIR, drug)
print(drug_name)
drug_titles[drug_name] = []
if ifs.open(drug_path):
tautomer_counter = 0
for molecule in ifs.GetOEGraphMols():
total_molecules += 1
title = f"{drug_name}-{molecule.GetTitle()}"
drug_titles[drug_name].append(title)
molecule.SetTitle(title)
oechem.OEWriteMolecule(ofs, molecule)
tautomer_counter += 1
if tautomer_counter >= MAX_TAUTOMERS:
break
if tautomer_counter > 0:
non_zero_drugs += 1
pickle.dump(drug_titles, open(DRUG_TITLE_FILE, 'wb'))
print(f"Non-zero conformers drug num: {non_zero_drugs}")
print(f"Total number of molecules: {total_molecules}")
if __name__ == "__main__":
preprocess_titles()
|
AlexandrNP/BindingScoresDRP
|
openeye_scripts/map_compound_id.py
|
map_compound_id.py
|
py
| 1,779 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43865403849
|
from .flags import JobListFlags, JobOutputFlags
from .format_options import format_job_options
from .job_utils import (
create_job,
create_job_link,
create_job_output_item,
get_job,
submit_job_app,
update_job,
)
__all__ = [
"create_job",
"update_job",
"submit_job_app",
"format_job_options",
"get_job",
"JobOutputFlags",
"create_job_output_item",
"create_job_link",
"JobListFlags",
]
|
No767/Kumiko
|
Bot/Libs/cog_utils/jobs/__init__.py
|
__init__.py
|
py
| 445 |
python
|
en
|
code
| 20 |
github-code
|
6
|
72742543867
|
# -*- coding: utf-8 -*-
# @Author : yxn
# @Date : 2022/1/25 11:26
# @IDE : PyCharm(2021.3.1) Python3.98
from pythonds.basic.stack import Stack
def matches(open, close):
opens = "([{"
closer = ")]}"
return opens.index(open) == closer.index(close)
def parCheck(brackets):
s = Stack() # 1. create an empty stack
balanced = True
index = 0
while index < len(brackets) and balanced:
symbol = brackets[index] # 2. take the brackets one by one from left to right
if symbol in "([{": # opening bracket -> push onto the stack
s.push(symbol)
else: # closing bracket ->
if s.isEmpty(): # stack is empty, matching fails
balanced = False
else: # not empty, pop the top of the stack
top = s.pop()
if not matches(top, symbol): # compare with the popped element: do they match?
balanced = False
index += 1 # 3. continue with step 2
if balanced and s.isEmpty():
return True
else:
return False
if __name__ == '__main__':
print(parCheck("([{}])"))
print(parCheck("([()]){}"))
print(parCheck("()(]}{[])"))
|
yxn4065/Data-structure-and-algorithm-Python-
|
04_栈的应用1括号匹配.py
|
04_栈的应用1括号匹配.py
|
py
| 1,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19026984837
|
from django.shortcuts import render,redirect
import json
from django.conf import settings
import redis
from rest_framework.response import Response
from django.http import HttpResponse
from django.http import JsonResponse
import requests
from .forms import SomeForm
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
# Connect to our Redis instance
redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,
port=settings.REDIS_PORT, db=0)
#view for get and post request
@csrf_exempt
def manage_items(request, *args, **kwargs):
if request.method == 'POST':
form = SomeForm(request.POST)
if form.is_valid():
item=(form.cleaned_data['name'])
item = eval(item)
key = list(item.keys())[0]
value = item[key]
if redis_instance.exists(key):
return redirect('failure')
else:
redis_instance.set(key, value)
return redirect('success')
else:
form = SomeForm()
return render(request, 'app1/Home.html')
return render(request,'app1/Home.html')
#view for post request success message
def success(request):
return render(request,'app1/success.html')
#view for post request failure message
def failure(request):
return render(request,'app1/failure.html')
#view for get request
def get_single_key(request):
if request.method == 'GET':
keyword = request.GET.get('search')
value = redis_instance.get(keyword)
if value:
data = {'key': keyword,'value': value.decode('utf-8'),'msg': 'success'}
else:
data = {'key': keyword,'value': None,'msg': 'Key Not found'}
return render(request,'app1/Home.html',{"data":data})
#view for delete request
def delete_key(request):
if request.method == 'GET':
keyword = request.GET.get('delete')
result = redis_instance.delete(keyword)
if result == 1:
response = {'msg': f"{keyword} successfully deleted"}
else:
response = {'key': keyword,'value': None,'msg': 'Key Not found'}
return render(request,'app1/Home.html',{"response":response})
|
nischithmk/freshworks_assignment
|
app1/views.py
|
views.py
|
py
| 2,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1068937513
|
import numpy as np
import gymnasium as gym
from pendulum_model import PendulumModel
import cvxpy as cp
dt3g2l = 3 * 10 / (2 * 1) * 0.05
dt = 0.05
dt3ml2 = 3 * 0.05 / (1 * 1 * 1)
class CVX_SQP:
def __init__(self):
self.N = 30
self.theta_cost_weight = 1
self.theta_dot_cost_weight = 0.1
self.u_cost_weight = 0.001
self.theta = np.zeros(self.N)
self.theta_dot = np.zeros(self.N)
self.u = np.zeros(self.N)
self.lambda_vec = np.zeros(self.N*8+2)
self.delta_theta = cp.Variable(self.N)
self.delta_theta_dot = cp.Variable(self.N)
self.delta_u = cp.Variable(self.N)
self.slack_var_1 = cp.Variable(self.N)
self.slack_var_2 = cp.Variable(self.N)
self.target_value=[]
def set_init_traj(self, model_log):
self.theta = model_log['theta']
self.theta_dot = model_log['theta_dot']
self.u = model_log['u']
def solve_once(self):
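# One SQP step: linearize the pendulum dynamics around the current (theta, theta_dot, u)
# trajectory, build a QP in the delta variables with box constraints on states and inputs,
# solve it with CVXPY, and store the constraint duals in lambda_vec for the next iteration.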
cost = 0
constr = []
constr += [self.delta_theta[0] == 0]
constr += [self.delta_theta_dot[0] == 0]
for i in range(0, self.N - 1):
cost += self.theta_cost_weight * cp.square(self.delta_theta[i]) + \
self.theta_dot_cost_weight * cp.square(self.delta_theta_dot[i]) + \
self.u_cost_weight * cp.square(self.delta_u[i]) + \
0.5 * self.lambda_vec[2+8*i] * cp.square(self.delta_theta[i]) * (-dt3g2l * np.sin(self.theta[i])) + \
self.theta_cost_weight * self.theta[i] * self.delta_theta[i] + \
self.theta_dot_cost_weight * self.theta_dot[i] * self.delta_theta_dot[i] + \
self.u_cost_weight * self.u[i] * self.delta_u[i]
# 0.1*cp.square(self.slack_var_1[i])+0.1*cp.square(self.slack_var_2[i])
constr += [dt3g2l * np.cos(self.theta[i]) * self.delta_theta[i] +
self.delta_theta_dot[i] + dt3ml2 * self.delta_u[i] - self.delta_theta_dot[i + 1]
== -(
-self.theta_dot[i + 1] + self.theta_dot[i] + dt3g2l * np.sin(self.theta[i]) +
dt3ml2 * self.u[i]
),
self.theta[i + 1] + self.delta_theta[i + 1] == self.theta[i] + self.delta_theta[i] + dt * (
self.theta_dot[i] + self.delta_theta_dot[i]),
self.theta_dot[i] + self.delta_theta_dot[i] <= 8,
self.theta_dot[i] + self.delta_theta_dot[i] >= -8,
self.u[i] + self.delta_u[i] <= 2,
self.u[i] + self.delta_u[i] >= -2,
self.delta_u[i] <= 0.1,
self.delta_u[i] >= -0.1,
]
problem = cp.Problem(cp.Minimize(cost), constr)
problem.solve()
print("status:", problem.status)
print("optimal value", problem.value)
print("optimal var: delta_theta", self.delta_theta.value)
print("optimal var: delta_theta_dot", self.delta_theta_dot.value)
print("optimal var: delta_u", self.delta_u.value)
self.target_value.append(problem.value)
for i in range(len(problem.constraints)):
self.lambda_vec[i] = problem.constraints[i].dual_value
def solve(self):
for i in range(30):
self.solve_once()
self.theta += self.delta_theta.value
self.theta_dot += self.delta_theta_dot.value
self.u += self.delta_u.value
print(self.target_value)
def make_env(name):
gym_env = gym.make(name, render_mode="human")
return gym_env
def main():
env = make_env("Pendulum-v1")
observation, info = env.reset(seed=1)
print(observation)
model = PendulumModel()
model.reset(observation)
print(model.state)
model_log = {'theta': [], 'theta_dot': [], 'u': []}
for i in range(30):
model_log['theta'].append(model.state[0])
model_log['theta_dot'].append(model.state[1])
action = np.random.uniform(-2, 2, 1)
# action=np.array([0])
model.step(action)
model_log['u'].append(action)
model_log['theta'] = np.hstack(model_log['theta'])
model_log['theta_dot'] = np.hstack(model_log['theta_dot'])
model_log['u'] = np.hstack(model_log['u'])
cvx_sqp = CVX_SQP()
cvx_sqp.set_init_traj(model_log)
cvx_sqp.solve()
control = cvx_sqp.u
for i in range(len(control)):  # control only holds N (=30) inputs, so 200 iterations would index past the end
observation, reward, terminated , truncated , info = env.step(control[i].reshape(1,))
print(observation, reward, control[i])
if __name__ == "__main__":
main()
|
CarlDegio/SQP_Pendulum
|
cvx_main.py
|
cvx_main.py
|
py
| 4,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16987276200
|
import socket
import sys
host = ''
port = 9999
# Create a socket to connect computers
def create_socket():
try:
global host, port, s
s = socket.socket()
except socket.error as e:
print("Socket creation error: " + str(e))
# Binding the socket and listening for connections
# create socket has to be called before this function
def bind_socket():
try:
global host, port, s
print("Binding the port " + str(port))
s.bind((host, port))
s.listen(5)
except socket.error as e:
print("Socket binding error: " + str(e))
# function that accepts connections
def socket_accept():
conn, address = s.accept()
print("connection has been established with address: " + address[0] + " Port: " + str(address[1]))
send_comands(conn)
conn.close()
# function that sends commands to the client
def send_comands(conn):
while True:
cmd = input()
if cmd == "quit":
conn.close()
s.close()
sys.exit()
str_encoded = str.encode(cmd)
if len(str_encoded) > 0:
conn.send(str_encoded)
client_response = str(conn.recv(1024), "utf-8")
print(client_response)
def main():
create_socket()
bind_socket()
socket_accept()
if __name__ == "__main__":
main()
|
Pr3d2t0r/reverseShell
|
server.py
|
server.py
|
py
| 1,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38238085136
|
import csv
from PIL import Image
import numpy as np
import os
X = []
index = 0
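# Mirror every .jpg in base224/ left-to-right and save the flipped copy to base224flip/.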
for img in os.listdir("base224/"):
if img[-3:] == "jpg":
image = Image.open("base224/" + img)
img2 = image.transpose(Image.FLIP_LEFT_RIGHT)
img2.save("base224flip/" + img, "JPEG", quality=224, optimize=True, progressive=True)
index += 1
if index % 500 == 0:
print(index)
|
PKUGoodSpeed/FashionAiContest
|
Kedan/flip_images.py
|
flip_images.py
|
py
| 395 |
python
|
en
|
code
| 3 |
github-code
|
6
|
70865858108
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 21:54:40 2019
@author: dingxu
"""
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from photutils import DAOStarFinder
from astropy.stats import sigma_clipped_stats
from photutils import CircularAperture
import cv2
#import scipy.signal as signal
import os
import math
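# Align two FITS frames: detect stars with DAOStarFinder, match the detections with a FLANN
# matcher, fit a homography with RANSAC, warp the first frame onto the second, and save the
# summed and differenced images along with the residual matching error.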
fitsname1 = 'E:\\BOOTES4\\20181118\\03088\\'+'20181118130518-952-RA.fits'
fitsname2 = 'E:\\BOOTES4\\20181118\\03088\\'+'20181118130621-081-RA.fits'
onehdu = fits.open(fitsname1)
imgdata1 = onehdu[0].data #hdu[0].header
copydata1 = np.copy(imgdata1)
imgdata1 = np.float32(copydata1)
oneimgdata = imgdata1
#oneimgdata = signal.medfilt2d(imgdata1, kernel_size=5) # 2-D median filter
hang1,lie1 = oneimgdata.shape
twohdu = fits.open(fitsname2)
imgdata2 = twohdu[0].data #hdu[0].header
copydata2 = np.copy(imgdata2)
imgdata2 = np.float32(copydata2)
twoimgdata = imgdata2
#twoimgdata = signal.medfilt2d(imgdata2, kernel_size=5) # 2-D median filter
hang2,lie2 = twoimgdata.shape
def adjustimage(imagedata, coffe):
mean = np.mean(imagedata)
sigma = np.std(imagedata)
mindata = np.min(imagedata)
maxdata = np.max(imagedata)
Imin = mean - coffe*sigma
Imax = mean + coffe*sigma
mindata = max(Imin,mindata)
maxdata = min(Imax,maxdata)
return mindata,maxdata
def displayimage(img, coff, i):
minimg,maximg = adjustimage(img, coff)
plt.figure(i)
plt.imshow(img, cmap='gray', vmin = minimg, vmax = maximg)
plt.savefig(str(i)+'.jpg')
def findsource(img):
mean, median, std = sigma_clipped_stats(img, sigma=3.0)
daofind = DAOStarFinder(fwhm=8.5, threshold=5.*std)
sources = daofind(img - median)
tezhen = np.transpose((sources['xcentroid'], sources['ycentroid']))
#tezhen = np.transpose((sources['xcentroid'], sources['ycentroid'],sources['sharpness']))
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
return tezhen,positions
### star detection ###
tezhen1,positions1 = findsource(oneimgdata)
tezhen2,positions2 = findsource(twoimgdata)
apertures1 = CircularAperture(positions1, r=5.)
apertures2 = CircularAperture(positions2, r=5.)
displayimage(oneimgdata,3,0)
apertures1.plot(color='blue', lw=1.5, alpha=0.5)
displayimage(twoimgdata,3,1)
apertures2.plot(color='blue', lw=1.5, alpha=0.5)
lenposition1 = len(positions1)
lenposition2 = len(positions2)
keyimg1 = np.zeros((lenposition1,128),dtype = np.float32)
keyimg2 = np.zeros((lenposition2,128),dtype = np.float32)
i = 0
j = 0
for i in range(lenposition1):
keyimg1[i,0:2] = tezhen1[i,:]
for j in range(lenposition2):
keyimg2[j,0:2] = tezhen2[j,:]
# FLANN parameter setup
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(keyimg1,keyimg2,k=2)
lenpipei = 0
temp1 = []
temp2 = []
for i, (m1, m2) in enumerate(matches):
if m1.distance < 0.75 * m2.distance:  # Euclidean distance between the two feature vectors; the smaller it is, the better the match.
lenpipei = lenpipei+1
temp1.append(m1.queryIdx)
temp2.append(m1.trainIdx)
hmerge = np.hstack((oneimgdata, twoimgdata)) # horizontal concatenation
displayimage(hmerge, 3, 2)
srckp1 = []
srckp2 = []
for i in range(lenpipei):
x = temp1[i]
y = temp2[i]
x10 = positions1[x][0]
y10 = positions1[x][1]
srckp1.append(x10)
srckp1.append(y10)
src_pts = np.float32(srckp1).reshape(-1,2)
x11 = positions2[y][0]
y11 = positions2[y][1]
srckp2.append(x11)
srckp2.append(y11)
dst_pts = np.float32(srckp2).reshape(-1,2)
#plt.plot(x10,y10,'*')
#plt.plot(x11+lie1,y11,'*')
plt.plot([x10,x11+lie1],[y10,y11],linewidth = 0.8)
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
newimg1 = cv2.warpPerspective(imgdata1, H, (lie1,hang1))
addimg = np.float32(newimg1) + np.float32(imgdata2)
minusimg = np.float32(newimg1) - np.float32(imgdata2)
displayimage(addimg, 3, 3)
displayimage(minusimg, 3, 4)
def witefits(data,name):
os.remove(name + '.fits')
grey=fits.PrimaryHDU(data)
greyHDU=fits.HDUList([grey])
greyHDU.writeto(name + '.fits')
witefits(newimg1,'one')
witefits(imgdata2,'two')
witefits(minusimg,'minusimg')
tempmatrix = np.zeros((3,1),dtype = np.float64)
tempmatrix[2] = 1
deltemp = []
for j in range(lenpipei):
tempmatrix[0] = src_pts[j][0]
tempmatrix[1] = src_pts[j][1]
result = np.dot(H,tempmatrix)
rx11 = result[0]/result[2]
ry11 = result[1]/result[2]
delcha = math.sqrt((rx11-dst_pts[j][0])**2 + (ry11-dst_pts[j][1])**2)
deltemp.append(delcha)
plt.figure(5)
plt.plot(deltemp)
print(np.mean(deltemp[15:40]))
|
dingxu6207/newcode
|
newB4SIFT.py
|
newB4SIFT.py
|
py
| 4,808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24739538824
|
#!/bin/python
import smtplib, os, sys
def printBanner():
print("*" * 50)
print("* %s " % "Welcome to the SMTP Vrfy enum script.")
print("* python %s %s " % (sys.argv[0], "to start execution."))
print("*" * 50)
def getUserInput(msg="Default message: "):
return raw_input(msg).strip()
def pullFileList(file_path=""):
file_contents = ""
if os.path.isfile(file_path):
with open(file_path, "r") as f:
file_contents = f.read().splitlines()
return file_contents
def vrfyUser(user="", smtp_server=""):
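# smtplib's vrfy() returns a (code, message) tuple; a 250 code means the server recognizes the user.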
if user and smtp_server:
if "250" not in str(smtp_server.vrfy(user)[0]):
return False
else:
return True
else:
return None
def main():
printBanner()
server = getUserInput("SMTP Server to VRFY users: ")
if server:
smtp_server = smtplib.SMTP(server)
else:
print("Unable to connect to %s." % server)
exit()
valid_users = set()
users = pullFileList(getUserInput("Path to users list: "))
users = set([user.strip() for user in users if user])
for user in users:
if user and vrfyUser(user, smtp_server):
print("%s is a valid user" % user)
valid_users.add(user)
else:
print("%s is not a valid user or failed to connect." % user)
print("\n\nValid Users:")
print("\n".join(valid_users))
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt):
print("^C")
exit()
|
idleninja/smtp_vrfy_enum.py
|
smtp_vrfy_enum.py
|
smtp_vrfy_enum.py
|
py
| 1,358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14536874544
|
# Callbacks
from dash import Input, Output, State
from dash.exceptions import PreventUpdate
def sidebar_callbacks(app, df):
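# Chained dropdowns: picking a company filters the brand options, a brand filters the cities,
# and a city filters the zones; each callback stores its filtered dataframe on the function
# object for the next callback to reuse.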
@app.callback(
[
Output("brand_dropdwn", 'options'),
Output("brand_dropdwn", 'value')
],
[
State("date_picker", "start_date"),
State("date_picker", "end_date"),
],
[
Input("company_dropdwn", 'value')
]
)
def get_brand_options(start_date, end_date, company):
if company is None or start_date is None or end_date is None:
raise PreventUpdate
else:
get_brand_options.df2 = df[df['NombreDeProductor'] == company]
brand_options = [{'label': b, 'value': b} for b in get_brand_options.df2.marca.unique()]
return brand_options, None
@app.callback(
[Output("city_dropdwn", 'options'),
Output("city_dropdwn", 'value')],
[Input("brand_dropdwn", 'value')])
def get_city_list(marca):
if marca is None:
raise PreventUpdate
else:
get_city_list.df3 = get_brand_options.df2[get_brand_options.df2['marca'] == marca]
city_options = [{'label': c, 'value': c} for c in get_city_list.df3.Municipio.unique()]
return city_options, None
@app.callback(
[Output("zone_dropdwn", 'options'),
Output("zone_dropdwn", 'value')],
[Input("city_dropdwn", 'value')])
def get_zone_list(city):
if city is None:
raise PreventUpdate
else:
get_zone_list.df4 = get_city_list.df3[get_city_list.df3['Municipio'] == city]
zone_options = [{'label': z, 'value': z} for z in get_zone_list.df4.Zona.unique()]
return zone_options, None
| jmalcovich21/ds4a_tiendareg | callbks/sidebar_calls.py | sidebar_calls.py | py | 1,794 | python | en | code | 0 | github-code | 6 | 74883090107 |
N, M = map(int, input().split())
n_switches = []
switches = []
for i in range(M):
ipt = list(map(int, input().split()))
n_switches.append(int(ipt[0]))
switches.append(ipt[1:])
p = list(map(int, input().split()))
ans = 0
# print(n_switches)
# print(switches)
# print(p)
# Enumerate every on/off pattern of the N switches
for i in range(2 ** N):
    ptn = bin(i)[2:].rjust(N, '0')
    allLight = True  # True while every bulb is lit (a pattern is invalid if even one bulb stays off)
    # For each light bulb
    for j in range(M):
        n_light = 0
        # For each switch connected to bulb j
        for k in range(n_switches[j]):
            # If that switch is on in this pattern
            if ptn[switches[j][k] - 1] == '1':
                n_light += 1
        if n_light % 2 != p[j]:
            allLight = False
            break
if allLight:
ans += 1
print(ans)
| kazuo-mu/at_coder_answers | ABC128/c_switches.py | c_switches.py | py | 954 | python | ja | code | 0 | github-code | 6 | 41978223591 |
from pyspark.sql.types import StructType, StructField, StringType, DateType, FloatType
from pyspark.sql import SparkSession
from datetime import datetime
from task_4 import get_min_or_max_by_ppu
import pytest
# create a spark session
spark = SparkSession.builder.appName("task_0").getOrCreate()
# create a test dataframe
schema = StructType([
StructField('ndc11', StringType()),
StructField('invoice_date', DateType()),
StructField('invoice_cost', FloatType()),
StructField('invoice_quan', FloatType()),
StructField('bu_.customer_name', StringType()),
])
data = [(1, datetime(2020, 1, 15), 40.0, 10.0, 'John'),
(1, datetime(2020, 1, 7), 50.0, 10.0, 'Ann'),
(1, datetime(2020, 1, 22), 40.0, 2.0, 'Ann'),
(1, datetime(2020, 2, 15), 20.0, 10.0, 'John'),
(1, datetime(2020, 2, 7), 50.0, 10.0, 'Ann'),
(1, datetime(2020, 2, 21), 40.0, 20.0, 'Mathew'),
(2, datetime(2020, 2, 22), 50.0, 10.0, 'Carter'),
(2, datetime(2020, 2, 22), 40.0, 8.0, 'Ann')
]
test_trx_df = spark.createDataFrame(data, schema=schema)
@pytest.mark.parametrize('rows, sort, expected_cost, expected_names',
[(1, 'min', [40.0, 20.0, 40.0], ['Ann', 'John', 'John']),
(1, 'max', [40.0, 50.0, 40.0], ['Ann', 'Ann', 'Ann'])])
def test_get_min_or_max_by_ppu(rows, sort, expected_cost, expected_names):
result_df = get_min_or_max_by_ppu(test_trx_df, rows, sort)
actual_result = result_df.collect()
actual_invoice_cost = [row.invoice_cost for row in actual_result]
actual_names = [row['bu_.customer_name'] for row in actual_result]
assert actual_invoice_cost == expected_cost
assert actual_names == expected_names
| rkrvchnk/pyspark_tasks | tests/test_task_4.py | test_task_4.py | py | 1,784 | python | en | code | 0 | github-code | 6 | 37430808138 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: TRS wcm 6.x infoview information disclosure
referer: http://www.wooyun.org/bugs/wooyun-2012-012957
author: Lucifer
description: the file infoview.do leaks information.
'''
import sys
import requests
class trs_wcm_infoview_disclosure_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/wcm/infoview.do?serviceid=wcm6_user&MethodName=getOnlineUsers"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"<USERNAME>" in req.text and r"<Users>" in req.text:
return "[+]存在TRS wcm 6.x版本infoview信息泄露漏洞...(中危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = trs_wcm_infoview_disclosure_BaseVerify(sys.argv[1])
testVuln.run()
| iceyhexman/onlinetools | scanner/plugins/cms/trs/trs_wcm_infoview_disclosure.py | trs_wcm_infoview_disclosure.py | py | 1,115 | python | en | code | 1,626 | github-code | 6 | 74288480507 |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from seeds.api.views import AudioClipViewSet, BlobViewSet, SuiteViewSet, UserViewSet
from seeds.views import index, register
router = DefaultRouter()
router.register(r"users", UserViewSet, basename="User")
router.register(r"suites", SuiteViewSet, basename="Suite")
router.register(r"blobs", BlobViewSet, basename="Blob")
router.register(r"audioclips", AudioClipViewSet, basename="AudioClip")
urlpatterns = [
path("", index, name="index"),
path("register", register, name="register"),
path("accounts/", include("django.contrib.auth.urls")),
# api
path("api/v1/", include(router.urls)),
]
| jacobshandling/soundseeker | backend/seeds/urls.py | urls.py | py | 697 | python | en | code | 0 | github-code | 6 | 8293013280 |
import cv2
import math
path = "img/angle.jpg"
img = cv2.imread(path)
pointsList = []
def mousePoints(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
size = len(pointsList)
if size != 0 and size % 3 != 0:
cv2.line(img, tuple(pointsList[round((size-1)/3)*3]), (x,y), (0,0,255), 2)
cv2.circle(img, (x,y), 5, (0,0,255), cv2.FILLED)
pointsList.append([x,y])
def gradient(p1,p2):
return (p2[1] - p1[1])/(p2[0]- p1[0])
def getAngle(pointList):
p1, p2, p3 = pointList[-3:]
m1 = gradient(p1, p2)
m2 = gradient(p1, p3)
angle_radiance = math.atan((m2-m1)/(1+(m2*m1)))
angle_degrees = round(math.degrees(angle_radiance))
cv2.putText(img, str(angle_degrees), (p1[0]-40, p1[1]-20), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)
while True:
if len(pointsList) % 3 == 0 and len(pointsList) != 0:
getAngle(pointsList)
cv2.imshow('Image', img)
cv2.setMouseCallback('Image', mousePoints)
if cv2.waitKey(1) & 0xFF == ord('q'):
pointsList = []
img = cv2.imread(path)
| Demohack2022/hacktoberfest2022 | Contributors/angle-finder.py | angle-finder.py | py | 1,095 | python | en | code | 8 | github-code | 6 | 29564634745 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from api.thanos_http import xtthanos_user_http, request_data
from api.http_api import ResultBase
from common.logger import logger
from common.get_signature import generate_auth_info
def adjust_leverage(leverage,positionSide,symbol):
    '''Adjust the leverage multiplier'''
result = ResultBase()
adjust_leverage = request_data.get('adjust_leverage')
params = {
        'leverage' : leverage, # leverage multiplier
        'positionSide': positionSide, # position side: LONG or SHORT
        'symbol' : symbol # trading pair
}
path = adjust_leverage.get('route') + adjust_leverage.get('path')
method = adjust_leverage.get('method')
headers = generate_auth_info(path=path, method=method,params=params,bodymod='x-www-form-urlencoded')
res = xtthanos_user_http.adjust_leverage(headers=headers, params=params)
result.status_code = res.status_code
result.response = res.json()
logger.info(f"调整杠杆倍数 ==>> 返回结果 ==>> {res.text}")
return result
| shiqilouyang/thanos_test | operation/contract/client/position/adjust_leverage.py | adjust_leverage.py | py | 1,084 | python | en | code | 0 | github-code | 6 | 35571671171 |
import urllib.request as urllib2 #import the library used to query a website
from bs4 import BeautifulSoup #import the Beautiful soup functions to parse the data returned from the website
import pandas as pd #import pandas to convert list to data frame
from openpyxl import load_workbook
# INPUT VARIABLES SPECIFIED BY THE USER
input1 = input('Please input the column in excel that you want modified (UPPERCASE ONLY): ')
input2 = input('\nPlease count the number of the table from the web you would like to parse.\nFor example input 9 if you would like to read from the 9th table listed: ')
input3 = input('\nPlease input the number of the column from the table on the web you would like to parse.\nFor example input 3 if you would like to read from the 3rd column: ')
input4 = input('\nPlease input the number of the excel sheet that you would like to read from.\n For example from left to right the sheet tbas would be 1,2,3... accordingly: ')
input5 = input('\nPlease input the name of the file you would like to read from (extension included).\n For example Verisk Model_Send_Excel_2.xlsx: ')
input6 = input('\nPlease input the path where this folder is located on your computer (please include a "/" at the end of the path).\nFor Example ~/Documents/Git/Html_scraping_project/: ')
input7 = input('\nPlease input the url containing the table that you want to parse.\nFor example http://www.verisk.com/press-releases/2017/february/verisk-analytics-inc-reports-fourth-quarter-2016-financial-results.html: ')
print('\nThe file "temp.xlsx" has now been created in your directory...')
#Convert user input into proper indexes
def excelColumnToIndex(column):
return ord(column) - 65
def tableFromWebToIndex(index):
return int(index) - 1
def tableColumnFromWebToIndex(index):
return int(index) - 1
def excelSheetToIndex(index):
return int(index) - 1
#Set global variabes to correct values
EXCEL_COLUMN_INDEX = excelColumnToIndex(input1)
TABLE_FROM_WEB_INDEX = tableFromWebToIndex(input2)
TABLE_FROM_WEB_COLUMN_INDEX = tableColumnFromWebToIndex(input3)
EXCEL_SHEET_INDEX = excelSheetToIndex(input4)
FILENAME = input5
PATH = input6
URL = input7
def parseTables(all_tables):
parsed_tables = []
for i in range(len(all_tables)-1):
table_body = all_tables[i].find('tbody')
rows = table_body.find_all('tr')
df_temp = []
for row in rows:
cols =row.find_all('td')
cols = [ele.text.strip() for ele in cols]
df_temp.append([ele for ele in cols]) #get rid of empty values
parsed_tables.append(df_temp)
return parsed_tables
def loadExcelDoc(sheet_index):
# Open up Faton Excel file
xl = pd.ExcelFile(PATH + FILENAME)
sheets = xl.sheet_names
## open up the first sheet and print our data
df = xl.parse(sheets[sheet_index],header=None)
#row_index = df.iloc[0][0]
#df = xl.parse(sheets[sheet_index])
#df = df.set_index(row_index)
return df
def main():
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers={'User-Agent':user_agent,}
request=urllib2.Request(URL,None,headers) #The assembled request
page = urllib2.urlopen(request)
data = page.read() # The data
#Parse the html in the 'page' variable, and store it in Beautiful Soup format
soup = BeautifulSoup(data,'html.parser')
#####gather all tables from page into array
all_tables = soup.find_all("table", class_="table-blue")
parsed_tables = parseTables(all_tables)
df_from_web = pd.DataFrame(parsed_tables[TABLE_FROM_WEB_INDEX])
df = loadExcelDoc(EXCEL_SHEET_INDEX)
wb = load_workbook(FILENAME, keep_vba = True)
wb.get_sheet_names()
active_sheet = wb.sheetnames[EXCEL_SHEET_INDEX]
ws = wb[active_sheet]
# Lets try to match the row index names from the web to the excel doc
excel_labels = [i.value for i in ws['A']]# we assume that the labels are always in column A
web_labels = df_from_web.loc[:,0] # we assume that the label is always in the first column of the dataframe
for i,excel_label in enumerate(excel_labels):
for j,web_label in enumerate(web_labels):
if excel_label == web_label:
#set the cell value in the excel file to match the value found from the web
ws[i+1][EXCEL_COLUMN_INDEX].value = df_from_web.loc[j,TABLE_FROM_WEB_COLUMN_INDEX]
wb.save("temp.xlsx")
#########################################################################
# Lets run our script
main()
| rich-coleman-gh/Html_scraping_project | main.py | main.py | py | 4,466 | python | en | code | 0 | github-code | 6 | 13083798365 |
# -*- coding: utf-8 -*-
import re
from setuptools import setup
version = re.search(
'^__version__\s*=\s*"(.*)"',
open('pushscreeps/pushscreeps.py').read(),
re.M
).group(1)
with open("README.rst", "rb") as f:
long_description = f.read().decode("utf-8")
setup(
name="pushscreeps",
packages=["pushscreeps"],
entry_points={
"console_scripts": ['pushscreeps = pushscreeps.pushscreeps:main']
},
version=version,
description="Python3 script for pushing code to screeps",
long_description=long_description,
author="Mathias Bøhn Grytemark",
author_email="[email protected]",
url="https://github.com/mboehn/pushscreeps",
install_requires=[
"requests",
],
)
| mboehn/pushscreeps | setup.py | setup.py | py | 744 | python | en | code | 0 | github-code | 6 | 25032770232 |
#pip install dash dash-renderer dash-html-components dash-core-components plotly
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()
app.layout = html.Div(children=[
html.H1("Consumo dos clientes"), #separando elementos dos childs por virgula
dcc.Dropdown(
options=[
            {'label': 'Fulano', 'value': "Fulano" }, # value = the option's id
            {'label': 'Sicrano', 'value': "Sicrano"} # value = the option's id
        ],
        value= '' # id of the selected option
),
dcc.Graph(id="Fulano",
    figure = { # Monthly consumption vs. product name
"data": [{"x": ["pale ale", "weissbier", "itaipava", "skol"], "y": [0, 5, 4, 2], "type":"bar", "name": "Cervejas"},
{"x": ["expresso", "cappuccino", "mocaccino", "cafe4"], "y": [0, 0, 2, 1], "type":"line", "name": "Cafés"},
],
"layout": {
"title": "Fulano"
}
}),
dcc.Graph(id="Sicrano",
    figure = { # Monthly consumption vs. product name
"data": [{"x": ["pale ale", "weissbier", "itaipava", "skol"], "y": [0, 1, 1, 0], "type":"bar", "name": "Cervejas"},
{"x": ["expresso", "cappucino", "mocaccino", "cafe4"], "y": [7, 0, 3, 2], "type":"line", "name": "Cafés"}
],
"layout": {
"title": "Sicrano"
}
})
])
if __name__ == '__main__':
app.run_server(debug=True)
| grupoflux/dashboard | dashboard.py | dashboard.py | py | 1,527 | python | en | code | 0 | github-code | 6 | 24354798885 |
import os
import torch
import gym
import gym_donkeycar
import time
from env.vae_env import VaeEnv
from vae.vae import VAE
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines import SAC
VARIANTS_SIZE = 32
DONKEY_SIM_PATH = f"/Applications/donkey_sim.app/Contents/MacOS/sdsim"
SIM_HOST="127.0.0.1"
DONKEY_SIM_PORT=9091
image_channels = 3
if __name__ == '__main__':
#model_path = 'vae-gt-80-160-10k-beta25-150.torch'#for 6
#model_path = 'vae-gt-80-160-18k-beta25-50-loss.torch'
model_path = 'vae-gt-30k-50.torch'
torch_device = 'cpu'
vae = VAE(image_channels=image_channels, z_dim=VARIANTS_SIZE)
vae.load_state_dict(torch.load(model_path, map_location=torch.device(torch_device)))
vae.to(torch.device(torch_device))
vae.eval()
env = gym.make('donkey-generated-track-v0', exe_path=DONKEY_SIM_PATH, host=SIM_HOST, port=DONKEY_SIM_PORT)
env.viewer.set_car_config("donkey", (128, 128, 128), "masato-ka", 20)
vae_env = VaeEnv(env, vae, device=torch_device)
model = SAC.load('donkey8')
obs = vae_env.reset()
dones=False
    for step in range(10000):  # run the driving loop (original comment said "run 500 steps")
if step % 10 == 0: print("step: ", step)
#if dones:
# o = env.reset()
# break
action, _states = model.predict(obs)
obs, rewards, dones, info = vae_env.step(action)
# env.render()
env.close()
| masato-ka/sac-car-racing | run_donkey.py | run_donkey.py | py | 1,406 | python | en | code | 0 | github-code | 6 | 35813238442 |
import typing
from typing import Optional
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine, async_sessionmaker
from sqlalchemy.orm import declarative_base
from backend.services.database import db
if typing.TYPE_CHECKING:
pass
class Database:
def __init__(self, url: str):
self._url = url
self._engine: Optional[AsyncEngine] = None
self._db: Optional[declarative_base] = None
self.session: Optional[AsyncSession] = None
async def connect(self, *_: list, **__: dict) -> None:
self._db = db
self._engine = create_async_engine(url=self._url, echo=True)
self.session = async_sessionmaker(bind=self._engine, class_=AsyncSession, expire_on_commit=False)
async def disconnect(self, *_: list, **__: dict) -> None:
if self._engine:
await self._engine.dispose()
| jendox/tg_bot | backend/services/database/database/base.py | base.py | py | 883 | python | en | code | 0 | github-code | 6 | 34653033371 |
import math, os
from dataclasses import dataclass
import pygame
from const import *
@dataclass
class Position:
sector: int
layer: int
index: int
x: float
y: float
def __init__(self, sector, layer, index):
self.sector = sector
self.layer = layer
self.index = index
sector_angle = self.sector * math.pi / 3
pos_angle = sector_angle + 2 * math.pi / 3
self.x = BOARD_CENTER[0] + 2 * (TILE_SIDE + TILE_PADDING) * (self.layer * math.sin(sector_angle) + self.index * math.sin(pos_angle))
self.y = BOARD_CENTER[1] - 2 * (TILE_SIDE + TILE_PADDING) * (self.layer * math.cos(sector_angle) + self.index * math.cos(pos_angle))
def to_map(self, offset: (float, float) = (0, 0)):
return (self.x + offset[0], self.y + offset[1])
def to_coor(self):
return (self.sector, self.layer, self.index)
def circle_intersect(self, radius: float, pos: (float, float)):
return math.sqrt((pos[0] - self.x)**2 + (pos[1] - self.y)**2) <= radius
class Resource:
textures: dict[str, list[pygame.Surface]] = {}
fonts: dict[str, pygame.font.Font] = {}
@staticmethod
def init():
Resource.textures = {
"town": [pygame.image.load(os.path.join("res", "images", "buildings", f"town{i}.png")) for i in range(NUM_PLAYERS)],
"farm": [pygame.image.load(os.path.join("res", "images", "buildings", f"farm{i}.png")) for i in range(NUM_PLAYERS)],
"soldier": [pygame.image.load(os.path.join("res", "images", "units", f"soldier{i}.png")) for i in range(NUM_PLAYERS)]
}
for items in Resource.textures.values():
for index, item in enumerate(items):
items[index] = pygame.transform.scale(item, PIECE_SIZE).convert_alpha()
Resource.fonts = {
"system": pygame.font.SysFont("Calibri", 18),
"systeml": pygame.font.SysFont("Calibri", 24),
}
| martin-hanekom/persian-silver-2 | src_old/tools.py | tools.py | py | 1,951 | python | en | code | 0 | github-code | 6 | 38890746457 |
import dialogflow_v2 as dialogflow
import os
path_key = "C:\wilasinee_pj\pj\python\ggthaluangbot-a6aed6caf27a.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path_key
#*****************************************************************
project_id = "thaluangbot-lhrv"
session_id = "82d12b78-40b4-4028-a8f7-3a6a479cb2f7"
language_code = "Thai-th"
#****************************************************************
from flask import Flask ,request,abort
from linebot import(
LineBotApi , WebhookHandler
)
from linebot.exceptions import(
InvalidSignatureError
)
from linebot.models import *
import json
#**************************************
text = input("let's text : ")
app = Flask(__name__)
def detect_intent_texts(project_id, session_id, texts, language_code):
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session))
#for text in texts:
text = texts
text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(session=session, query_input=query_input)
return response.query_result.fulfillment_text
text_re = detect_intent_texts(project_id,session_id,text,language_code)
print(text_re)
| wilasineePE/chatbot | index.py | index.py | py | 1,363 | python | en | code | 0 | github-code | 6 | 41265462253 |
import pandas as pd
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# read the data
df = pd.read_csv(r"C:\projects\intropattern\otu.csv")
# take the transpose of the data so the left/right samples become columns
df=df.T
X = df.iloc[:,1:]
y = df.iloc[:,0]
# split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Build the AdaBoost model. n_estimators is the number of weak models trained as part of the ensemble.
# A weak model is one that is only slightly better than random guessing and is typically
# less accurate than a single strong model.
model = AdaBoostClassifier(n_estimators=100)
# Fit the model on the training data
model.fit(X_train, y_train)
# Make predictions on the test set
predictions = model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
print("Accuracy:", accuracy)
| Haticenurcoskunn/Introduction-to-pattern-term-project | binary_classficiton/boosting_algorithms.py | boosting_algorithms.py | py | 970 | python | tr | code | 0 | github-code | 6 | 38882380606 |
from django.db import models
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
class ImageList(models.Model):
def __str__(self):
return self.file_name
file_path = models.CharField(
verbose_name='ファイルパス',
max_length=1000,
blank=False,
null=False,
)
file_name = models.CharField(
verbose_name='ファイル名',
max_length=100,
)
class ImageListDetail(models.Model):
imageList = models.ForeignKey(ImageList, on_delete=models.CASCADE)
def __str__(self):
return self.file_path
    # File path (path to the original image file)
file_path = models.CharField(
verbose_name='ファイルパス',
max_length=500,
blank=False,
null=False,
)
    # Image data
image_data = models.ImageField(
verbose_name='画像データ',
upload_to='images/',
)
    # Thumbnail
thumbnail = ImageSpecField(source="image_data",
processors=[ResizeToFill(150,150)],
format='JPEG',
options={'quality': 60}
)
    # Display order
disp_order = models.IntegerField(
verbose_name='表示順',
blank=False,
null=False,
)
| hogendan/SuzuImage | imagelist/models.py | models.py | py | 1,294 | python | ja | code | 0 | github-code | 6 | 20843364345 |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
import math
from network import Network
from losses import WalkerVisitLosses
from input_pipeline import get_datasets
from utils import evaluate, write_logs, make_weights_for_balanced_classes
"""
The purpose of this script is to train a simple
CNN on mnist and svhn using associative domain adaptation.
"""
BATCH_SIZE = 200
NUM_EPOCHS = 15
EMBEDDING_DIM = 64
DELAY = 1000 # number of steps before turning on additional losses
GROWTH_STEPS = 1000 # number of steps of linear growth of additional losses
# so domain adaptation losses are in full strength after `DELAY + GROWTH_STEPS` steps
BETA1, BETA2 = 1.0, 0.5
DEVICE = torch.device('cuda:0')
SOURCE_DATA = 'svhn' # 'svhn' or 'mnist'
SAVE_PATH = 'models/svhn_source'
LOGS_PATH = 'logs/svhn_source.json'
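# Illustrative note (added): with DELAY = 1000 and GROWTH_STEPS = 1000 the association
# losses stay off for the first 1000 steps and then ramp up linearly, e.g.
#   step 1000 -> growth 0.0, step 1500 -> growth 0.5, step >= 2000 -> growth 1.0,
# which matches the clamp((i - DELAY) / GROWTH_STEPS, 0.0, 1.0) factor used in the training loop below.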
def train_and_evaluate():
svhn, mnist = get_datasets(is_training=True)
source_dataset = svhn if SOURCE_DATA == 'svhn' else mnist
target_dataset = mnist if SOURCE_DATA == 'svhn' else svhn
weights = make_weights_for_balanced_classes(source_dataset, num_classes=10)
sampler = WeightedRandomSampler(weights, len(weights))
source_loader = DataLoader(source_dataset, BATCH_SIZE, sampler=sampler, pin_memory=True, drop_last=True)
target_loader = DataLoader(target_dataset, BATCH_SIZE, shuffle=True, pin_memory=True, drop_last=True)
val_svhn, val_mnist = get_datasets(is_training=False)
val_svhn_loader = DataLoader(val_svhn, BATCH_SIZE, shuffle=False, drop_last=False)
val_mnist_loader = DataLoader(val_mnist, BATCH_SIZE, shuffle=False, drop_last=False)
print('\nsource dataset is', SOURCE_DATA, '\n')
num_steps_per_epoch = math.floor(min(len(svhn), len(mnist)) / BATCH_SIZE)
embedder = Network(image_size=(32, 32), embedding_dim=EMBEDDING_DIM).to(DEVICE)
classifier = nn.Linear(EMBEDDING_DIM, 10).to(DEVICE)
model = nn.Sequential(embedder, classifier)
model.train()
optimizer = optim.Adam(lr=1e-3, params=model.parameters(), weight_decay=1e-3)
scheduler = CosineAnnealingLR(optimizer, T_max=num_steps_per_epoch * NUM_EPOCHS - DELAY, eta_min=1e-6)
cross_entropy = nn.CrossEntropyLoss()
association = WalkerVisitLosses()
text = 'e:{0:2d}, i:{1:3d}, classification loss: {2:.3f}, ' +\
'walker loss: {3:.3f}, visit loss: {4:.4f}, ' +\
'total loss: {5:.3f}, lr: {6:.6f}'
logs, val_logs = [], []
i = 0 # iteration
for e in range(NUM_EPOCHS):
model.train()
for (x_source, y_source), (x_target, _) in zip(source_loader, target_loader):
x_source = x_source.to(DEVICE)
x_target = x_target.to(DEVICE)
y_source = y_source.to(DEVICE)
x = torch.cat([x_source, x_target], dim=0)
embeddings = embedder(x)
a, b = torch.split(embeddings, BATCH_SIZE, dim=0)
logits = classifier(a)
usual_loss = cross_entropy(logits, y_source)
walker_loss, visit_loss = association(a, b, y_source)
if i > DELAY:
growth = torch.clamp(torch.tensor((i - DELAY)/GROWTH_STEPS).to(DEVICE), 0.0, 1.0)
loss = usual_loss + growth * (BETA1 * walker_loss + BETA2 * visit_loss)
else:
loss = usual_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i > DELAY:
scheduler.step()
lr = scheduler.get_lr()[0]
log = (e, i, usual_loss.item(), walker_loss.item(), visit_loss.item(), loss.item(), lr)
print(text.format(*log))
logs.append(log)
i += 1
result1 = evaluate(model, cross_entropy, val_svhn_loader, DEVICE)
result2 = evaluate(model, cross_entropy, val_mnist_loader, DEVICE)
print('\nsvhn loss {0:.3f} and accuracy {1:.3f}'.format(*result1))
print('mnist loss {0:.3f} and accuracy {1:.3f}\n'.format(*result2))
val_logs.append((i,) + result1 + result2)
torch.save(model.state_dict(), SAVE_PATH)
write_logs(logs, val_logs, LOGS_PATH)
train_and_evaluate()
| TropComplique/associative-domain-adaptation | train.py | train.py | py | 4,289 | python | en | code | 7 | github-code | 6 | 5707554791 |
'''
activation_key module
'''
from dataclasses import dataclass
from sqlalchemy import Integer, Column, String, ForeignKey
from databases.models.user import User
from config.db import db
@dataclass
class ActivationKey(db.Model): # pylint: disable=too-few-public-methods
'''
activation_key model class
'''
id: int # pylint: disable=C0103
hash_key: str
user_id: int
__tablename__ = 'activation_key'
id = Column(Integer, primary_key=True)
hash_key = Column(String(255), unique=True, nullable=False)
user_id = Column(Integer, ForeignKey(User.id), nullable=False)
def __repr__(self):
return f'ActivationKey {self.id}'
| Dolzhenkov-Andrii/api | databases/models/activation_key.py | activation_key.py | py | 681 | python | en | code | 0 | github-code | 6 | 12569604796 |
from rest_framework import permissions
from rest_framework.permissions import BasePermission
# This class checks whether the user has permission to delete or update the post, i.e. whether the logged-in user is the post's owner.
class IsOwnerOrReadOnly(BasePermission):
message = "you must be the owner of this post "
def has_object_permission(self, request, view, obj):
my_safe_method = ['PUT']
print(request.user.is_staff)
print(request.user.is_superuser)
if request.method in my_safe_method:
return True
return request.user.is_superuser or obj.author == request.user
| Maniabhishek/ContentManagementSystem | appcms/api/permissions.py | permissions.py | py | 639 | python | en | code | 0 | github-code | 6 | 30323378836 |
#
#
#
import openpyxl
import datetime
import pandas as pd
from Syne_TestReportMapping import EmpId_Name_Mapping
def Emp_Syne_Client_Mapping(inputFormat,empID):
Emp_Syne_Client_Mapping={}
Emp_Syne_Client_Mapping[empID]=EmpID_Mapping(inputFormat,int(empID))
return Emp_Syne_Client_Mapping
def EmpID_Mapping(inputFormat, empID):
Map_Syne_UserName = "SYNECHRON USER NAME"
Map_Client_UserName = "CLIENT USER NAME"
Emp_Name_Mapping = {}
dfFormatSyne = pd.read_excel(inputFormat, "Name_UserId_Mapping")
df_new = dfFormatSyne[(dfFormatSyne['EMP ID'] == empID)]
Syne_UserName_List = [x for x in df_new['SYNECHRON USER NAME'].tolist() if isinstance(x, str)]
Client_UserName_List = [x for x in df_new['CLIENT USER NAME'].tolist() if isinstance(x, str)]
for syne_Name, Client_Name in zip(Syne_UserName_List,Client_UserName_List):
Emp_Name_Mapping[Map_Syne_UserName]=syne_Name
Emp_Name_Mapping[Map_Client_UserName]=Client_Name
return Emp_Name_Mapping
| Aditi9109/TimeSheet | Emp_Syne_ClientMapping.py | Emp_Syne_ClientMapping.py | py | 1,013 | python | en | code | 0 | github-code | 6 | 11779793970 |
import os
from scripts.util import read_supertopics, SuperTopic, get_spottopics, DateFormat, read_temp_dist, smooth
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
BOOST = ['raw', # 0
'retweets', # 1
'replies', # 2
'likes', # 3
'retweets_likes', # 4
'replies_likes', # 5
'retweets_replies', # 6
'retweets_likes_replies' # 7
][0]
FILE_SUPERTOPICS = f'data/climate2/topics_big2/supertopics.csv'
FILES_TEMP_DIST = {
'keep (majority)': f'data/climate2/topics_big2/temporal_keep_majority/daily/temporal_daily_{BOOST}_abs.json',
'fresh (majority)': f'data/climate2/topics_big2/temporal_fresh_majority/daily/temporal_daily_{BOOST}_abs.json'
}
FILE_TEMP_DIST = FILES_TEMP_DIST[['keep (majority)', 'fresh (majority)'][0]]
BOUNDARY = '2020-03-01'
SMOOTHING = 90
EPS = 1e-12
annotations = read_supertopics(FILE_SUPERTOPICS)
td_groups, td_topics, td_counts = read_temp_dist(FILE_TEMP_DIST)
supertopic_counts = []
st_summed_counts = []
st_topic_counts = []
for st in SuperTopic:
t_counts = td_counts.T[annotations[:, st] > 0].sum(axis=0)
supertopic_counts.append(t_counts)
print(st.name, f'{t_counts.sum():,}')
st_summed_counts.append(t_counts.sum())
st_topic_counts.append(sum(annotations[:, st] > 0))
supertopic_counts = np.array(supertopic_counts)
BOUND = td_groups.index(BOUNDARY)
sts_plot = [SuperTopic.COVID, SuperTopic.Causes, SuperTopic.Impacts, SuperTopic.Solutions,
SuperTopic.POLITICS, SuperTopic.Movements, SuperTopic.Contrarian,
# SuperTopic.Other, # SuperTopic.Interesting, SuperTopic.NotRelevant
]
tweets_per_day = np.sum(td_counts, axis=1)
tweets_per_topic = np.sum(td_counts, axis=0)
st_plot_counts = supertopic_counts[sts_plot]
st_plot_shares = st_plot_counts / tweets_per_day
st_plot_shares_smooth = smooth(st_plot_shares, kernel_size=SMOOTHING)
subplot_titles = [
f'{st.name}: {sum(annotations[:, st] > 0):,} topics with {int(st_summed_counts[sti]):,} tweets'
for sti, st in enumerate(sts_plot)
]
os.makedirs('data/climate2/figures/supertopic_shares_split/', exist_ok=True)
for i, st in enumerate(sts_plot, start=1):
fig = go.Figure(layout={'title': {'text': subplot_titles[i - 1]}})
n_st_tweets = td_counts.T[annotations[:, st] > 0].T
n_st_tweets_per_day = n_st_tweets.sum(axis=1)
subfig = []
subfig_y = smooth(n_st_tweets.T / (n_st_tweets_per_day + EPS), kernel_size=SMOOTHING)
topic_nums = np.arange(annotations.shape[0])[annotations[:, st] > 0]
for ti, (y_, yt) in enumerate(zip(subfig_y, n_st_tweets.T)):
fig.add_trace(go.Scatter(x=td_groups,
y=y_,
mode='lines',
stackgroup='one',
name=f'Topic {topic_nums[ti]} ({int(yt.sum()):,} tweets)'))
fig.update_layout(height=1000, width=1000)
fig.write_html(f'data/climate2/figures/supertopic_shares_split/supertopic_{st.name}.html')
os.makedirs('data/climate2/figures/supertopic_abs_split/', exist_ok=True)
for i, st in enumerate(sts_plot, start=1):
fig = go.Figure(layout={'title': {'text': subplot_titles[i - 1]}})
n_st_tweets = td_counts.T[annotations[:, st] > 0].T
n_st_tweets_per_day = n_st_tweets.sum(axis=1)
subfig_y = smooth(n_st_tweets.T, kernel_size=SMOOTHING)
topic_nums = np.arange(annotations.shape[0])[annotations[:, st] > 0]
for ti, (y_, yt) in enumerate(zip(subfig_y, n_st_tweets.T)):
fig.add_trace(go.Scatter(x=td_groups,
y=y_,
mode='lines',
stackgroup='one',
name=f'Topic {topic_nums[ti]} ({int(yt.sum()):,} tweets)'))
fig.update_layout(height=1000, width=1000)
fig.write_html(f'data/climate2/figures/supertopic_abs_split/supertopic_{st.name}.html')
| TimRepke/twitter-climate | code/figures/supertopics/stacked_area_charts_interactive_separate.py | stacked_area_charts_interactive_separate.py | py | 3,987 | python | en | code | 1 | github-code | 6 | 3423610291 |
import json
from functools import wraps
from flask import request
from flask.ext.restful import reqparse, Api, Resource
from api_json_example import app
# Database? We don't need no stinkin database
db = {}
api = Api(app)
def accept_json(func):
"""
Decorator which returns a 406 Not Acceptable if the client won't accept JSON
"""
@wraps(func)
def wrapper(*args, **kwargs):
accept = api.mediatypes()
if "*/*" in accept or "application/json" in accept:
return func(*args, **kwargs)
return {"message": "Request must accept JSON"}, 406
return wrapper
def require_json(func):
"""
Decorator which returns a 415 Unsupported Media Type if the client sends
something other than JSON
"""
@wraps(func)
def wrapper(*args, **kwargs):
if request.mimetype == "application/json":
return func(*args, **kwargs)
return {"message": "Request must contain JSON"}, 415
return wrapper
class User(Resource):
"""
A simple RESTful API for a user
"""
parser = reqparse.RequestParser()
method_decorators = [accept_json]
def get(self, id):
if not id in db:
return {"message": "User not found"}, 404
return db[id], 200
@require_json
def put(self, id):
args = User.parser.parse_args()
# Validate arguments
if args["name"] and not isinstance(args["name"], basestring):
return {"message": "Name must be a string"}, 422
if args["email"] and not isinstance(args["email"], basestring):
return {"message": "Email address must be a string"}, 422
if (args["roles"] and
not all(isinstance(role, basestring) for role in args["roles"])):
return {"message": "Roles must be a strings"}, 422
if id in db:
# Edit user
# SMELL: Merging could be nicer
if args["name"]:
db[id]["name"] = args["name"]
if args["email"]:
db[id]["email"] = args["email"]
if args["roles"]:
db[id]["roles"] = args["roles"]
return db[id], 201, {"Location": "/api/user/{}".format(id)}
else:
# Create new user
if not args["name"] or not args["email"]:
return {"message": "Must provide name and email"}, 422
db[id] = {
"name": args["name"],
"email": args["email"]
}
if args["roles"]:
db[id]["roles"] = args["roles"]
else:
db[id]["roles"] = []
return db[id], 200
User.parser.add_argument("name", type=str, location="get_json")
User.parser.add_argument("email", type=str, location="get_json")
User.parser.add_argument("roles", type=list, location="get_json")
api.add_resource(User, "/api/user/<int:id>")
| sjl421/thinkful-python-code-examples | flask/api_json_example/api_json_example/api.py | api.py | py | 2,931 | python | en | code | null | github-code | 6 | 6194000945 |
import os
import json
from dotenv import load_dotenv
load_dotenv()
chinput = os.getenv('CHATINPUT')
chinput = '-1001799753250 -1001574745581 -1001322515232 -1001725353361'
channel_input = [int(i) for i in chinput.split(' ')]
choutput = os.getenv('CHATOUTPUT')
choutput = '-1001802541407'
channel_output = [int(i) for i in choutput.split(' ')]
REDIS_URL = os.getenv('REDIS_URL')
session = os.getenv("SESSION")
api_hash = os.getenv("API_HASH")
api_id = os.getenv("API_ID")
sentry_env = os.getenv("SENTRY_ENV")
| Lj6890/Forwarded | config.py | config.py | py | 513 | python | en | code | 0 | github-code | 6 | 37431367661 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 14:32:30 2020
@author: zjerma1
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from mlxtend.evaluate import bias_variance_decomp
#data for final plot
x_plot = np.linspace(0,1,num=10)
y_plot = np.sin(2*np.pi*x_plot)
j = 0
#y_avg = np.array[100]
test_error = []
bias = []
variance = []
reg_parameter = []
bias_variance = []
for i in range(8):
reg_parameter.append(10**i)
reg_parameter.append(10**(-i))
reg_parameter.sort()
print(reg_parameter)
for j in reg_parameter:
error_holder = 0
bias_holder = 0
variance_holder = 0
#generate training data
x_data = np.linspace(0,1,num=10).reshape(-1,1)
y_data = np.sin(2.0*np.pi*x_plot) + .1*np.random.randn(10)
#print(x_plot)
#print("\n")
#print(y_data)
#add polynomials to the model
poly_features = PolynomialFeatures(degree=9,include_bias=False)
#print(poly_features)
#x_data is extened by including its powers
x_data_poly = poly_features.fit_transform(x_data)
#print(x_data_poly)
#Ridge regression or Tikhonov regularizaiton
ridge_reg = Ridge(alpha = j ,solver="cholesky")
#fit the extended data set
ridge_reg.fit(x_data_poly,y_data)
#generate the test data set
x_new = np.linspace(0,1,num = 100).reshape(100,1)
x_new_poly = poly_features.transform(x_new)
#print(x_new)
#print('\n')
#print(x_new_poly)
#prediction on the test data set
y_new = ridge_reg.predict(x_new_poly)
error_holder, bias_holder, variance_holder = bias_variance_decomp(ridge_reg, x_data, y_data, x_new, y_new, loss = 'mse') #bias-variance decomp
test_error.append(error_holder)
bias.append(bias_holder)
variance.append(variance_holder)
for j in range(len(bias)):
bias[j] = bias[j]**2
for j in range(len(bias)):
bias_variance.append(bias[j] + variance[j])
print(test_error)
print(bias_variance)
plt.plot(reg_parameter, test_error, label = 'test error')
plt.plot(reg_parameter, bias, label = 'bias')
plt.plot(reg_parameter,variance, label = 'variance')
plt.plot(reg_parameter, bias_variance, label = 'bias + variance')
plt.xscale('log')
plt.legend()
plt.show()
| zjermain/Math-7390-Machine-Learning-Jermain | Homework 2-Bias Variance Decomp.py | Homework 2-Bias Variance Decomp.py | py | 2,428 | python | en | code | 0 | github-code | 6 | 14594650315 |
import tensorflow as tf
import pathlib
import os
import cv2
import numpy as np
import tqdm
import argparse
class TFRecordsGAN:
def __init__(self,
image_dir="/volumes2/datasets/horse2zebra/trainA",
tfrecord_path="data.tfrecords",
img_pattern="*.jpgg"):
"""
:param data_dir: the path to iam directory containing the subdirectories of xml and lines from iam dataset
:param tfrecord_path:
"""
self.image_dir = image_dir
self.tfrecord_path = tfrecord_path
self.img_pattern = img_pattern
self.image_feature_description = \
{
'image': tf.io.FixedLenFeature([], tf.string)
}
@staticmethod
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@staticmethod
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
@staticmethod
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _parse_example_function(self, example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_example(example_proto, self.image_feature_description)
def image_example(self, image_string):
feature = {
'image': self._bytes_feature(image_string)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def write_tfrecords(self, training=False, dataset_name=""):
img_paths = sorted(pathlib.Path(self.image_dir).rglob(self.img_pattern))
with tf.io.TFRecordWriter(self.tfrecord_path) as writer:
for img_path in tqdm.tqdm(img_paths):
img_string = open(str(img_path), 'rb').read()
tf_example = self.image_example(img_string)
writer.write(tf_example.SerializeToString())
if training:
import json
if os.path.exists('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))):
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))) as f:
data = json.load(f)
if dataset_name in list(data.keys()):
print("Dataset {} value was already present but value was updated".format(dataset_name))
else:
data = {}
data[dataset_name] = len(img_paths)
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path)), 'w') as json_file:
json.dump(data, json_file)
def decode_strings(self, record):
images = tf.io.decode_jpeg(record['image'], 3)
return images
def read_tfrecords(self):
"""
        Read the image tfrecords
:return: Returns an image
"""
raw_dataset = tf.data.TFRecordDataset(self.tfrecord_path)
parsed_dataset = raw_dataset.map(self._parse_example_function)
decoded_dataset = parsed_dataset.map(self.decode_strings)
return decoded_dataset
if __name__ == "__main__":
args = argparse.ArgumentParser(description="Create tfrecords with the following settings")
args.add_argument("-d", "--dataset", type=str, default=f"no_name_{str(np.random.randint(0, 20000))}",
help="Name a dataset to be later used with seg_train script, highly recommended to have one")
args.add_argument("--img_dir", "-i", type=str, required=True, help="Directory containing the dataset images")
args.add_argument("--save_dir", "-s", type=str, required=True, help="Directory to save the tfrecords")
args.add_argument("--img_pat", "-i_p", type=str, default="*.jpg", help="Image pattern/extension in directory, "
"glob regex convention")
args.add_argument("--visualize", "-v", action="store_true", help="Show 4 samples after creation. As visual check.")
args.add_argument("--eval", "-e", action="store_true", help="Set to true in case the records are for evaluation")
args = args.parse_args()
dataset_name = args.dataset
os.makedirs(args.save_dir, exist_ok=True)
record_type = "train" if not args.eval else "val"
records = TFRecordsGAN(image_dir=f"{args.img_dir}",
tfrecord_path=f"{args.save_dir}/{dataset_name}_{record_type}.tfrecords",
img_pattern=args.img_pat)
records.write_tfrecords(training=True, dataset_name=dataset_name) if not args.eval else records.write_tfrecords()
if args.visualize:
image_dataset = records.read_tfrecords().batch(1).take(4)
cv2.namedWindow("img", 0)
for image_features in image_dataset:
img = image_features[0, ..., ::-1]
cv2.imshow("img", img.numpy())
cv2.waitKey()
| AhmedBadar512/Badr_AI_Repo | utils/create_gan_tfrecords.py | create_gan_tfrecords.py | py | 5,294 | python | en | code | 2 | github-code | 6 | 21043710164 |
from datetime import datetime, time
from typing import Dict
# getting info from administrator
def getting_payload() -> Dict:
"""
this function takes nothing and return a dictionary of the users answers
:return: id, company_name, departure_time, arrival_time
"""
temp_id = int(input("Enter the new plane ID(int):\n"))
temp_company_name = input("Enter the company name:\n")
# making sure time is in the correct format
try:
temp_departure_time = input("Enter the departure time => (hh:mm:ss) example, (14:05:20):\n")
temp_departure_time = datetime.strptime(temp_departure_time, "%H:%M:%S").time()
except ValueError:
print("You have entered time in the wrong Format, the system will assign the time to\n"
"1:1:1 instead. Feel free to modify the departure time in plane modification")
temp_departure_time = time(1, 1, 1)
# making sure time is in the correct format
try:
temp_arrival_time = input("Enter the arrival time => (hh:mm:ss) example, (14:05:20):\n")
temp_arrival_time = datetime.strptime(temp_arrival_time, "%H:%M:%S").time()
except ValueError:
print("You have entered time in the wrong Format, the system will assign the time to\n"
"1:1:1 instead. Feel free to modify the arrival time in plane modification")
temp_arrival_time = time(1, 1, 1)
return {
"id": temp_id,
"company_name": temp_company_name,
"departure_time": temp_departure_time,
"arrival_time": temp_arrival_time
}
| Mohamad-Hachem/Airplane_Booking_System | utils/getting_airplane_information.py | getting_airplane_information.py | py | 1,567 | python | en | code | 0 | github-code | 6 | 21617134732 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
# GNU Radio Python Flow Graph
# Title: GPS Simulator Playback - 10Msps
# Author: Damien Dusha
# GNU Radio version: 3.8.1.0
from gnuradio import analog
from gnuradio import blocks
import pmt
from gnuradio import gr
from gnuradio.filter import firdes
import sys
import signal
from argparse import ArgumentParser
from gnuradio.eng_arg import eng_float, intx
from gnuradio import eng_notation
from gnuradio import uhd
import time
class gnss_sim_playback_10MHz_nogui(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "GPS Simulator Playback - 10Msps")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 10e6
self.freq = freq = 1575.42e6
##################################################
# Blocks
##################################################
self.uhd_usrp_sink_0 = uhd.usrp_sink(
",".join(("", "")),
uhd.stream_args(
cpu_format="fc32",
args='',
channels=list(range(0,1)),
),
'',
)
self.uhd_usrp_sink_0.set_clock_source('external', 0)
self.uhd_usrp_sink_0.set_center_freq(freq, 0)
self.uhd_usrp_sink_0.set_gain(0, 0)
self.uhd_usrp_sink_0.set_antenna('TX/RX', 0)
self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
# No synchronization enforced.
self.blocks_multiply_const_vxx_1_1 = blocks.multiply_const_cc(1.0)
self.blocks_multiply_const_vxx_1_0 = blocks.multiply_const_cc(0.25)
self.blocks_multiply_const_vxx_1 = blocks.multiply_const_cc(1.0 / (2**12))
self.blocks_interleaved_short_to_complex_0 = blocks.interleaved_short_to_complex(False, False)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_short*1, '/home/damien/bitbucket/gnss-sim/gpssim.bin', False, 0, 0)
self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
self.blocks_add_xx_0 = blocks.add_vcc(1)
self.analog_noise_source_x_0 = analog.noise_source_c(analog.GR_GAUSSIAN, 1.0, 0)
##################################################
# Connections
##################################################
self.connect((self.analog_noise_source_x_0, 0), (self.blocks_add_xx_0, 1))
self.connect((self.blocks_add_xx_0, 0), (self.blocks_multiply_const_vxx_1_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_interleaved_short_to_complex_0, 0))
self.connect((self.blocks_interleaved_short_to_complex_0, 0), (self.blocks_multiply_const_vxx_1, 0))
self.connect((self.blocks_multiply_const_vxx_1, 0), (self.blocks_multiply_const_vxx_1_1, 0))
self.connect((self.blocks_multiply_const_vxx_1_0, 0), (self.uhd_usrp_sink_0, 0))
self.connect((self.blocks_multiply_const_vxx_1_1, 0), (self.blocks_add_xx_0, 0))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self.uhd_usrp_sink_0.set_center_freq(self.freq, 0)
def main(top_block_cls=gnss_sim_playback_10MHz_nogui, options=None):
tb = top_block_cls()
def sig_handler(sig=None, frame=None):
tb.stop()
tb.wait()
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
tb.start()
tb.wait()
if __name__ == '__main__':
main()
| damiendusha/gnss-sim | gnuradio/gnss_sim_playback_10MHz_nogui.py | gnss_sim_playback_10MHz_nogui.py | py | 3,767 | python | en | code | 3 | github-code | 6 | 24526724853 |
import json
from arXivo.models import ArXivoUser
from arXivo.serializers import ArXivoUserSerializer, SearchSerializer
from arXivo.utils import get_tokens_for_user
from django.conf import settings
from django.contrib.auth import authenticate
from django.http.response import JsonResponse
from django.middleware import csrf
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
class RegisterView(APIView):
serializer_class = ArXivoUserSerializer
def post(self, request, format=None):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
csrf.get_token(request)
return JsonResponse(
{"message": "User Created Successfully"}, status=status.HTTP_200_OK
)
else:
return JsonResponse(
{"message": "There was an error!", "error": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class LoginView(APIView):
def post(self, request, format=None):
data = request.data
response = Response()
username = data.get("username", None)
user = ArXivoUser.objects.filter(username=username)
if not user.exists():
return Response(
{"error": "Username does not exist!!"},
status=status.HTTP_403_FORBIDDEN,
)
password = data.get("password", None)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
data = get_tokens_for_user(user)
response.set_cookie(
key=settings.SIMPLE_JWT["AUTH_COOKIE"],
value=data["access"],
expires=settings.SIMPLE_JWT["ACCESS_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.set_cookie(
key="refresh_token",
value=data["refresh"],
expires=settings.SIMPLE_JWT["REFRESH_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
csrf.get_token(request)
response.data = {"message": "Login successfully", "data": data}
return response
else:
return Response(
{"error": "This account is not active!!"},
status=status.HTTP_403_FORBIDDEN,
)
else:
return Response(
{"error": "Invalid Password!!"},
status=status.HTTP_403_FORBIDDEN,
)
class RefreshView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
refresh = RefreshToken(request.COOKIES.get("refresh_token"))
response = Response(status=status.HTTP_200_OK)
response.set_cookie(
key=settings.SIMPLE_JWT["AUTH_COOKIE"],
value=refresh.access_token,
expires=settings.SIMPLE_JWT["ACCESS_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.set_cookie(
key=settings.SIMPLE_JWT["REFRESH_COOKIE"],
value=str(refresh),
expires=settings.SIMPLE_JWT["REFRESH_TOKEN_LIFETIME"],
secure=settings.SIMPLE_JWT["AUTH_COOKIE_SECURE"],
httponly=settings.SIMPLE_JWT["AUTH_COOKIE_HTTP_ONLY"],
samesite=settings.SIMPLE_JWT["AUTH_COOKIE_SAMESITE"],
)
response.data = {"message": "Tokens Refreshed Successfully"}
return response
class LogoutView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
response = Response(status=status.HTTP_200_OK)
response.delete_cookie(settings.SIMPLE_JWT["AUTH_COOKIE"])
response.delete_cookie(settings.SIMPLE_JWT["REFRESH_COOKIE"])
response.delete_cookie(settings.SIMPLE_JWT["CSRF_COOKIE"])
response.data = {"message": "Logged Out Successfully"}
return response
class SearchView(APIView):
permission_classes = [IsAuthenticated]
serializer_class = SearchSerializer
def post(self, request, format=None):
users = ArXivoUser.objects.filter(
username__icontains=request.data["search_term"]
)
serializer = self.serializer_class(users, many=True)
return JsonResponse({"data": serializer.data}, status=status.HTTP_200_OK)
class GetNotificationView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, format=None):
notif_data = request.user.notification_array
notif_pyobj = json.loads(notif_data)["data"]
for _notif in notif_pyobj:
_notif["seen"] = True
request.user.notification_array = json.dumps({"data": notif_pyobj})
request.user.save()
return JsonResponse(notif_data, status=status.HTTP_200_OK, safe=False)
class SendNotificationView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
other_user = ArXivoUser.objects.get(username=request.data["send_to"])
notif_data = {
"filename": request.data["filename"],
"address": request.data["address"],
"key": request.data["key"],
"file_type": request.data["file_type"],
"seen": False,
"sender": request.user.username,
}
prev_data = json.loads(other_user.notification_array)
prev_data["data"].append(notif_data)
other_user.notification_array = json.dumps(prev_data)
other_user.save()
data = {"reponse": "good_response"}
return JsonResponse(data, status=status.HTTP_200_OK)
| DebadityaPal/arXivo | backend/arXivo/views.py | views.py | py | 6,438 | python | en | code | 1 | github-code | 6 | 23715644007 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from event.models import Event
# Create your models here.
class Episode(models.Model):
event = models.ForeignKey(
Event,
on_delete=models.CASCADE,
related_name='episode')
session_id = models.CharField(max_length=200)
archive_id = models.CharField(max_length=200)
| emilarran/channelshowdown | channelshowdown/livestream/models.py | models.py | py | 396 | python | en | code | 0 | github-code | 6 | 30861890052 |
#!/usr/bin/env python
# Given an integer k and a string s, find the length of the longest substring
# that contains at most k distinct characters.
#
# For example, given s = "abcba" and k = 2, the longest substring with k distinct
# characters is "bcb".
def longest_substr(s, k):
chars_met = 0
chars = [0] * 26
substr = ""
tmp = ""
for i in range(0, len(s)):
index = ord(s[i]) - ord('a')
if chars_met < k:
substr += s[i]
if chars[index] == 0:
chars[index] = 1
chars_met += 1
elif chars_met == k and chars[index] == 1:
substr += s[i]
else:
break
if len(s[1:]) > len(substr):
tmp = longest_substr(s[1:], k)
return substr if len(substr) > len(tmp) else tmp
assert longest_substr("abcba", 2) == "bcb"
assert longest_substr("abcdeff", 2) == "eff"
assert longest_substr("abcdffjtheef", 5) == "ffjtheef"
print("[+] All tests done.")
| mdolmen/daily_coding_problem | 012-longest-substring/solution.py | solution.py | py | 987 | python | en | code | 0 | github-code | 6 | 35395868914 |
from django.db import models
from django.urls import reverse
from phone_field import PhoneField
# Create your models here.
class Department(models.Model):
"""Отдел компании"""
name = models.CharField(max_length=200, verbose_name="Название отдела")
class Meta:
db_table = 'department'
ordering = ['name']
def __str__(self):
return self.name
class Employee(models.Model):
"""Сотрудник компании"""
first_name = models.CharField(max_length=50, verbose_name='Имя')
middle_name = models.CharField(max_length=50, verbose_name='Отчество')
last_name = models.CharField(max_length=50, verbose_name='Фамилия')
birthday = models.DateField(verbose_name='Дата рождения')
email = models.EmailField(verbose_name='e-mail')
phone = PhoneField(verbose_name='Телефон')
begin_work = models.DateField(verbose_name='Начало работы')
end_work = models.DateField(
blank=True,
null=True,
help_text='Введите дату увольнения сотрудника',
verbose_name='Окончание работы'
)
position = models.CharField(max_length=200, verbose_name='Должность')
department = models.ForeignKey(
Department,
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name='Отдел')
class Meta:
db_table = 'employee'
ordering = ['last_name', 'first_name', 'middle_name']
def get_absolute_url(self):
return reverse('employee-detail', args=[str(self.id)])
def display_last_name(self):
return '{0} {1} {2}'.format(self.last_name, self.first_name, self.middle_name)
display_last_name.short_description = 'Ф.И.О.'
def __str__(self):
return self.display_last_name()
| zarmoose/eastwood_test | employees/models.py | models.py | py | 1,894 | python | en | code | 0 | github-code | 6 | 26214436184 |
import random
when = ["a few years ago", "once upon a time", "last night", "a long time ago", "yesterday"]
who = [" rabbit", "squirrel", "turtle", "dog", "cat"]
name = ["Ali", "Stan", "Tanisha", "Sehej", "Ram"]
where = ["India", "Germany", "Italy", "Romania"]
went = ["school", "seminar", "class", "laundry", "restraunt"]
happened = ["made new friends", "wrote a book", "had dinner", "did chores"]
print(random.choice(when)+ " , " + random.choice(who) + " named " + random.choice(name) + " lived in " + random.choice(where) + " went to a " +random.choice(went) + " and " + random.choice(happened) )
| sehej3/Random-Story-Generator | randomStoryGenerator.py | randomStoryGenerator.py | py | 613 | python | en | code | 0 | github-code | 6 | 35368433605 |
import sys
def priority(item):
p = ord(item) - ord("a") + 1
if p > 0 and p <= 26:
return p
return ord(item) - ord("A") + 27
total = 0
for line in sys.stdin:
rucksack = line.rstrip()
compartment_size = len(rucksack)
seen = set()
for i, item in enumerate(rucksack):
if i < compartment_size / 2:
seen.add(item)
else:
if item in seen:
total += priority(item)
print(priority(item))
break
print(total)
| tynovsky/advent-of-code-2022 | 03a.py | 03a.py | py | 523 | python | en | code | 0 | github-code | 6 | 30354483311 |
import sys
import vtk
from vtk.util import vtkConstants
try:
from vtk.util import numpy_support
except ImportError:
numpy_support = None
import numpy
# Enthought library imports.
try:
from tvtk.array_ext import set_id_type_array
HAS_ARRAY_EXT = True
except ImportError:
HAS_ARRAY_EXT = False
# Useful constants for VTK arrays.
VTK_ID_TYPE_SIZE = vtk.vtkIdTypeArray().GetDataTypeSize()
if VTK_ID_TYPE_SIZE == 4:
ID_TYPE_CODE = numpy.int32
elif VTK_ID_TYPE_SIZE == 8:
ID_TYPE_CODE = numpy.int64
VTK_LONG_TYPE_SIZE = vtk.vtkLongArray().GetDataTypeSize()
if VTK_LONG_TYPE_SIZE == 4:
LONG_TYPE_CODE = numpy.int32
ULONG_TYPE_CODE = numpy.uint32
elif VTK_LONG_TYPE_SIZE == 8:
LONG_TYPE_CODE = numpy.int64
ULONG_TYPE_CODE = numpy.uint64
BASE_REFERENCE_COUNT = vtk.vtkObject().GetReferenceCount()
def getbuffer(array):
return getattr(numpy, 'getbuffer', memoryview)(array)
def set_id_type_array_py(id_array, out_array):
"""Given a 2D Int array (`id_array`), and a contiguous 1D numarray array
(`out_array`) having the correct size, this function sets the data from
`id_array` into `out_array` so that it can be used in place of a
`vtkIdTypeArray` in order to set the cells of a `vtkCellArray`.
Note that if `shape = id_array.shape` then `size(out_array) ==
shape[0]*(shape[1] + 1)` should be true. If not you'll get an
`AssertionError`.
`id_array` need not be contiguous but `out_array` must be.
"""
assert numpy.issubdtype(id_array.dtype, numpy.signedinteger)
assert out_array.flags.contiguous == 1, \
"out_array must be contiguous."
shp = id_array.shape
assert len(shp) == 2, "id_array must be a two dimensional array."
sz = out_array.size
e_sz = shp[0]*(shp[1]+1)
assert sz == e_sz, \
"out_array size is incorrect, expected: %s, given: %s" % (e_sz, sz)
# we are guaranteed contiguous, so these just change the view (no copy)
out_shp = out_array.shape
out_array.shape = (shp[0], shp[1] + 1)
out_array[:, 0] = shp[1]
out_array[:, 1:] = id_array
out_array.shape = out_shp
if not HAS_ARRAY_EXT:
set_id_type_array = set_id_type_array_py
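# Illustrative layout produced by set_id_type_array (values are hypothetical):
# for id_array = [[0, 1, 2], [3, 4, 5]] and an out_array of size 2*(3+1) = 8,
# the result is [3, 0, 1, 2, 3, 3, 4, 5] -- each cell's point count precedes
# its point ids, which is the flat format a vtkCellArray expects.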
######################################################################
# The array cache.
######################################################################
class ArrayCache(object):
"""Caches references to numpy arrays that are not copied but views
of which are converted to VTK arrays. The caching prevents the user
from deleting or resizing the numpy array after it has been sent
down to VTK. The cached arrays are automatically removed when the
VTK array destructs."""
######################################################################
# `object` interface.
######################################################################
def __init__(self):
# The cache.
self._cache = {}
def __len__(self):
return len(self._cache)
def __contains__(self, vtk_arr):
key = vtk_arr.__this__
return key in self._cache
######################################################################
# `ArrayCache` interface.
######################################################################
def add(self, vtk_arr, np_arr):
"""Add numpy array corresponding to the vtk array to the
cache."""
key = vtk_arr.__this__
cache = self._cache
# Setup a callback so this cached array reference is removed
# when the VTK array is destroyed. Passing the key to the
# `lambda` function is necessary because the callback will not
# receive the object (it will receive `None`) and thus there
# is no way to know which array reference one has to remove.
vtk_arr.AddObserver(
'DeleteEvent', lambda o, e, key=key: self._remove_array(key)
)
# Cache the array
cache[key] = np_arr
def get(self, vtk_arr):
"""Return the cached numpy array given a VTK array."""
key = vtk_arr.__this__
return self._cache[key]
######################################################################
# Non-public interface.
######################################################################
def _remove_array(self, key):
"""Private function that removes the cached array. Do not
call this unless you know what you are doing."""
try:
del self._cache[key]
except KeyError:
pass
######################################################################
# Setup a global `_array_cache`. The array object cache caches all the
# converted numpy arrays that are not copied. This prevents the user
# from deleting or resizing the numpy array after it has been sent down
# to VTK.
######################################################################
_dummy = None
# This makes the cache work even when the module is reloaded.
for name in ['array_handler', 'tvtk.array_handler']:
if name in sys.modules:
mod = sys.modules[name]
if hasattr(mod, '_array_cache'):
_dummy = mod._array_cache
del mod
break
if _dummy:
_array_cache = _dummy
else:
_array_cache = ArrayCache()
del _dummy
def get_vtk_array_type(numeric_array_type):
"""Returns a VTK typecode given a numpy array."""
# This is a Mapping from numpy array types to VTK array types.
_arr_vtk = {
numpy.dtype('S'): vtkConstants.VTK_UNSIGNED_CHAR, # numpy.character
numpy.dtype(numpy.uint8): vtkConstants.VTK_UNSIGNED_CHAR,
numpy.dtype(numpy.uint16): vtkConstants.VTK_UNSIGNED_SHORT,
numpy.dtype(numpy.int8): vtkConstants.VTK_CHAR,
numpy.dtype(numpy.int16): vtkConstants.VTK_SHORT,
numpy.dtype(numpy.int32): vtkConstants.VTK_INT,
numpy.dtype(numpy.uint32): vtkConstants.VTK_UNSIGNED_INT,
numpy.dtype(numpy.uint64): vtkConstants.VTK_UNSIGNED_LONG,
numpy.dtype(numpy.float32): vtkConstants.VTK_FLOAT,
numpy.dtype(numpy.float64): vtkConstants.VTK_DOUBLE,
numpy.dtype(numpy.complex64): vtkConstants.VTK_FLOAT,
numpy.dtype(numpy.complex128): vtkConstants.VTK_DOUBLE,
}
_extra = {
numpy.dtype(ID_TYPE_CODE): vtkConstants.VTK_ID_TYPE,
numpy.dtype(ULONG_TYPE_CODE): vtkConstants.VTK_UNSIGNED_LONG,
numpy.dtype(LONG_TYPE_CODE): vtkConstants.VTK_LONG,
}
for t in _extra:
if t not in _arr_vtk:
_arr_vtk[t] = _extra[t]
try:
return _arr_vtk[numeric_array_type]
except KeyError:
for key in _arr_vtk:
if numpy.issubdtype(numeric_array_type, key):
return _arr_vtk[key]
raise TypeError(
"Couldn't translate array's type to VTK %s" % numeric_array_type
)
def get_vtk_to_numeric_typemap():
"""Returns the VTK array type to numpy array type mapping."""
_vtk_arr = {
vtkConstants.VTK_BIT: numpy.bool_,
vtkConstants.VTK_CHAR: numpy.int8,
vtkConstants.VTK_SIGNED_CHAR: numpy.int8,
vtkConstants.VTK_UNSIGNED_CHAR: numpy.uint8,
vtkConstants.VTK_SHORT: numpy.int16,
vtkConstants.VTK_UNSIGNED_SHORT: numpy.uint16,
vtkConstants.VTK_INT: numpy.int32,
vtkConstants.VTK_UNSIGNED_INT: numpy.uint32,
vtkConstants.VTK_LONG: LONG_TYPE_CODE,
vtkConstants.VTK_UNSIGNED_LONG: ULONG_TYPE_CODE,
vtkConstants.VTK_LONG_LONG: numpy.int64,
vtkConstants.VTK_ID_TYPE: ID_TYPE_CODE,
vtkConstants.VTK_FLOAT: numpy.float32,
vtkConstants.VTK_DOUBLE: numpy.float64
}
return _vtk_arr
def get_numeric_array_type(vtk_array_type):
"""Returns a numpy array typecode given a VTK array type."""
return get_vtk_to_numeric_typemap()[vtk_array_type]
def get_sizeof_vtk_array(vtk_array_type):
"""Returns the size of a VTK array type."""
_size_dict = {
vtkConstants.VTK_BIT: 1,
vtkConstants.VTK_CHAR: 1,
vtkConstants.VTK_SIGNED_CHAR: 1,
vtkConstants.VTK_UNSIGNED_CHAR: 1,
vtkConstants.VTK_SHORT: 2,
vtkConstants.VTK_UNSIGNED_SHORT: 2,
vtkConstants.VTK_INT: 4,
vtkConstants.VTK_UNSIGNED_INT: 4,
vtkConstants.VTK_LONG: VTK_LONG_TYPE_SIZE,
vtkConstants.VTK_UNSIGNED_LONG: VTK_LONG_TYPE_SIZE,
vtkConstants.VTK_LONG_LONG: 8,
vtkConstants.VTK_ID_TYPE: VTK_ID_TYPE_SIZE,
vtkConstants.VTK_FLOAT: 4,
vtkConstants.VTK_DOUBLE: 8
}
return _size_dict[vtk_array_type]
def create_vtk_array(vtk_arr_type):
"""Internal function used to create a VTK data array from another
VTK array given the VTK array type.
"""
tmp = vtk.vtkDataArray.CreateDataArray(vtk_arr_type)
# CreateDataArray sets the refcount to 3 and this causes a severe
# memory leak.
tmp.SetReferenceCount(BASE_REFERENCE_COUNT)
return tmp
def array2vtk(num_array, vtk_array=None):
"""Converts a real numpy Array (or a Python list) to a VTK array
object.
This function only works for real arrays. Complex arrays are NOT
handled. It also works for multi-component arrays. However, only
1, and 2 dimensional arrays are supported. This function is very
efficient, so large arrays should not be a problem.
Even in cases when no copy of the numpy array data is performed,
a reference to the array is cached. The passed array can
therefore be deleted safely in all circumstances.
Parameters
----------
- num_array : numpy array or Python list/tuple
The input array must be 1 or 2D. A copy of the numeric array
data passed is made in the following circumstances:
1. A Python list/tuple was passed.
2. A non-contiguous numpy array was passed.
3. A `vtkBitArray` instance was passed as the second argument.
4. The types of the `vtk_array` and the `num_array` are not
equivalent to each other. For example if one is an integer
array and the other a float.
- vtk_array : `vtkDataArray` (default: `None`)
If an optional `vtkDataArray` instance, is passed as an argument
then a new array is not created and returned. The passed array
is itself returned.
"""
z = numpy.asarray(num_array)
shape = z.shape
assert len(shape) < 3, \
"Only arrays of dimensionality 2 or lower are allowed!"
assert not numpy.issubdtype(z.dtype, numpy.complexfloating), \
"Complex numpy arrays cannot be converted to vtk arrays."\
"Use real() or imag() to get a component of the array before"\
" passing it to vtk."
# First create an array of the right type by using the typecode.
# Bit arrays need special casing.
bit_array = False
if vtk_array is None:
vtk_typecode = get_vtk_array_type(z.dtype)
result_array = create_vtk_array(vtk_typecode)
elif vtk_array.GetDataType() == vtkConstants.VTK_BIT:
vtk_typecode = vtkConstants.VTK_CHAR
result_array = create_vtk_array(vtkConstants.VTK_CHAR)
bit_array = True
else:
vtk_typecode = vtk_array.GetDataType()
result_array = vtk_array
# Find the shape and set number of components.
if len(shape) == 1:
result_array.SetNumberOfComponents(1)
else:
result_array.SetNumberOfComponents(shape[1])
result_array.SetNumberOfTuples(shape[0])
# Ravel the array appropriately.
arr_dtype = get_numeric_array_type(vtk_typecode)
if numpy.issubdtype(z.dtype, arr_dtype):
z_flat = numpy.ravel(z)
else:
z_flat = numpy.ravel(z).astype(arr_dtype)
# Point the VTK array to the numpy data. The last argument (1)
# tells the array not to deallocate.
result_array.SetVoidArray(getbuffer(z_flat), len(z_flat), 1)
if bit_array:
# Handle bit arrays -- they have to be copied. Note that bit
# arrays are used ONLY when the user has passed one as an
# argument to this function.
vtk_array.SetNumberOfTuples(result_array.GetNumberOfTuples())
vtk_array.SetNumberOfComponents(result_array.GetNumberOfComponents())
for i in range(result_array.GetNumberOfComponents()):
vtk_array.CopyComponent(i, result_array, i)
result_array = vtk_array
else:
# Save a reference to the flatted array in the array cache.
# This prevents the user from deleting or resizing the array
# and getting into serious trouble. This is only done for
# non-bit array cases where the data is not copied.
global _array_cache
_array_cache.add(result_array, z_flat)
return result_array
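# Rough usage sketch (names below are illustrative, not from this module):
#   a = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   va = array2vtk(a)   # a vtkDoubleArray with 2 tuples of 3 components
# The flattened numpy view is kept alive in _array_cache until the VTK array
# is destroyed, so `a` itself may be deleted safely afterwards.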
def vtk2array(vtk_array):
"""Converts a VTK data array to a numpy array.
Given a subclass of vtkDataArray, this function returns an
appropriate numpy array containing the same data. The function
is very efficient since it uses the VTK imaging pipeline to
convert the data. If a sufficiently new version of VTK (5.2) is
installed then it actually uses the buffer interface to return a
view of the VTK array in the returned numpy array.
Parameters
----------
- vtk_array : `vtkDataArray`
The VTK data array to be converted.
"""
typ = vtk_array.GetDataType()
assert typ in get_vtk_to_numeric_typemap().keys(), \
"Unsupported array type %s" % typ
shape = (vtk_array.GetNumberOfTuples(),
vtk_array.GetNumberOfComponents())
if shape[0] == 0:
dtype = get_numeric_array_type(typ)
return numpy.array([], dtype)
# First check if this array already has a numpy array cached,
# if it does and the array size has not been changed, reshape
# that and return it.
if vtk_array in _array_cache:
arr = _array_cache.get(vtk_array)
if shape[1] == 1:
shape = (shape[0], )
if arr.size == numpy.prod(shape):
arr = numpy.reshape(arr, shape)
return arr
# If VTK's new numpy support is available, use the buffer interface.
if numpy_support is not None and typ != vtkConstants.VTK_BIT:
dtype = get_numeric_array_type(typ)
result = numpy.frombuffer(vtk_array, dtype=dtype)
if shape[1] == 1:
shape = (shape[0], )
result.shape = shape
return result
# Setup an imaging pipeline to export the array.
img_data = vtk.vtkImageData()
img_data.SetDimensions(shape[0], 1, 1)
if typ == vtkConstants.VTK_BIT:
iarr = vtk.vtkCharArray()
iarr.DeepCopy(vtk_array)
img_data.GetPointData().SetScalars(iarr)
elif typ == vtkConstants.VTK_ID_TYPE:
# Needed since VTK_ID_TYPE does not work with VTK 4.5.
iarr = vtk.vtkLongArray()
iarr.SetNumberOfTuples(vtk_array.GetNumberOfTuples())
nc = vtk_array.GetNumberOfComponents()
iarr.SetNumberOfComponents(nc)
for i in range(nc):
iarr.CopyComponent(i, vtk_array, i)
img_data.GetPointData().SetScalars(iarr)
else:
img_data.GetPointData().SetScalars(vtk_array)
if typ == vtkConstants.VTK_ID_TYPE:
r_dtype = get_numeric_array_type(vtkConstants.VTK_LONG)
elif typ == vtkConstants.VTK_BIT:
r_dtype = get_numeric_array_type(vtkConstants.VTK_CHAR)
else:
r_dtype = get_numeric_array_type(typ)
img_data.Modified()
exp = vtk.vtkImageExport()
exp.SetInputData(img_data)
# Create an array of the right size and export the image into it.
im_arr = numpy.empty((shape[0]*shape[1],), r_dtype)
exp.Export(im_arr)
# Now reshape it.
if shape[1] == 1:
shape = (shape[0], )
im_arr = numpy.reshape(im_arr, shape)
return im_arr
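# Round-trip note: vtk2array(array2vtk(a)) yields an array with a's shape and
# values; when the data was not copied, it is served from the _array_cache view
# rather than going through the imaging pipeline above.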
def array2vtkCellArray(num_array, vtk_array=None):
"""Given a nested Python list or a numpy array, this method
creates a vtkCellArray instance and returns it.
A variety of input arguments are supported as described in the
Parameter documentation. If numpy arrays are given, this method
is highly efficient. This function is most efficient if the
passed numpy arrays have a typecode `ID_TYPE_CODE`. Otherwise a
typecast is necessary and this involves an extra copy. This
method *always copies* the input data.
An alternative and more efficient way to build the connectivity
list is to create a vtkIdTypeArray having data of the form
(npts,p0,p1,...p(npts-1), repeated for each cell) and then call
<vtkCellArray_instance>.SetCells(n_cell, id_list).
Parameters
----------
- num_array : numpy array or Python list/tuple
Valid values are:
1. A Python list of 1D lists. Each 1D list can contain one
cell connectivity list. This is very slow and is to be
used only when efficiency is of no consequence.
2. A 2D numpy array with the cell connectivity list.
3. A Python list of 2D numpy arrays. Each numeric array can
have a different shape. This makes it easy to generate a
cell array having cells of different kinds.
- vtk_array : `vtkCellArray` (default: `None`)
If an optional `vtkCellArray` instance, is passed as an argument
then a new array is not created and returned. The passed array
is itself modified and returned.
Example
-------
>>> a = [[0], [1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> cells = array_handler.array2vtkCellArray(a)
>>> a = numpy.array([[0,1,2], [3,4,5], [6,7,8]], 'l')
>>> cells = array_handler.array2vtkCellArray(a)
>>> l_a = [a[:,:1], a[:2,:2], a]
>>> cells = array_handler.array2vtkCellArray(l_a)
"""
if vtk_array:
cells = vtk_array
else:
cells = vtk.vtkCellArray()
assert cells.GetClassName() == 'vtkCellArray', \
'Second argument must be a `vtkCellArray` instance.'
if len(num_array) == 0:
return cells
########################################
# Internal functions.
def _slow_array2cells(z, cells):
cells.Reset()
vtk_ids = vtk.vtkIdList()
for i in z:
vtk_ids.Reset()
for j in i:
vtk_ids.InsertNextId(j)
cells.InsertNextCell(vtk_ids)
def _get_tmp_array(arr):
try:
tmp_arr = numpy.asarray(arr, ID_TYPE_CODE)
except TypeError:
tmp_arr = arr.astype(ID_TYPE_CODE)
return tmp_arr
def _set_cells(cells, n_cells, id_typ_arr):
vtk_arr = vtk.vtkIdTypeArray()
array2vtk(id_typ_arr, vtk_arr)
cells.SetCells(n_cells, vtk_arr)
########################################
msg = "Invalid argument. Valid types are a Python list of lists,"\
" a Python list of numpy arrays, or a numpy array."
if issubclass(type(num_array), (list, tuple)):
assert len(num_array[0]) > 0, "Input array must be 2D."
tp = type(num_array[0])
if issubclass(tp, list): # Pure Python list.
_slow_array2cells(num_array, cells)
return cells
elif issubclass(tp, numpy.ndarray): # List of arrays.
# Check shape of array and find total size.
tot_size = 0
n_cells = 0
for arr in num_array:
assert len(arr.shape) == 2, "Each array must be 2D"
shp = arr.shape
tot_size += shp[0]*(shp[1] + 1)
n_cells += shp[0]
# Create an empty array.
id_typ_arr = numpy.empty((tot_size,), ID_TYPE_CODE)
# Now populate it with the ids.
count = 0
for arr in num_array:
tmp_arr = _get_tmp_array(arr)
shp = arr.shape
sz = shp[0]*(shp[1] + 1)
set_id_type_array(tmp_arr, id_typ_arr[count:count+sz])
count += sz
# Now set them cells.
_set_cells(cells, n_cells, id_typ_arr)
return cells
else:
raise TypeError(msg)
elif issubclass(type(num_array), numpy.ndarray):
assert len(num_array.shape) == 2, "Input array must be 2D."
tmp_arr = _get_tmp_array(num_array)
shp = tmp_arr.shape
id_typ_arr = numpy.empty((shp[0]*(shp[1] + 1),), ID_TYPE_CODE)
set_id_type_array(tmp_arr, id_typ_arr)
_set_cells(cells, shp[0], id_typ_arr)
return cells
else:
raise TypeError(msg)
def array2vtkPoints(num_array, vtk_points=None):
"""Converts a numpy array/Python list to a vtkPoints object.
Unless a Python list/tuple or a non-contiguous array is given, no
copy of the data is made. Thus the function is very efficient.
Parameters
----------
- num_array : numpy array or Python list/tuple
The input array must be 2D with `shape[1] == 3`.
- vtk_points : `vtkPoints` (default: `None`)
If an optional `vtkPoints` instance, is passed as an argument
then a new array is not created and returned. The passed array
is itself modified and returned.
"""
if vtk_points:
points = vtk_points
else:
points = vtk.vtkPoints()
arr = numpy.asarray(num_array)
assert len(arr.shape) == 2, "Points array must be 2 dimensional."
assert arr.shape[1] == 3, "Incorrect shape: shape[1] must be 3."
vtk_array = array2vtk(arr)
points.SetData(vtk_array)
return points
def array2vtkIdList(num_array, vtk_idlist=None):
"""Converts a numpy array/Python list to a vtkIdList object.
Parameters
----------
- num_array : numpy array or Python list/tuple
The input array must be 2D with `shape[1] == 3`.
- vtk_idlist : `vtkIdList` (default: `None`)
If an optional `vtkIdList` instance, is passed as an argument
then a new array is not created and returned. The passed array
is itself modified and returned.
"""
if vtk_idlist:
ids = vtk_idlist
else:
ids = vtk.vtkIdList()
arr = numpy.asarray(num_array)
assert len(arr.shape) == 1, "Array for vtkIdList must be 1D"
ids.SetNumberOfIds(len(arr))
for i, j in enumerate(arr):
ids.SetId(i, j)
return ids
######################################################################
# Array argument handling functions.
######################################################################
def is_array(arr):
"""Returns True if the passed `arr` is a numpy array or a List."""
if issubclass(type(arr), (numpy.ndarray, list)):
return True
return False
def convert_array(arr, vtk_typ=None):
"""Convert the given array to the optional type specified by
`vtk_typ`.
Parameters
----------
- arr : numpy array/list.
- vtk_typ : `string` or `None`
represents the type the array is to be converted to.
"""
if vtk_typ:
conv = {'vtkCellArray': array2vtkCellArray,
'vtkPoints': array2vtkPoints,
'vtkIdList': array2vtkIdList}
if vtk_typ in conv.keys():
vtk_arr = getattr(vtk, vtk_typ)()
return conv[vtk_typ](arr, vtk_arr)
elif vtk_typ.find('Array') > -1:
try:
vtk_arr = getattr(vtk, vtk_typ)()
except TypeError: # vtk_typ == 'vtkDataArray'
return array2vtk(arr)
else:
return array2vtk(arr, vtk_arr)
else:
return arr
else:
return array2vtk(arr)
def is_array_sig(s):
"""Given a signature, return if the signature has an array."""
if not isinstance(s, str):
return False
arr_types = ['Array', 'vtkPoints', 'vtkIdList']
for i in arr_types:
if s.find(i) > -1:
return True
return False
def is_array_or_vtkarray(arg):
"""Returns True if the argument is an array/Python list or if it
is a vtk array."""
if is_array(arg):
return True
else:
if hasattr(arg, '_vtk_obj'):
if is_array_sig(arg._vtk_obj.__class__.__name__):
return True
return False
def get_correct_sig(args, sigs):
"""Given a list of args and a collection of possible signatures,
this function returns the most appropriate signature. This
function is only called by deref_array. This implies that one of
the signatures has an array type.
"""
# First do the trivial cases.
if sigs is None:
return None
if len(sigs) == 1:
return sigs[0]
else:
# Non-trivial cases.
la = len(args)
candidate_sigs = [s for s in sigs if len(s) == la]
count = len(candidate_sigs)
if count == 0:
# No sig has the right number of args.
msg = "Insufficient number of arguments to method."\
"Valid arguments are:\n%s" % sigs
raise TypeError(msg)
elif count == 1:
# If only one of the sigs has the right number of args,
# return it.
return candidate_sigs[0]
else:
# More than one sig has the same number of args.
# Check if args need conversion at all.
array_idx = [i for i, a in enumerate(args)
if is_array_or_vtkarray(a)]
n_arr = len(array_idx)
if n_arr == 0:
# No conversion necessary so signature info is
# useless.
return None
else:
# Need to find the right sig. This is done by finding
# the first signature that matches all the arrays in
# the argument.
for sig in candidate_sigs:
array_in_sig = [is_array_sig(s) for s in sig]
if array_in_sig.count(True) != len(array_idx):
continue
bad = False
for i in array_idx:
if not array_in_sig[i]:
bad = True
if not bad:
return sig
# Could not find any valid signature, so give up.
return None
def deref_vtk(obj):
"""Dereferences the VTK object from the object if possible. This
is duplicated from `tvtk_base.py` because I'd like to keep this
module independent of `tvtk_base.py`.
"""
if hasattr(obj, '_vtk_obj'):
return obj._vtk_obj
else:
return obj
def deref_array(args, sigs=None):
"""Given a bunch of arguments and optional signature information,
this converts the arguments suitably. If the argument is either a
Python list or a numpy array it is converted to a suitable type
based on the signature information. If it is not an array, but a
TVTK object the VTK object is dereferenced. Otherwise nothing is
done. If no signature information is provided the arrays are
automatically converted (this can sometimes go wrong). The
signature information is provided in the form of a list of lists.
"""
ret = []
sig = get_correct_sig(args, sigs)
if sig:
for a, s in zip(args, sig):
if is_array(a) and is_array_sig(s):
ret.append(convert_array(a, s))
else:
ret.append(deref_vtk(a))
else:
for a in args:
if is_array(a):
ret.append(convert_array(a))
else:
ret.append(deref_vtk(a))
return ret
|
enthought/mayavi
|
tvtk/array_handler.py
|
array_handler.py
|
py
| 27,563 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
19304565869
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from time import sleep
from selenium.webdriver.chrome.options import Options
import pandas as pd
import requests
"""
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
driver.get('https://shifucon.ppihgroup.com/staffpage/')
print(driver)
elemname = driver.find_element_by_id("login_email")
elemname.send_keys('0167938')
elemname = driver.find_element_by_id("login_pass")
elemname.send_keys('3104chalo')
log_in = driver.find_element_by_class_name('btn btn-lg btn-primary btn-block')
log_in.click()
"""
def main():
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
#options.add_argument('--disable-features=VizDisplayCompositor')
browser = webdriver.Chrome(ChromeDriverManager().install(), options=options)
browser.get("https://scraping-for-beginner.herokuapp.com/login_page")
username = browser.find_element_by_id('username')
username.send_keys("imanishi")
userpass = browser.find_element_by_id('password')
userpass.send_keys("ki")
log_in = browser.find_element_by_id('login-btn')
log_in.click()
NAME = browser.find_element_by_id("name")
print("名前:", NAME.text)
COM = browser.find_element_by_id("company")
print("所属企業:", COM.text)
birthday = browser.find_element_by_id("birthday")
print("生年月日:", birthday.text)
birthplace = browser.find_element_by_id("come_from")
print("出身地:", birthplace.text)
hobby = browser.find_element_by_id("hobby")
print("趣味:", hobby.text)
    # single element
    elemth = browser.find_element_by_tag_name("th")
    # multiple elements
elemth = browser.find_elements_by_tag_name("th")
print(elemth[0].text)
key = []
for i in elemth:
key.append(i.text)
value = []
elemtd = browser.find_elements_by_tag_name("td")
for i in elemtd:
value.append(i.text)
sleep(5)
browser.quit()
df = pd.DataFrame()
df["項目"] = key
df["値"] = value
print(df)
df.to_csv("講師情報.csv", index = False)
if __name__ == '__main__':
main()
|
satoshi-python/109
|
train_sele.py
|
train_sele.py
|
py
| 2,427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33068133857
|
import sys
import argparse
import importlib
commands = {
'train': {
'script': 'ocr4all_pixel_classifier.scripts.train',
'main': 'main',
'help': 'Train the neural network. See more via "* train --help"'
},
'predict': {
'script': 'ocr4all_pixel_classifier.scripts.predict',
'main': 'main',
'help': 'Predict a result with the neural network. See more via "* predict --help"'
},
'predict-json': {
'script': 'ocr4all_pixel_classifier.scripts.predict_json',
'main': 'main',
'help': 'Predict a result with the neural network, input via JSON. See more via "* predict --help"'
},
'create-dataset-file': {
'script': 'ocr4all_pixel_classifier.scripts.create_dataset_file',
'main': 'main',
'help': 'Create a dataset file'
},
'compute-image-normalizations': {
'script': 'ocr4all_pixel_classifier.scripts.compute_image_normalizations',
'main': 'main',
'help': 'Compute image normalizations'
},
'compute-image-map': {
'script': 'ocr4all_pixel_classifier.scripts.generate_image_map',
'main': 'main',
'help': 'Generates color map'
},
'migrate-model': {
'script': 'ocr4all_pixel_classifier.scripts.migrate_model',
'main': 'main',
'help': 'Convert old model to new format'
},
}
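# Example: "page-segmentation train --help" resolves to
# ocr4all_pixel_classifier.scripts.train and invokes its main() with the
# remaining command-line arguments (see main() below).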
def main():
    # Pretty-print help for the main program
usage = 'page-segmentation <command> [<args>]\n\nCOMMANDS:'
# Add all commands to help
max_name_length = max(len(name) for name, _ in commands.items())
for name, command in commands.items():
usage += '\n\t{name:<{col_width}}\t{help}'.format(name=name, col_width=max_name_length, help=command["help"])
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('command', help='The sub command to execute, see COMMANDS')
args = parser.parse_args(sys.argv[1:2])
sys.argv = sys.argv[:1] + sys.argv[2:]
if args.command in commands.keys():
command = commands[args.command]
command_module = importlib.import_module(command['script'])
command_main = getattr(command_module, command['main'])
command_main()
else:
print('Unrecognized command')
parser.print_help()
exit(1)
if __name__ == "__main__":
main()
|
OMMR4all/ommr4all-page-segmentation
|
ocr4all_pixel_classifier/scripts/main.py
|
main.py
|
py
| 2,358 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22905238470
|
import tensorflow as tf
import argparse
import sys
sys.path.insert(0, "../CycleGAN-TensorFlow")
import model # nopep8
# Transform image bitstring to float tensor
def preprocess_bitstring_to_float_tensor(input_bytes, image_size):
input_bytes = tf.reshape(input_bytes, [])
# Transform bitstring to uint8 tensor
input_tensor = tf.image.decode_png(input_bytes, channels=3)
# Convert to float32 tensor
input_tensor = tf.image.convert_image_dtype(input_tensor,
dtype=tf.float32)
input_tensor = input_tensor / 127.5 - 1.0
# Ensure tensor has correct shape
input_tensor = tf.reshape(input_tensor, [image_size, image_size, 3])
# CycleGAN's inference function accepts a batch of images
# So expand the single tensor into a batch of 1
input_tensor = tf.expand_dims(input_tensor, 0)
return input_tensor
# Transform float tensor to image bitstring
def postprocess_float_tensor_to_bitstring(output_tensor):
# Convert to uint8 tensor
output_tensor = (output_tensor + 1.0) / 2.0
output_tensor = tf.image.convert_image_dtype(output_tensor, tf.uint8)
# Remove the batch dimension
output_tensor = tf.squeeze(output_tensor, [0])
# Transform uint8 tensor to bitstring
output_bytes = tf.image.encode_png(output_tensor)
output_bytes = tf.identity(output_bytes, name="output_bytes")
return output_bytes
# Export graph to ProtoBuf
def export_graph():
graph = tf.Graph()
with graph.as_default():
# Instantiate a CycleGAN
cycle_gan = model.CycleGAN(ngf=64,
norm="instance",
image_size=FLAGS.image_size)
# Create placeholder for image bitstring
# This is the first injection layer
input_bytes = tf.placeholder(tf.string, shape=[], name="input_bytes")
# Preprocess input (bitstring to float tensor)
input_tensor = preprocess_bitstring_to_float_tensor(input_bytes,
FLAGS.image_size)
# Get style transferred tensor
output_tensor = cycle_gan.G.sample(input_tensor)
# Postprocess output
output_bytes = postprocess_float_tensor_to_bitstring(output_tensor)
# Instantiate a Saver
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
# Access variables and weights from last checkpoint
latest_ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
saver.restore(sess, latest_ckpt)
# Export graph to ProtoBuf
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [output_bytes.op.name])
tf.train.write_graph(output_graph_def,
FLAGS.protobuf_dir,
FLAGS.model_name + "_v" + str(FLAGS.version),
as_text=False)
# Wrap a SavedModel around ProtoBuf
# Necessary for using the tensorflow-serving RESTful API
def build_saved_model():
# Instantiate a SavedModelBuilder
# Note that the serve directory MUST have a model version subdirectory
builder = tf.saved_model.builder.SavedModelBuilder(FLAGS.serve_dir +
"/" +
str(FLAGS.version))
# Read in ProtoBuf file
with tf.gfile.GFile(FLAGS.protobuf_dir +
"/" +
FLAGS.model_name +
"_v" +
str(FLAGS.version),
"rb") as protobuf_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(protobuf_file.read())
# Get input and output tensors from GraphDef
# These are our injected bitstring layers
[inp, out] = tf.import_graph_def(graph_def,
name="",
return_elements=["input_bytes:0",
"output_bytes:0"])
with tf.Session(graph=out.graph) as sess:
# Signature_definition expects a batch
# So we'll turn the output bitstring into a batch of 1 element
out = tf.expand_dims(out, 0)
# Build prototypes of input and output
input_bytes = tf.saved_model.utils.build_tensor_info(inp)
output_bytes = tf.saved_model.utils.build_tensor_info(out)
# Create signature for prediction
signature_definition = tf.saved_model.signature_def_utils.build_signature_def( # nopep8
inputs={"input_bytes": input_bytes},
outputs={"output_bytes": output_bytes},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
# Add meta-information
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_definition
})
# Create the SavedModel
builder.save()
def main(_):
print("Exporting model to ProtoBuf...")
export_graph()
print("Wrapping ProtoBuf in SavedModel...")
build_saved_model()
print("Exported successfully!")
print("""Run the server with:
tensorflow_model_server --rest_api_port=8501 """
"--model_name=saved_model --model_base_path=$(path)")
if __name__ == "__main__":
# Instantiate an arg parser
parser = argparse.ArgumentParser()
# Establish default arguments
parser.add_argument("--checkpoint_dir",
type=str,
default="../CycleGAN-TensorFlow/"
"checkpoints/20180628-1208",
help="Path to checkpoints directory")
parser.add_argument("--protobuf_dir",
type=str,
default="../CycleGAN-TensorFlow/protobufs",
help="Path to protobufs directory")
parser.add_argument("--model_name",
type=str,
default="model",
help="Model name")
parser.add_argument("--serve_dir",
type=str,
default="serve",
help="Path to serve directory")
parser.add_argument("--version",
type=int,
default=1,
help="Model version number")
parser.add_argument("--image_size",
type=int,
default=64,
help="Image size")
# Parse known arguments
FLAGS, unparsed = parser.parse_known_args()
# Run the tensorflow app
tf.app.run(argv=[sys.argv[0]] + unparsed)
|
tmlabonte/tendies
|
minimum_working_example/export_graph_for_serving.py
|
export_graph_for_serving.py
|
py
| 6,953 |
python
|
en
|
code
| 37 |
github-code
|
6
|
38152844484
|
from typing import List
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
class PathsParser:
def __init__(self): # if url changed replace it
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
# setting the "eager" parameter so as not to wait for the full download
options = Options()
options.add_argument("--headless")
# setting for hide the browser while running
self.driver = webdriver.Chrome(desired_capabilities=caps,
options=options)
def check_existence(self, page: webdriver) -> bool:
"""
        Check whether the given page still has results; returns False once 'No results available' appears.
:param page:
:return flag:
"""
flag = True
xpath = "//strong[text()='No results available']"
try:
page.find_element(By.XPATH, xpath)
flag = False
except Exception:
pass
return flag
def parse_urls(self) -> List:
"""
        Parse every available listing page and collect the article URLs from its rows.
:return page_list:
"""
page_list = []
counter_page = 0
flag = True
current_page = 'https://www.aceee.org/news?keys=&field_' \
'authors_target_id=&field_related_programs_target_id' \
'=&field_related_topics_target_id=&' \
'sort_bef_combine=created_DESC&' \
'sort_by=created&sort_order=DESC&page={}'
while flag:
current_url = current_page.format(counter_page)
self.driver.get(current_url)
flag = self.check_existence(self.driver)
if not flag:
break
page_list.extend(self.parse_paths(current_url))
print(current_url, '--- complete!')
counter_page += 1
self.driver.quit()
return page_list
def parse_paths(self, url: str) -> List:
"""
        Collect the article link paths from the rows on the given listing page.
:param url:
:return path_list:
"""
path_list = []
self.driver.get(url)
rows = self.driver.find_elements(By.CLASS_NAME, 'views-row')
for row in rows:
path_list.append(
row.find_element(By.TAG_NAME, 'a').get_attribute('href')
)
return path_list
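# Hypothetical usage (not part of the original module):
#   parser = PathsParser()
#   article_urls = parser.parse_urls()  # walks ...&page=0,1,2,... until the
#                                       # "No results available" marker appears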
|
stolzor/test_task
|
models/paths_parser.py
|
paths_parser.py
|
py
| 2,546 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42123297938
|
from collections import namedtuple
import numpy as np
from ...utils.interpolation.levels import ( # noqa
LevelsDefinition as ConversionLevelsDefinition,
)
INPUT_REQUIRED_FIELDS = dict(
export_format=str,
levels_method=(None, str),
levels_number=(None, int),
levels_dzmin=(None, float),
levels_ztop=(None, float),
comment=(None, str),
campaign=(None, str),
source_domain=(None, str),
reference=(None, str),
# AUTHOR=CREATOR
author=(None, str),
modifications=(None, str),
# CASE=FLIGHT
case=(None, str),
adv_temp=[0, 1],
adv_theta=[0, 1],
adv_thetal=[0, 1],
adv_qv=[0, 1],
adv_qt=[0, 1],
adv_rv=[0, 1],
adv_rt=[0, 1],
rad_temp=[0, 1, "adv"],
rad_theta=[0, 1, "adv"],
rad_thetal=[0, 1, "adv"],
forc_omega=[0, 1],
forc_w=[0, 1],
forc_geo=[0, 1],
nudging_u=(0, np.nan, float),
nudging_v=(0, np.nan, float),
nudging_temp=(0, np.nan, float),
nudging_theta=(0, np.nan, float),
nudging_thetal=(0, np.nan, float),
nudging_qv=(0, np.nan, float),
nudging_qt=(0, np.nan, float),
nudging_rv=(0, np.nan, float),
nudging_rt=(0, np.nan, float),
surfaceType=["ocean", "land", "mixed"],
surfaceForcing=["ts", "Flux", "surfaceFlux"],
surfaceForcingWind=["z0", "ustar", "z0_traj"],
nudging_method_scalar_traj=(None, str),
nudging_time_scalar_traj=(None, float),
nudging_height_scalar_traj=(None, float),
nudging_transition_scalar_traj=(None, float),
nudging_method_momentum_traj=(None, str),
nudging_time_momentum_traj=(None, float),
nudging_height_momentum_traj=(None, float),
nudging_transition_momentum_traj=(None, float),
)
ConversionDefinition = namedtuple(
"ConversionDefinition",
["export_format", "levels", "name", "metadata", "parameters"],
)
ConversionParametersDefinition = namedtuple(
"ConversionParametersDefinition",
[
"adv_temp",
"adv_theta",
"adv_thetal",
"adv_qv",
"adv_qt",
"adv_rv",
"adv_rt",
"rad_temp",
"rad_theta",
"rad_thetal",
"forc_omega",
"forc_w",
"forc_geo",
"nudging_u",
"nudging_v",
"nudging_temp",
"nudging_theta",
"nudging_thetal",
"nudging_qv",
"nudging_qt",
"nudging_rv",
"nudging_rt",
"surfaceType",
"surfaceForcing",
"surfaceForcingWind",
"nudging_parameters_scalar_traj",
"nudging_parameters_momentum_traj",
],
)
ConversionNudgingDefinition = namedtuple(
"ConversionNudgingDefinition",
[
"method",
"time",
"height",
"transition",
],
)
ConversionMetadataDefinition = namedtuple(
"ConversionMetadataDefinition",
[
"comment",
"campaign",
"source_domain",
"reference",
"author",
"modifications",
"case",
],
)
|
EUREC4A-UK/lagtraj
|
lagtraj/forcings/conversion/input_definitions.py
|
input_definitions.py
|
py
| 2,969 |
python
|
en
|
code
| 8 |
github-code
|
6
|
35004893553
|
# Programmers weekly challenge, week 2
def solution(scores):
answer = ''
i_len = len(scores)
temp_arr = [0 for _ in range(i_len)]
for i in range(i_len):
for j in range(i_len):
temp_arr[j] = scores[j][i]
max_val = max(temp_arr)
min_val = min(temp_arr)
max_self, min_self = False, False
if max_val == temp_arr[i]:
max_self = True
if min_val == temp_arr[i]:
min_self = True
selfOK = False
selfScore = 0
if not max_self and not min_self:
selfOK = True
for j in range(i_len):
if max_self and i != j and max_val == temp_arr[j]:
selfOK = True
if min_self and i != j and min_val == temp_arr[j]:
selfOK = True
selfScore = sum(temp_arr)
if not selfOK:
selfScore -= temp_arr[i]
selfScore /= (i_len-1)
else:
selfScore /= i_len
answer += classify(selfScore)
return answer
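# Grading rule implemented above: a student's own score is dropped from their
# column average only when it is the unique maximum or unique minimum of that
# column; the resulting average is then mapped to a letter grade by classify().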
def classify(score):
if score >= 90:
return 'A'
elif 80 <= score < 90:
return 'B'
elif 70 <= score < 80:
return 'C'
elif 50 <= score < 70:
return 'D'
else:
return 'F'
print(solution([[100,90,98,88,65],[50,45,99,85,77],[47,88,95,80,67],[61,57,100,80,65],[24,90,94,75,65]]))
|
Inflearn-everyday/study
|
wookiist/programmers/week2.py
|
week2.py
|
py
| 1,368 |
python
|
en
|
code
| 5 |
github-code
|
6
|
27577948701
|
from django.urls import path
from .views import (HomePageView, MessageView, UserProfile, delete_message,
spam_message, AddReview, AbouUs, ContactUs, ReviewView, SettingsView, EditProfile)
urlpatterns = [
path('', HomePageView.as_view(), name='home'),
path('profile/', UserProfile.as_view(), name='user_profile'),
# path('profile/edit/<int:pk>', EditProfile.as_view(), name='edit_profile'),
path('profile/edit/<str:username>', EditProfile, name='edit_profile'),
path('about/', AbouUs.as_view(), name='about'),
path('contact/', ContactUs.as_view(), name='contact'),
path('review/', ReviewView, name='review'),
path('review/add/', AddReview, name='add_review'),
path('settings/', SettingsView, name='settings'),
path('message/<str:username>/', MessageView, name='message'),
path('delete/<int:m_id>/', delete_message, name='delete'),
path('spam/<int:m_id>/', spam_message, name='spam'),
]
|
Afeez1131/Anonymous-v1
|
anonymous/urls.py
|
urls.py
|
py
| 954 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1068948273
|
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import gymnasium as gym
import imageio
from pendulum_model import PendulumModel
# Constants
g = 10.0 # gravitational acceleration
l = 1.0 # length of the pendulum
m = 1.0 # mass of the pendulum
dt = 0.05 # time step
n = 100 # number of time steps
x0 = np.zeros(3 * n)
class ScipySolver:
def __init__(self):
self.theta0 = np.pi
self.theta_dot0 = 0.0
@staticmethod
def objective(x):
theta = x[:n]
theta_dot = x[n:2 * n]
u = x[2 * n:]
cost = np.sum(theta ** 2) + 0.1 * np.sum(theta_dot ** 2) + 0.001 * np.sum(u ** 2)
return cost
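    # Quadratic cost: angle error is penalized most, angular velocity at 0.1x
    # and control effort at 0.001x -- the same weighting as Pendulum-v1's
    # reward term theta^2 + 0.1*theta_dot^2 + 0.001*u^2.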
def dynamics(self, x):
theta = x[:n]
theta_dot = x[n:2 * n]
u = x[2 * n:]
constraints = []
constraints.append(theta[0] - self.theta0)
constraints.append(theta_dot[0] - self.theta_dot0)
for t in range(n - 1):
constraints.append(theta_dot[t + 1] - (
theta_dot[t] + (3 * g * dt) / (2 * l) * np.sin(theta[t]) + (3 * dt) / (m * l ** 2) * u[t]))
constraints.append(theta[t + 1] - (theta[t] + theta_dot[t+1] * dt))
return np.array(constraints)
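    # The equality constraints encode the discretized pendulum dynamics
    # (semi-implicit Euler, matching Gymnasium's Pendulum-v1 update):
    #   theta_dot[t+1] = theta_dot[t] + (3*g/(2*l)*sin(theta[t]) + 3/(m*l^2)*u[t]) * dt
    #   theta[t+1]     = theta[t] + theta_dot[t+1] * dt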
@staticmethod
def plot_results(theta, theta_dot, u):
time = np.linspace(0, dt * n, n)
plt.figure(figsize=(10, 8))
# Plot theta
plt.subplot(3, 1, 1)
plt.plot(time, theta)
plt.ylabel('Theta (rad)')
plt.title('Optimal Control Results')
# Plot theta_dot
plt.subplot(3, 1, 2)
plt.plot(time, theta_dot)
plt.ylabel('Theta_dot (rad/s)')
# Plot u
plt.subplot(3, 1, 3)
plt.plot(time, u)
plt.ylabel('Control Input (u)')
plt.xlabel('Time (s)')
plt.tight_layout()
plt.show()
def solve(self):
env = make_env("Pendulum-v1")
observation, info = env.reset(seed=4)
model = PendulumModel()
model.reset(observation)
model_log = []
self.theta0 = model.state[0]
self.theta_dot0 = model.state[1]
x0[:n] = np.linspace(self.theta0, 0, n)
# x0[:n] = self.theta0
x0[n:2 * n] = np.linspace(self.theta_dot0, 0, n)
# x0[n:2] = self.theta_dot0
for i in range(100):
model_log.append(model.state)
action = np.array([0.0])
model.step(action)
model_log.append(action)
model_log = np.hstack(model_log)
# Initial guess
# Bounds
theta_dot_bounds = (-8, 8)
u_bounds = (-2, 2)
bounds = [(None, None)] * n + [theta_dot_bounds] * n + [u_bounds] * n
# Constraints
constraints = {'type': 'eq', 'fun': self.dynamics}
# Optimize
result = minimize(self.objective, x0, method='trust-constr', bounds=bounds, constraints=constraints,
options={'gtol': 1e-5})
print(result)
if result.success:
theta_opt = result.x[:n]
theta_dot_opt = result.x[n:2 * n]
u_opt = result.x[2 * n:]
print(theta_opt)
print(theta_dot_opt)
print(u_opt)
else:
print("Optimization failed.")
theta_opt = result.x[:n]
theta_dot_opt = result.x[n:2 * n]
u_opt = result.x[2 * n:]
print(theta_opt)
print(theta_dot_opt)
print(u_opt)
frames = []
for i in range(100):
observation, reward, terminated, truncated, info = env.step(u_opt[i].reshape(1, ))
print(observation, reward, u_opt[i])
frame = env.render()
frames.append(frame)
imageio.mimsave('pendulum_run.gif', frames, duration=1.0 / 20)
self.plot_results(theta_opt, theta_dot_opt, u_opt)
def make_env(name):
gym_env = gym.make(name, render_mode='rgb_array')
return gym_env
if __name__ == "__main__":
scipy_solve = ScipySolver()
scipy_solve.solve()
|
CarlDegio/SQP_Pendulum
|
scipy_trust.py
|
scipy_trust.py
|
py
| 4,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12611003899
|
from ..utils import *
##
# Minions
class OG_006:
"""Vilefin Inquisitor"""
play = Summon(CONTROLLER, "OG_006b")
class OG_006b:
"""The Tidal Hand"""
requirements = {PlayReq.REQ_NUM_MINION_SLOTS: 1}
activate = Summon(CONTROLLER, "OG_006a")
class OG_221:
"""Selfless Hero"""
deathrattle = GiveDivineShield(RANDOM_FRIENDLY_MINION)
class OG_229:
"""Ragnaros, Lightlord"""
events = OWN_TURN_END.on(Heal(RANDOM(FRIENDLY + DAMAGED_CHARACTERS), 8))
class OG_310:
"""Steward of Darkshire"""
events = Summon(CONTROLLER, MINION + (CURRENT_HEALTH == 1)).on(
GiveDivineShield(Summon.CARD)
)
##
# Spells
class OG_223:
"""Divine Strength"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_TO_PLAY: 0}
play = Buff(TARGET, "OG_223e")
OG_223e = buff(+1, +2)
class OG_273:
"""Stand Against Darkness"""
requirements = {PlayReq.REQ_NUM_MINION_SLOTS: 1}
play = Summon(CONTROLLER, "CS2_101t") * 5
class OG_311:
"""A Light in the Darkness"""
play = DISCOVER(RandomMinion()).then(Buff(Discover.CARDS, "OG_311e"))
OG_311e = buff(+1, +1)
##
# Weapons
class OG_222:
"""Rallying Blade"""
play = Buff(FRIENDLY_MINIONS + DIVINE_SHIELD, "OG_222e")
OG_222e = buff(+1, +1)
class OG_198:
"""Forbidden Healing"""
requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
def play(self):
mana = self.controller.mana
yield SpendMana(CONTROLLER, mana)
yield Heal(TARGET, mana * 2)
|
jleclanche/fireplace
|
fireplace/cards/wog/paladin.py
|
paladin.py
|
py
| 1,409 |
python
|
en
|
code
| 645 |
github-code
|
6
|
859001844
|
from __future__ import division
from vistrails.core.modules.vistrails_module import Module
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.basic_modules import List, String
from engine_manager import EngineManager
from map import Map
def initialize(*args,**keywords):
reg = get_module_registry()
reg.add_module(Map)
reg.add_input_port(Map, 'FunctionPort', (Module, ''))
reg.add_input_port(Map, 'InputList', (List, ''))
reg.add_input_port(Map, 'InputPort', (List, ''))
reg.add_input_port(Map, 'OutputPort', (String, ''))
reg.add_output_port(Map, 'Result', (List, ''))
def finalize():
EngineManager.cleanup()
def menu_items():
return (
("Start new engine processes",
lambda: EngineManager.start_engines()),
("Show information on the cluster",
lambda: EngineManager.info()),
("Change profile",
lambda: EngineManager.change_profile()),
("Cleanup started processes",
lambda: EngineManager.cleanup()),
("Request cluster shutdown",
lambda: EngineManager.shutdown_cluster()),
)
|
VisTrails/VisTrails
|
vistrails/packages/parallelflow/init.py
|
init.py
|
py
| 1,195 |
python
|
en
|
code
| 100 |
github-code
|
6
|
27580622561
|
from django.shortcuts import render, HttpResponse, get_object_or_404, HttpResponseRedirect
from .models import fizzURL
from django.views import View
from fiz.utils import create_shortcode
from .forms import SubmitURLForm
class HomeView(View):
'''
    In a CBV, the get and post handlers are written as separate methods, unlike
    an FBV, which handles both verbs in a single function
'''
def get(self, request):
        form = SubmitURLForm()
return render(request, 'fiz/home.html', {'form': form, 'title': 'Fiz.co'})
def post(self, request):
form = SubmitURLForm(request.POST)
if form.is_valid():
new_url = form.cleaned_data['url']
obj, created = fizzURL.objects.get_or_create(url=new_url)
context = {
'obj': obj,
'created': created,
}
if created:
template = 'fiz/created.html'
else:
template= 'fiz/already-exist.html'
            return render(request, template, context)
        # Fall through: re-render the home page with the bound form if validation fails
        return render(request, 'fiz/home.html', {'form': form, 'title': 'Fiz.co'})
class FizCBV(View):
def get(self, request, shortcode):
obj = get_object_or_404(fizzURL, shortcode=shortcode)
return HttpResponseRedirect(obj.url)
|
Afeez1131/shortener
|
fiz/views.py
|
views.py
|
py
| 1,203 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72995173628
|
import os
import time
import sys
import pickle
import gzip
import numpy as np  # needed by vectorized_result() below
import zlib
try:
import eosapi
producer = eosapi.Producer()
    print('This DL example is not supported anymore, a true AI on blockchain will not look like this.')
print('Please make sure you are running the following command before test')
print('./pyeos/pyeos --manual-gen-block --debug -i')
except Exception as e:
print(e)
def init():
'''
psw = 'PW5Kd5tv4var9XCzvQWHZVyBMPjHEXwMjH1V19X67kixwxRpPNM4J'
wallet.open('mywallet')
wallet.unlock('mywallet',psw)
'''
key1 = 'EOS61MgZLN7Frbc2J7giU7JdYjy2TqnfWFjZuLXvpHJoKzWAj7Nst'
key2 = 'EOS5JuNfuZPATy8oPz9KMZV2asKf9m8fb2bSzftvhW55FKQFakzFL'
if not eosapi.get_account('mnist'):
with producer:
r = eosapi.create_account('inita', 'mnist', key1, key2)
assert r
with producer:
r = eosapi.set_contract('mnist', '../../programs/pyeos/contracts/mnist/mnist.py', '../../programs/pyeos/contracts/mnist/mnist.abi', eosapi.py_vm_type)
assert r
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
def test():
producer()
p = os.path.join(os.getcwd(), '../../programs/pyeos/contracts/mnist')
sys.path.insert(0, p)
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
print('load done!')
training_data = list(training_data)
txids = []
counter = 0
for d in training_data[:1]:
data = pickle.dumps([d, ])
data = zlib.compress(data)
# print(data)
r = eosapi.push_action('mnist', 'train', data, ['mnist'], {'mnist':'active'})
assert r
print(r.transaction_id)
if r.transaction_id in txids:
            raise Exception('duplicate transaction id')
txids.append(r.transaction_id)
counter += 1
if counter % 50 == 0:
print(counter)
with producer:
pass
producer()
if __name__ == '__main__':
sys.path.insert(0, '..')
import mnist
net = mnist.Network([784, 30, 10])
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
# print(data)
# data0 = np.reshape(data[0], (784, 1))
# data1 = vectorized_result(data[1])
net.SGD(training_data[:1], 1, 1, 3.0, test_data=None)
|
learnforpractice/pyeos
|
programs/pyeos/tests/python/mnist/t.py
|
t.py
|
py
| 2,645 |
python
|
en
|
code
| 131 |
github-code
|
6
|
42420354636
|
import sqlite3
from tkinter import *
from tkinter import messagebox
# Name of the database
db = "timerSIAT.db"
busca = Tk()
busca.iconbitmap('buscar.ico')
busca.title("Buscar Numero de Serie")
busca.geometry("330x250")
uno=Label(busca, text=" ")
uno.place(x = 30, y = 70)
dos=Label(busca, text=" ")
dos.place(x = 155, y = 70)
tres=Label(busca, text=" " )
tres.place(x = 30, y = 110)
cuatro=Label(busca, text=" ")
cuatro.place(x = 155, y = 110)
cinco=Label(busca, text=" " )
cinco.place(x = 30, y = 150)
seis=Label(busca, text=" ")
seis.place(x = 155, y = 150)
def Busca_Serial():
if NSerie.get() == '':
messagebox.showerror("Error", "Ingresa un Numero de Serie")
else:
conexionbuscar = sqlite3.connect(db)
cursorbuscar = conexionbuscar.cursor()
data = NSerie.get()
cursorbuscar.execute(f"SELECT fechahora,empleado,tiempo FROM log WHERE serie='{data}'")
usuario = cursorbuscar.fetchone()
if usuario:
uno.configure(text="Fecha de Prueba")
dos.configure(text=" "+str(usuario[0]))
tres.configure(text="Numero de Empleado")
cuatro.configure(text=" "+str(usuario[1]))
cinco.configure(text="Tiempo de Prueba")
seis.configure(text=" "+str(usuario[2]))
else:
uno.configure(text=" ")
dos.configure(text=" ")
tres.configure(text=" ")
cuatro.configure(text=" ")
cinco.configure(text=" ")
seis.configure(text=" ")
messagebox.showerror("Error", "No se encontro Numero de Serie")
conexionbuscar.close()
def Limpiar():
uno.configure(text=" ")
dos.configure(text=" ")
tres.configure(text=" ")
cuatro.configure(text=" ")
cinco.configure(text=" ")
seis.configure(text=" ")
NSerie = StringVar()
Label(busca, text = "Numero de Serie ").place(x = 30,y = 30)
Entry(busca, textvariable=NSerie).place(x = 155, y = 30)
Button(busca, text = "BUSCAR",
command = Busca_Serial,
activebackground = "green",
activeforeground = "white").place(x = 50, y = 190)
Button(busca,text = "BORRAR",
command = Limpiar,
activebackground = "RED",
activeforeground = "white").place(x = 250, y = 190)
Label(busca).place(x = 155, y = 70)
Label(busca).place(x = 155, y = 110)
# Show the window
busca.mainloop()
|
AnaNicoSerrano88/Timmer-SIAT
|
Buscar_Serie.py
|
Buscar_Serie.py
|
py
| 2,474 |
python
|
es
|
code
| 0 |
github-code
|
6
|
34252791072
|
import argparse
import json
import os
from flask import Flask, render_template, request
import openai
import requests
import base64
app = Flask(__name__)
# Configure OpenAI API credentials
openai.api_key = 'OPEN_API_KEY'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/search', methods=['POST'])
def search():
search_query = request.form['search']
if("picture" in search_query):
process(search_query)
return render_template('index.html', search_query=False, results=True)
else:
results = search_openai(search_query)
return render_template('index.html', search_query=search_query, results=results)
def search_openai(query):
response = openai.Completion.create(
engine='text-davinci-003', # Use the GPT-3.5 engine
prompt=query,
max_tokens=4000, # Adjust the response length as needed
temperature=0.7, # Adjust the temperature for response randomness
n=1, # Generate a single response
stop=None, # Optional stop sequence to end the response
timeout=10, # Optional timeout for the API request
)
return response.choices[0].text.strip()
def process(prompt):
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--prompt", help="Text to image prompt:", default='an isometric view of a miniature city, tilt shift, bokeh, voxel, vray render, high detail')
parser.add_argument("-n", "--number", help="Number of images generated", default=1)
parser.add_argument("-s", "--size", help="Image size: 256, 512 or 1024", default=256)
args = parser.parse_args()
api_key ="OPEN_API_KEY"
url = 'https://api.openai.com/v1/images/generations'
custom_headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + api_key,
}
reqBody = {
"prompt": prompt,
"n": int(args.number),
"size": f'{args.size}x{args.size}',
"response_format": "b64_json"
}
res = requests.post(url,
data=json.dumps(reqBody),
headers=custom_headers,
)
# print(r)
# print(r.url)
# print(r.status_code)
# print(res.text)
# print(r.content)
res_json = json.loads(res.text)
for i in range(0, len(res_json['data'])):
img_file_name = 'image.jpeg'
folder="static"
file_path = os.path.join(folder, img_file_name)
with open(file_path, 'wb') as f:
f.write(base64.urlsafe_b64decode(res_json['data'][i]['b64_json']))
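    # The request asks for "b64_json" responses, so each returned item is
    # base64-decoded above and written to static/image.jpeg.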
if __name__ == '__main__':
app.run(debug=True)
|
Guhan-jb/HippoGPT
|
Hippo_GPT/main.py
|
main.py
|
py
| 2,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26826965262
|
# Brainfuck interpreter made in Desmos graphing calculator
# https://www.desmos.com/calculator/sfjibaru0n
# This is a python script that converts bf code to the numbers used in Desmos
import re
CODE_MAP = {"+": 0, "-": 1, ">": 2, "<": 3, ".": 4, ",": 5, "[": 6, "]": 7}
with open(f"test.bf", "r") as f:
code = f.read()
code = re.sub("[^><\+-\.,\[\]]", "", code)
out = "["
for char in code:
out += str(CODE_MAP[char]) + ","
out += "]"
print(out)
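# Example: the program "+>[-]." is emitted as "[0,2,6,1,7,4,]" (the script
# leaves a trailing comma before the closing bracket).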
|
idkhow2type/brainfuck
|
interpreters/desmos.py
|
desmos.py
|
py
| 459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37204853562
|
import logging
import logging.config
import os
import yaml
from commons.path import LOG_PATH, CONFIG_PATH
class MyLog:
def __init__(self, file_name, config_path=CONFIG_PATH, handel_name='server', level=logging.INFO):
"""
        Custom logger object
        :param config_path: path to the custom logging configuration file
        :param file_name: name of the log file for this logger
        :param handel_name: name of the handler; only change this if you defined your own handler in the config file, otherwise keep the default
        :param level: logging level
"""
self.config_path = config_path
self.file_name = LOG_PATH + file_name
self.handler = handel_name
self.level = level
def setup_logging(self, env_key='LOG_CFG'):
"""
| **@author:** Prathyush SP
| Logging Setup
"""
value = os.getenv(env_key, None)
if value:
self.config_path = value
if os.path.exists(self.config_path):
with open(self.config_path, 'rt', encoding="utf-8") as f:
try:
config = yaml.safe_load(f.read())
logconfig = config['logConfig']
logconfig['handlers']['file']['filename'] = self.file_name
logging.config.dictConfig(logconfig)
except Exception as e:
print(e)
print('Error in Logging Configuration. Using default configs')
logging.basicConfig(level=self.level)
else:
logging.basicConfig(level=self.level)
print('Failed to load configuration file. Using default configs')
def get_loger(self):
self.setup_logging()
loger = logging.getLogger(self.handler)
return loger
if __name__ == '__main__':
    logger = MyLog('tjk.log', config_path='../config.yaml').get_loger()
logger.info("testssss")
|
tangjikuo/pdfHandlerSite
|
commons/logs.py
|
logs.py
|
py
| 1,948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13919204702
|
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class PublicHolidays(models.Model):
_name = "public.holidays"
_description = "Public Holidays"
@api.depends("date")
def _compute_weekday(self):
"""Compute weekday based on the date."""
for day in self:
day.weekday = day.date.strftime("%A") if day.date else ""
partner_id = fields.Many2one("res.partner", string="Supplier")
date = fields.Date("Date", required=True)
weekday = fields.Char(compute="_compute_weekday", string="Day")
reason = fields.Char("Reason")
@api.constrains("date")
def _check_holiday(self):
"""
Check for the overlapping record
Raises:
ValidationError: If there's an overlapping record.
"""
for holiday in self:
domain = [
("date", "=", holiday.date),
("partner_id", "=", holiday.partner_id.id),
("id", "!=", holiday.id),
]
if self.search_count(domain):
raise ValidationError(
_("Already, That day has been declared as holiday!")
)
|
onesteinbv/ProjectManagement
|
project_team_leave_management/models/public_holidays.py
|
public_holidays.py
|
py
| 1,197 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21529723220
|
import logging
import sys
import re
import time
import json
def run(ctx):
# Get the ticket data from the context
ticket = ctx.config.get('data').get('ticket')
ticket_srn = ticket.get('srn')
# Create GraphQL client
graphql_client = ctx.graphql_client()
#query ticket endpoint for swimlanes
queryTicketsForSwimlanes = ('''
{
Tickets
(where: { srn: {op:EQ, value:"'''+ ticket_srn + '''"}})
{
items {
swimlaneSRNs
}
}
}
''')
variables = { }
logging.info('Searching for swimlanes of ticket {}'.format(ticket_srn))
r_ticket_swimlanes = graphql_client.query(queryTicketsForSwimlanes, variables)
swimlaneList = r_ticket_swimlanes['Tickets']['items'][0]['swimlaneSRNs']
# get resourceIDs of the Swimlanes of the tickets
querySwimlanes =('''
query Swimlanes ($swimlaneSRNs: [String]){Swimlanes
(where:
{srn: {op:IN_LIST, values:$swimlaneSRNs}}
)
{
items {
resourceId
}}}
''')
#Build the variable to use the query
variables = ('{"swimlaneSRNs": [')
for resourceId in swimlaneList:
variables += '"'+resourceId+'",'
variables = variables[ : -1]
variables += ']}'
logging.info('Searching for resourceIds of swimlanes {}'.format(swimlaneList))
r_swimlanes = graphql_client.query(querySwimlanes, variables)
group_srns = None
# Loop through each of the custom fields and set the values that we need
for customField in ticket.get('customFields'):
if 'value' not in customField.keys():
continue
name = customField['name']
value = customField['value']
if name == 'AD Group':
group_srns = value.strip('][').split(',')
# Built query for groups
group_filter = ""
for srn in group_srns:
#get each individual group and put in the proper format to work in the graphQL below
group_filter += ('{srn: { op:CONTAINS, value: '+srn+'}}')
#GraphQL query for the groups
queryADUsersByGroup = ('''
query ActiveDirectoryUsersInGroup {
Users(
where: {
and: [
{
and: [
{ active: { op: EQ, value: true } }
{ type: { op: EQ, value: ActiveDirectoryUser } }
]
}
{
isMemberOf: {
count: { op: GT, value: 0 }
items: {
and: [
{
or: [ '''
+ group_filter +
''']
}
{}
]
}
}
}
]
}
) {
count
items {
userName
name
}
}
}
''')
# get emails, names from AD groups
variables = { }
logging.info('Searching for users in AD groups: {}'.format(group_srns))
r_AD_query = graphql_client.query(queryADUsersByGroup, variables)
# Query for the current users of the platform
querySonraiUsers = 'query sonraiusers{SonraiUsers {items{ email } } }'
variables = { }
logging.info('Searching for existing Platform users')
r_platform_users = graphql_client.query(querySonraiUsers, variables)
# Query for users on the invite list
querySonraiInvites = 'query sonraiinvites{SonraiInvites {items {email} } }'
variables = { }
logging.info('Searching for users already invited')
r_invited_users = graphql_client.query(querySonraiInvites, variables)
# Only allowing this script to assign "Data Viewer" role
role = "srn:supersonrai::SonraiRole/DataViewer"
#build pendingRolesAssigners from role and swimlanes
pending_role_assigners = '"pendingRoleAssigners":[ '
for sw in r_swimlanes['Swimlanes']['items']:
pending_role_assigners += ( '{"roleSrn": "'+role+'",')
pending_role_assigners += ( '"scope": "'+sw['resourceId']+'"},')
#remove the last comma from the pending role assigners
pending_role_assigners = pending_role_assigners[ : -1]
pending_role_assigners += ']'
# invite user mutation
mutation_invite = '''mutation inviteUser($input: [SonraiInviteCreator!]!) {
CreateSonraiInvites(input: $input) {
items { srn resourceId email dateSent expiry isPending pendingRoleAssignments
{ items { srn role { items { srn name }} scope } } } } }'''
for email in r_AD_query['Users']['items']:
invite_user = True
#check if the userName is in the invite list
for already_invited in r_invited_users['SonraiInvites']['items']:
if email['userName'] == already_invited['email']:
invite_user = False
#check if the userName is in the platform user list
for already_added in r_platform_users['SonraiUsers']['items']:
if email['userName'] == already_added['email']:
invite_user = False
if invite_user:
variables = ( '{ "input" : { ' +
'"email":"' +email['userName']+ '",'
'"name":"' + email['name'] + '",' +
pending_role_assigners +
'} }')
logging.info('inviting users {}'.format(email['userName']))
r_create_invite = graphql_client.query(mutation_invite, variables)
|
sonraisecurity/sonrai-bots
|
remediation/azure/add_sonrai_platform_user/bot.py
|
bot.py
|
py
| 5,348 |
python
|
en
|
code
| 5 |
github-code
|
6
|
32522069165
|
from codigo.funcionesAuxiliares import *
import time
import copy
# Main algorithm: calls the functions needed to produce the expected output.
# Each of these functions is described in codigo/funcionesAuxiliares.py
def clustering(iteraciones_prueba, datos_entrada, tipo_input, num_cluster, criterio_parada, numero_iteraciones,
circunferencias_entrada):
estadisticas = []
tiempo = time.time()
for i in range(iteraciones_prueba):
# 0. Read the data
datos = leer_datos(datos_entrada)
similitud_cluster = True
# 1. Initialise the initial circles (centre and radius of each one)
circunferencias = []
if tipo_input == 1:
inicializar_datos(datos, num_cluster, circunferencias)
else:
circunferencias = copy.deepcopy(circunferencias_entrada)
# 2. Repeat (until the stopping condition is met)
if criterio_parada == 0:
iteraciones = criterio_iteraciones(numero_iteraciones, datos, circunferencias)
else:
iteraciones = criterio_similitud(similitud_cluster, circunferencias, datos)
# 3. Assign each point only to the cluster for which it has the highest degree of membership
asignar_puntos(circunferencias, datos, estadisticas)
datos_salida = mostrar_resultados(datos, estadisticas, iteraciones, iteraciones_prueba, tiempo)
return datos_salida
|
sergioperez1998/ProyectoClusteringIA
|
codigo/clustering.py
|
clustering.py
|
py
| 1,446 |
python
|
es
|
code
| 0 |
github-code
|
6
|
70472477627
|
import numpy as np
import matplotlib.pyplot as plt
# data to plot
n_groups = 5
meso4 = (0.5, 0.65, 0.84, 0.7,0.51)
capsule = (0.84, 0.89, 0.96, 0.95, 0.88)
xception = (0.93, 0.97, 0.98, 0.95, 0.88)
gan = (0.72, 0.73, 0.86, 0.86, 0.72)
spectrum = (0.81, 0.83, 0.98, 0.67, 0.57)
headpose = (0.64, 0.64, 0.64, 0.64, 0.62)
visual = (0.96, 0.96, 0.97, 0.84, 0.69)
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.barh(index, meso4, bar_width,
alpha=opacity,
color='b',
label='meso4')
rects2 = plt.barh(index + bar_width, capsule, bar_width,
alpha=opacity,
color='g',
label='capsule')
rects3 = plt.barh(index + 2*bar_width, xception, bar_width,
alpha=opacity,
color='r',
label='xception')
# plt.xlabel('Person')
# plt.ylabel('Scores')
plt.title('Scores by person')
plt.yticks(index + bar_width, ('meso4', 'capsule', 'xception', 'gan'))
plt.legend()
plt.tight_layout()
plt.show()
|
phuc180155/GraduationThesis
|
dfd_benchmark/plot_image/more_bar_chart.py
|
more_bar_chart.py
|
py
| 981 |
python
|
en
|
code
| 2 |
github-code
|
6
|
20513626833
|
from selenium import webdriver
from time import sleep
# validateText = "Option3"
driver = webdriver.Chrome(executable_path="/home/chaitanya/Documents/software/drivers/chromedriver_linux64/chromedriver")
driver.get("https://rahulshettyacademy.com/AutomationPractice/")
# Positive case
driver.find_element_by_css_selector("input#name").send_keys("Option3")
validateText = driver.find_element_by_xpath("//input[@id='name']").get_attribute("value")
driver.find_element_by_xpath("//input[@value='Alert']").click()
alert = driver.switch_to.alert
assert validateText in alert.text
sleep(5)
alert.accept()
# Negative case
driver.find_element_by_id("confirmbtn").click()
# validateText = driver.find_element_by_xpath("//input[@value='Confirm']").text
# assert validateText in alert.text
sleep(5)
alert.dismiss()
|
ChaithanyaRepo/PythonTesting
|
PythonSelenium/alerts.py
|
alerts.py
|
py
| 788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31355510321
|
import unittest
import pyproj
import simplegrid as sg
class TestNearest(unittest.TestCase):
def test_nearest_sw_corner(self):
geod = pyproj.Geod(ellps='sphere')
mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90)
i,j,dist = sg.util.nearest(-128.,67.5,mg['XG'],mg['YG'],geod)
self.assertEqual((i,j),(0,0))
self.assertAlmostEqual(dist,1.20941759e-09)
def test_nearest_ne_corner(self):
geod = pyproj.Geod(ellps='sphere')
mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90)
i,j,dist = sg.util.nearest(-115.,-88.17570,mg['XG'],mg['YG'],geod)
self.assertEqual((i,j),(270,90))
self.assertAlmostEqual(dist,1.14379740)
def test_nearest_center(self):
geod = pyproj.Geod(ellps='sphere')
mg = sg.gridio.read_mitgridfile('./data/tile005.mitgrid', 270, 90)
i,j,dist = sg.util.nearest(-83.,-24.310,mg['XG'],mg['YG'],geod)
self.assertEqual((i,j),(135,45))
self.assertAlmostEqual(dist,6.2719790)
if __name__=='__main__':
unittest.main()
|
nasa/simplegrid
|
simplegrid/tests/test_nearest.py
|
test_nearest.py
|
py
| 1,096 |
python
|
en
|
code
| 5 |
github-code
|
6
|
6545029693
|
from sqlalchemy.orm import Session
from . import models, schemas
def get_items(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Item).offset(skip).limit(limit).all()
def create_objective(db: Session, objective: schemas.ObjectiveBase):
db_objective = models.Objective(**objective.dict())
db.add(db_objective)
db.commit()
db.refresh(db_objective)
return db_objective
def update_objective(db: Session, objective_id: int, objective: schemas.ObjectiveBase):
db_objective = db.query(models.Objective).filter(models.Objective.id == objective_id).first()
db_objective.title = objective.title
db_objective.order = objective.order
db.commit()
db.refresh(db_objective)
return db_objective
def delete_objective(db: Session, objective_id: int):
db_objective = db.query(models.Objective).filter(models.Objective.id == objective_id).delete()
db.commit()
return db_objective
|
yaseralnajjar/Fast-API-Sample
|
my_app/crud.py
|
crud.py
|
py
| 949 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31543780794
|
lines = []
print("\n\tEnter lines: \n")
while True :
string = input("\t")
if string == '':
break
lines.append(string)
print("\n\tThe lines you entered are:\n")
for i in lines:
print('\t' + i)
print()
|
Shobhit0109/programing
|
EveryOther/Practical File/python/P9.py
|
P9.py
|
py
| 232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1171875743
|
#!/usr/bin/env python
import ROOT
class DefineHistograms:
"""Class to define all histograms to be filled for SMELLIE analysis
Attributes:
t_res (TH1D) : time residual histogram for all events
t_res_beam (TH1D) : time residual histogram for direct beam light
t_res_double (TH1D) : time residual histogram for late pulses
t_res_psup (TH1D) : time residual histogram for PSUP reflections
t_res_avin (TH1D) : time residual histogram for far side AV reflections
t_res_avout (TH1D) : time residual histogram for near side AV reflections
t_res_scatt (TH1D) : time residual histogram for scattered photons
t_res_multi (TH1D) : time residual histogram for multiple optical interactions
angle_time (TH2D) : PMT angle with respect to the fibre direction vs time
residual for all events
angle_time_beam (TH2D) : PMT angle with respect to the fibre direction
vs time residual for direct beam light
angle_time_double (TH2D) : PMT angle with respect to the fibre direction
vs time residual for late pulses
angle_time_scatt (TH2D) : PMT angle with respect to the fibre direction
vs time residual for scattered photons
angle_time_avin (TH2D) : PMT angle with respect to the fibre direction
vs time residual for far side AV reflections
angle_time_avout (TH2D) : PMT angle with respect to the fibre direction
vs time residual for near side AV reflections
angle_time_psup (TH2D) : PMT angle with respect to the fibre direction
vs time residual for PSUP reflections
angle_time_multi (TH2D) : PMT angle with respect to the fibre direction
vs time residual for multiple optical interactions
z_time (TH2D) : PMT z coordinate vs time residual for all events
z_time_beam (TH2D) : PMT z coordinate vs time residual for direct beam light
z_time_double (TH2D) : PMT z coordinate vs time residual for late pulses
z_time_scatt (TH2D) : PMT z coordinate vs time residual for scattered photons
z_time_avin (TH2D) : PMT z coordinate vs time residual for far side AV reflections
z_time_avout (TH2D) : PMT z coordinate vs time residual for near side AV reflections
z_time_psup (TH2D) : PMT z coordinate vs time residual for PSUP reflections
z_time_multi (TH2D) : PMT z coordinate vs time residual for multiple optical interactions
theta_phi (TH2D) : PMT theta coordinate vs PMT phi coordinate for all events
theta_phi_beam (TH2D) : PMT theta coordinate vs PMT phi coordinate for direct beam light
theta_phi_double (TH2D) : PMT theta coordinate vs PMT phi coordinate for late pulses
theta_phi_scatt (TH2D) : PMT theta coordinate vs PMT phi coordinate for scattered photons
theta_phi_avin (TH2D) : PMT theta coordinate vs PMT phi coordinate for far side AV reflections
theta_phi_avout (TH2D) : PMT theta coordinate vs PMT phi coordinate for near side AV reflections
theta_phi_psup (TH2D) : PMT theta coordinate vs PMT phi coordinate for PSUP reflections
theta_phi_multi (TH2D) : PMT theta coordinate vs PMT phi coordinate for multiple interactions
h_theta (TH1D) : PMT theta coordinate for all events
h_theta_beam (TH1D) : PMT theta coordinate for direct beam light
h_theta_double (TH1D) : PMT theta coordinate for late pulses
h_theta_scatt (TH1D) : PMT theta coordinate for scattered photons
h_theta_avin (TH1D) : PMT theta coordinate for far side AV reflections
h_theta_avout (TH1D) : PMT theta coordinate for near side AV reflections
h_theta_psup (TH1D) : PMT theta coordinate for PSUP reflections
h_theta_multi (TH1D) : PMT theta coordinate for multiple optical interactions
h_phi (TH1D) : PMT phi coordinate for all events
h_phi_beam (TH1D) : PMT phi coordinate for direct beam light
h_phi_double (TH1D) : PMT phi coordinate for late pulses
h_phi_scatt (TH1D) : PMT phi coordinate for scattered photons
h_phi_avin (TH1D) : PMT phi coordinate for far side AV reflections
h_phi_avout (TH1D) : PMT phi coordinate for near side AV reflections
h_phi_psup (TH1D) : PMT phi coordinate for PSUP reflections
h_phi_multi (TH1D) : PMT phi coordinate for multiple optical interactions
h_nhits (TH1D) : Number of hits for all events
"""
def __init__(self):
self.t_res = ROOT.TH1D("time_residual", "", 600, -20.0, 500.0)
self.t_res_beam = ROOT.TH1D("time_residual_beam", "", 500, -20.0, 300.0)
self.t_res_double = ROOT.TH1D("time_residual_double", "", 500, -20.0, 300.0)
self.t_res_psup = ROOT.TH1D("time_residual_psup", "", 500, -20.0, 300.0)
self.t_res_avin = ROOT.TH1D("time_residual_avin", "", 500, -20.0, 300.0)
self.t_res_avout = ROOT.TH1D("time_residual_avout", "", 500, -20.0, 300.0)
self.t_res_scatt = ROOT.TH1D("time_residual_scatt", "", 500, -20.0, 300.0)
self.t_res_multi = ROOT.TH1D("time_residual_multi", "", 500, -20.0, 300.0)
self.angle_time_beam = ROOT.TH2D("angle_time_beam", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_double = ROOT.TH2D("angle_time_double", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time = ROOT.TH2D("angle_time", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_scatt = ROOT.TH2D("angle_time_scatt", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_avin = ROOT.TH2D("angle_time_avin", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_avout = ROOT.TH2D("angle_time_avout", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_psup = ROOT.TH2D("angle_time_psup", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.angle_time_multi = ROOT.TH2D("angle_time_multi", "", 150, -20.0, 300.0, 100, 0.0, 200.0)
self.z_time = ROOT.TH2D("z_time", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_beam = ROOT.TH2D("z_time_beam", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_double = ROOT.TH2D("z_time_double", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_scatt = ROOT.TH2D("z_time_scatt", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_avin = ROOT.TH2D("z_time_avin", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_avout = ROOT.TH2D("z_time_avout", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_psup = ROOT.TH2D("z_time_psup", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.z_time_multi = ROOT.TH2D("z_time_multi", "", 150, -20.0, 300.0, 1000, -10000.0, 10000.0)
self.theta_phi = ROOT.TH2D("theta_phi", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_beam = ROOT.TH2D("theta_phi_beam", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_double = ROOT.TH2D("theta_phi_double", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_scatt = ROOT.TH2D("theta_phi_scatt", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_avin = ROOT.TH2D("theta_phi_avin", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_avout = ROOT.TH2D("theta_phi_avout", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_psup = ROOT.TH2D("theta_phi_psup", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.theta_phi_multi = ROOT.TH2D("theta_phi_multi", "", 100, -3.5, 3.5, 100, 0, 3.5)
self.h_theta = ROOT.TH1D("theta", "", 100, 0.0, 3.5)
self.h_theta_beam = ROOT.TH1D("theta_beam", "", 100, 0.0, 3.5)
self.h_theta_double = ROOT.TH1D("theta_double", "", 100, 0.0, 3.5)
self.h_theta_scatt = ROOT.TH1D("theta_scatt", "", 100, 0.0, 3.5)
self.h_theta_avin = ROOT.TH1D("theta_avin", "", 100, 0.0, 3.5)
self.h_theta_avout = ROOT.TH1D("theta_avout", "", 100, 0.0, 3.5)
self.h_theta_psup = ROOT.TH1D("theta_psup", "", 100, 0.0, 3.5)
self.h_theta_multi = ROOT.TH1D("theta_multi", "", 100, 0.0, 3.5)
self.h_phi = ROOT.TH1D("phi", "", 100, -3.5, 3.5)
self.h_phi_beam = ROOT.TH1D("phi_beam", "", 100, -3.5, 3.5)
self.h_phi_double = ROOT.TH1D("phi_double", "", 100, -3.5, 3.5)
self.h_phi_scatt = ROOT.TH1D("phi_scatt", "", 100, -3.5, 3.5)
self.h_phi_avin = ROOT.TH1D("phi_avin", "", 100, -3.5, 3.5)
self.h_phi_avout = ROOT.TH1D("phi_avout", "", 100, -3.5, 3.5)
self.h_phi_psup = ROOT.TH1D("phi_psup", "", 100, -3.5, 3.5)
self.h_phi_multi = ROOT.TH1D("phi_multi", "", 100, -3.5, 3.5)
self.h_nhits = ROOT.TH1D("number_of_hits","",200,0.0,1000.0)
|
slangrock/SCUNC
|
define_histograms.py
|
define_histograms.py
|
py
| 8,867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31280623414
|
__author__ = 'Vincent'
from sqlalchemy import *
from threading import Thread
from threading import Event
from gps import *
from utils.bdd import *
from utils.functions import *
class GpsThread(Thread):
def __init__(self, session_db, event):
Thread.__init__(self)
self.db_session = session_db
self.recording_interval = 0
self.track_session_id = 0
self.can_run = Event()
self.stop_program = Event()
self.event = event
def set_recording_inteval(self, recording_interval):
self.recording_interval = recording_interval
def set_track_session_id(self, track_session_id):
self.track_session_id = track_session_id
def run(self):
session = gps(mode=WATCH_ENABLE)
while not self.stop_program.isSet():
self.can_run.wait()
while not self.stop_program.isSet() and self.can_run.isSet():
# get the gps datas
if session.waiting():
datas = session.next()
if datas['class'] == "TPV":
self.event.set()
# create gps datas and insert it
gps_data = GPSData(latitude=session.fix.latitude, longitude=session.fix.longitude,
speed=session.fix.speed,
date_time=func.strftime('%Y-%m-%d %H:%M:%f', datetime.now()),
session_id=self.track_session_id)
log.log("Insert: " + str(gps_data), log.LEVEL_DEBUG)
self.db_session.add(gps_data)
self.db_session.commit()
self.event.clear()
time.sleep(self.recording_interval)
log.log("GpsThread stopped !!", log.LEVEL_DEBUG)
session = None
def pause(self):
log.log("GpsThread pausing ....", log.LEVEL_DEBUG)
self.can_run.clear()
def resume(self):
log.log("GpsThread resuming ....", log.LEVEL_DEBUG)
self.can_run.set()
def stop(self):
log.log("GpsThread stopping ....", log.LEVEL_DEBUG)
self.stop_program.set()
self.resume()
|
capic/KartTrackerCore
|
threads/gps_thread.py
|
gps_thread.py
|
py
| 2,197 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43627739614
|
class Solution:
# ends0: if we meet 0, we can append 0 to all existing ends0 + ends1
# ends0 = ends0 + ends1
#
# ends1: if we meet 1, we can append 1 to all existing ends0 + ends1
# and also adding "1"
# ends1 = ends0 + ends1 + 1
#
# example:
# num 1 0 1 1
# end0Count 0 1 1 1
# end1Count 1 1 3 5
# end0Arr [] [10] [10] [10]
# end1Arr [1] [1] [11, 101, 1] [101, 111, 1011, 11, 1]
def numberOfUniqueGoodSubsequences(self, binary: str) -> int:
MOD = 10**9 + 7
ends0 = 0
ends1 = 0
has0 = 0
for i, c in enumerate(binary):
if c == '1':
ends1 = (ends0 + ends1 + 1) % MOD
else:
ends0 = (ends0 + ends1) % MOD
has0 = 1
return (ends0 + ends1 + has0) % MOD
def test(self):
test_cases = [
'001',
'11',
'101',
]
for binary in test_cases:
res = self.numberOfUniqueGoodSubsequences(binary)
print('res: %s' % res)
print('-='*30 + '-')
if __name__ == '__main__':
Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1951_2000/LeetCode1987_NumberOfUniqueGoodSubsequences.py
|
LeetCode1987_NumberOfUniqueGoodSubsequences.py
|
py
| 1,320 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21188009718
|
""" Module for city related models. """
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from data import CONFIG
from models import Base
CONSUMPTION_RATES = CONFIG.get("game.cities.consumption")
class City(Base):
""" Model for tracking city data. """
__tablename__ = "City"
id = Column(Integer, primary_key=True)
name = Column(String, unique=True, nullable=False)
population = Column(Integer, nullable=False)
location_id = Column(
Integer, ForeignKey("Location.id"), unique=True, nullable=False, index=True
)
location = relationship("Location")
resources = relationship("CityResource", cascade="all, delete-orphan")
def __str__(self) -> str:
return f"{self.name}"
def __repr__(self) -> str:
return (
"City("
f"id={self.id}, "
f"name='{self.name}', "
f"location={self.location}, "
f"population={self.population}, "
f"location_id={self.location_id})"
)
@property
def json(self):
""" Get json data to send to client. """
return {
"id": self.id,
"position": self.location.coordinate.json,
"name": self.name,
"population": self.population,
"resources": {slot.resource.name: slot for slot in self.resources},
}
class CityResource(Base):
""" Model for tracking which resources a city has. """
__tablename__ = "CityResource"
id = Column(Integer, primary_key=True)
amount = Column(Integer, nullable=False)
city_id = Column(Integer, ForeignKey("City.id"), nullable=False, index=True)
city = relationship("City")
resource_id = Column(
Integer, ForeignKey("ResourceType.id"), nullable=False, index=True
)
resource = relationship("ResourceType", uselist=False)
def __str__(self) -> str:
return f"{self.city} has {self.amount} {self.resource}"
def __repr__(self) -> str:
return (
"CityResource("
f"id={self.id}, "
f"amount={self.amount}, "
f"city_id={self.city_id}, "
f"resource_id={self.resource_id})"
)
@property
def price(self):
""" Return price per unit of held resource. """
saturation = max(self.amount, self.city.population / 2) / (
self.city.population * CONSUMPTION_RATES[self.resource.name]
)
return self.resource.base_cost / saturation
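# Worked example (all numbers are made up for illustration; the real consumption
# rates come from the game config): with population = 100, a consumption rate of 0.5,
# base_cost = 10 and amount = 30 held in the city:
#   saturation = max(30, 100 / 2) / (100 * 0.5) = 50 / 50 = 1.0
#   price      = 10 / 1.0 = 10
# The max(...) floor means that once the stock drops below half the population,
# the price stops rising and is capped at base_cost * 2 * rate.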
@property
def amount_in_market(self):
""" Return amount of resources available in market for players to purchase. """
reserved = self.city.population * 2
if reserved > self.amount:
return 0
return self.amount - reserved
@property
def json(self):
""" Send json data for client. """
return {"amount": self.amount, "price": self.price}
|
Jordan-Cottle/Game-Design-Capstone
|
StarcorpServer/starcorp/models/city.py
|
city.py
|
py
| 2,949 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41244932240
|
'''Create a program that has a tuple with several words
(no accented characters). After that, for each word, you must show
which vowels it contains.'''
tupla = ('andre', 'camila', 'davi', 'isabella')
for palavra in tupla:
print(f'\nNa palavra {palavra} temos as vogais: ', end='')
for letra in palavra:
if letra.lower() in 'aeiou':
print(letra, end=' ')
'''
First, we define the tuple of words: tupla = ('andre', 'camila', 'davi', 'isabella').
Next, we start a for loop to walk through each word in the tuple: for palavra in tupla:.
For each word in the tuple, we print the opening message with the current word: print(f'\nNa palavra {palavra} temos as vogais: ', end=''). The \n before the message prints a new line for each word.
Now we start a second for loop to walk through each letter of the current word: for letra in palavra:.
For each letter in the word, we check whether it is a vowel. We use letra.lower() to convert the letter to lowercase and guarantee a case-insensitive comparison. The condition if letra.lower() in 'aeiou': checks whether the letter is present in the string of vowels.
If the letter is a vowel, we print it followed by a blank space: print(letra, end=' '). Using end=' ' replaces the default line break with a blank space, which lets the vowels be printed side by side.
Once the second for loop finishes, control returns to the first for loop, moving on to the next word in the tuple.
This process repeats for every word in the tuple until all the words have been processed.'''
|
andrematos90/Python
|
CursoEmVideo/Módulo 3/Desafio 077.py
|
Desafio 077.py
|
py
| 1,633 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
17694953325
|
from Transformer import Transformer
from MultiHeadAttention import MultiHeadAttention
from tqdm import tqdm
from Metrics import grad
from Metrics import loss_function
from Metrics import loss_function2
from Metrics import accuracy_function
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from einops import rearrange
import tensorflow as tf
import os
import numpy as np
import joblib
import pandas as pd
import logging
import sqlite3 as sql
import time
num_layers = int(os.environ["TRANSFORMER_LAYERS"])
d_model = int(os.environ["W2V_EMBED_SIZE"])
dff = int(os.environ["TRANSFORMER_DFF"])
num_heads = int(os.environ["TRANSFORMER_HEADS"])
batch_size = int(os.environ["BATCH_SIZE"])
training = bool(int(os.environ["TRAINING"]))
epochs = int(os.environ["EPOCHS"])
max_seq_len = 200
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model: int, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
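# This is the warm-up schedule from "Attention Is All You Need":
#   lr(step) = d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
# Rough illustration (numbers rounded, assuming d_model = 512 and the default
# warmup_steps = 4000): the rate rises linearly to a peak of about 7.0e-4 at
# step 4000 and then decays proportionally to 1 / sqrt(step).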
learning_rate = CustomSchedule(d_model)
optimus_prime = None
adm_optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
epoch_loss = tf.keras.metrics.Mean(name='train_loss')
epoch_accuracy = tf.keras.metrics.Mean(name='train_accuracy')
train_step_signature = [
tf.TensorSpec(shape=([batch_size, None]), dtype=tf.float32),
tf.TensorSpec(shape=([batch_size]), dtype=tf.float32)
]
add_att_layer = tf.keras.layers.AdditiveAttention()
softmax = tf.keras.layers.Softmax()
lr = LogisticRegression()
s1 = tf.keras.Sequential([
tf.keras.layers.Dense(512),
tf.keras.layers.Dense(4),
tf.keras.layers.Softmax()
])
@tf.function(input_signature=train_step_signature)
def train_step(log_batch: tf.Tensor, labels: tf.Tensor):
transformer_input = tf.tuple([
log_batch, # <tf.Tensor: shape=(batch_size, max_seq_len), dtype=float32>
labels # <tf.Tensor: shape=(batch_size,), dtype=float32>
])
with tf.GradientTape() as tape:
Rs, _ = optimus_prime.call(transformer_input)
# a_s = add_att_layer([Rs, Rs])
# y = softmax(a_s * Rs)
y = Rs
loss = tf.py_function(loss_function, [labels, y], tf.float32)
pred = s1(y)
labels = tf.cast(labels, tf.int64)
# Optimize the model
grads = tape.gradient(loss, optimus_prime.trainable_variables)
adm_optimizer.apply_gradients(zip(grads, optimus_prime.trainable_variables))
# Tracking Progress
epoch_loss.update_state(loss) # Adding Batch Loss
epoch_accuracy.update_state(accuracy_function(labels, pred))
logging.basicConfig(format='%(asctime)s %(levelname)s | %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def database_builder(path: str) -> pd.DataFrame:
logger.info('Building DataFrame ...')
(_, _, files) = next(os.walk(path))
sql_query = 'SELECT * FROM logs'
data = []
for f in files:
if '.db' in f:
conn = create_connection(path + f)
d = pd.read_sql_query(sql_query, conn)
data.append(d)
logger.info('...complete!')
return pd.concat(data)
def create_connection(path: str) -> sql.Connection:
"""
Creates a database connection
:param path: str
path to database object
:return sql.Connection
a connection to the database
"""
try:
conn = sql.connect(path)
logger.info('Connected to database ' + path)
return conn
except sql.Error as e:
logger.warning(e)
def get_max_length_(dataset: pd.DataFrame, buffer_size: float) -> int:
return int((1 + buffer_size) * dataset['log'].str.len().max())
def process_batch(dataset: pd.DataFrame,
vocabulary: dict,
max_seq_len: int,
idx: int,
labels: dict) -> tuple:
logs = np.zeros((batch_size, max_seq_len))
y_true = np.empty((batch_size,))
start_window = idx * batch_size
end_window = (idx + 1) * batch_size
for log_idx, log in enumerate(dataset['log'][start_window:end_window]):
for seq_idx, word in enumerate(log.split()):
if seq_idx >= max_seq_len:
break
logs[log_idx, seq_idx] = vocabulary[word] if word in vocabulary.keys() else 0
y_true[log_idx] = labels[dataset['label'][start_window + log_idx]]
return tf.convert_to_tensor(logs, dtype=tf.float32), tf.convert_to_tensor(y_true, dtype=tf.float32)
if __name__ == '__main__':
logging.info('Loading assets')
word_embedding_matrix = joblib.load("/results/w2v_weights.joblib")
vocabulary = joblib.load("/results/vocab_dict.joblib")
dataset = database_builder('/database/')
dataset = dataset.sample(frac=1).reset_index(drop=True)
max_seq_len = 200 # get_max_length_(dataset, 0.0)
vocab_size = len(vocabulary)
logging.info('Processing logs for training')
label_unique = dataset['label'].unique()
lbp = LabelEncoder().fit(label_unique)
binary_labels = lbp.transform(label_unique)
log_labels = {}
for idx, label in enumerate(label_unique):
log_labels.update({
label: binary_labels[idx]
})
n_logs = len(dataset.index)
n_iter = n_logs // batch_size
remainder = n_logs % batch_size
attns = []
optimus_prime = Transformer(
num_layers,
d_model,
num_heads,
dff,
vocab_size,
word_embedding_matrix,
max_seq_len,
rate=0.1)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
checkpoint_path = "./checkpoints/train"
checkpoint = tf.train.Checkpoint(step=tf.Variable(1), transformer=optimus_prime, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(checkpoint, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if checkpoint_manager.latest_checkpoint:
checkpoint.restore(checkpoint_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
for epoch in tqdm(range(epochs)):
start = time.time()
epoch_loss.reset_states()
epoch_accuracy.reset_states()
for idx in range(n_iter):
log_batch, labels = process_batch(dataset, vocabulary, max_seq_len, idx, log_labels)
# Returns Eager Tensor for Predictions
train_step(log_batch, labels)
checkpoint.step.assign_add(1)
if int(checkpoint.step) % 10 == 0:
save_path = checkpoint_manager.save()
print(f'Saved checkpoint for step {int(checkpoint.step)}: {save_path}')
print(f'Loss {epoch_loss.result():.3f}, Accuracy: {epoch_accuracy.result():.3%}')
print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
epoch_loss.result(),
epoch_accuracy.result()))
print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\n')
|
whytheevanssoftware/log-analyzer
|
training/__main__.py
|
__main__.py
|
py
| 7,572 |
python
|
en
|
code
| 2 |
github-code
|
6
|
5504018518
|
# https://www.hackerrank.com/challenges/zipped/problem
N, X = map(int, input().split())
data = []
for _ in range(X):
subject_marks = list(map(float, input().split()))
data.append(subject_marks)
tuples = zip(*data)
for element in tuples:
print(sum(element) / X)
|
Nikit-370/HackerRank-Solution
|
Python/zipped.py
|
zipped.py
|
py
| 275 |
python
|
en
|
code
| 10 |
github-code
|
6
|
34029348872
|
import matplotlib.pyplot as plt
import numpy as np
"""
Plot of success rate for a single NN with different control horizons
"""
# testing_result = [47.74, 52.76, 61.81, 63.82, 50.75]
# baseline_result = [44.72, 45.73, 51.25, 52.26, 46.73]
testing_result = [48, 53, 62, 64, 51]
baseline_result = [45, 46, 51, 52, 47]
# labels = ['1.5', '1', '0.5', '0.25', '0.15']
labels = ['0.67', '1', '2', '4', '6.67']
# x = np.arange(len(labels))
x_coordinate = np.asarray([1, 2, 3, 4, 5])
fig, ax = plt.subplots()
ax.set_ylabel('Success rate (%)', fontsize=12)
ax.set_xlabel('Replanning frequency (Hz)', fontsize=12)
ax.set_title('Success rate with different replanning frequencies', fontsize=12)
ax.set_xticks(x_coordinate)
ax.set_xticklabels(labels)
ax.set_yticks(np.arange(45, 65, 5))
plt.plot(x_coordinate, testing_result, 'o-', label='WayPtNav-ReachabilityCost')
plt.plot(x_coordinate, baseline_result, 's-', label='WayPtNav-HeuristicsCost')
# for i, value in enumerate(testing_result):
# x = x_coordinate[i]
# y = testing_result[i]
# if i == 0:
# scatter = ax.scatter(x, y, marker='x', color='red', label='Ours')
# else:
# scatter = ax.scatter(x, y, marker='x', color='red')
# ax.text(x + 0.05, y + 0.05, value, fontsize=9)
#
#
# for i, value in enumerate(baseline_result):
# x = x_coordinate[i]
# y = baseline_result[i]
# if i == 0:
# ax.scatter(x, y, marker='o', color='blue', label='Baseline')
# else:
# ax.scatter(x, y, marker='o', color='blue')
# ax.text(x + 0.05, y + 0.05, value, fontsize=9)
ax.legend(loc='lower right')
ax.set_aspect(aspect=0.2)
plt.show()
plot_path = '/home/anjianl/Desktop/project/WayPtNav_paper/plots/ctrlhorizon_success_rate.png'
fig.savefig(plot_path)
|
SFU-MARS/WayPtNav-reachability
|
executables/Plots_for_papers/Anjian/plot_ctrlhorizon_successful_rate.py
|
plot_ctrlhorizon_successful_rate.py
|
py
| 1,763 |
python
|
en
|
code
| 3 |
github-code
|
6
|
42739926570
|
# -*- coding: utf-8 -*-
"""
the :mod:`dataset` module provides the dataset class
and other subclasses which are used for managing datasets
"""
import pandas as pd
import numpy as np
class Dataset:
"""base class for loading datasets
Note that you should never instantiate the :class:`Dataset` class directly,
and just use the below available methods for loading datasets.
"""
def __init__(self,cfg):
self._cfg = cfg
self.movies = pd.read_csv(self._cfg.DATASET.MOVIE_SET)
self.ratings_train = pd.read_csv(self._cfg.DATASET.TRAIN_SET)
self.ratings_test = pd.read_csv(self._cfg.DATASET.TEST_SET)
self.user_list_train = self.ratings_train['userId'].drop_duplicates().values.tolist()
self.user_list_test = self.ratings_test['userId'].drop_duplicates().values.tolist()
self.user_list = self.user_list_train + self.user_list_test
self.user_list = list(set(self.user_list))
self.movie_list = self.movies['movieId'].drop_duplicates().values.tolist()
self.genre_list = self.movies['genres'].values.tolist()
self.movie_type_list = self.get_movie_type_list(self.genre_list)
self.user_map_train, self.user_map_reverse_train = self.get_list_index_map(self.user_list_train)
self.user_map_test, self.user_map_reverse_test = self.get_list_index_map(self.user_list_test)
self.type_map, self.type_map_reverse = self.get_list_index_map(self.movie_type_list)
self.user_map, self.user_map_reverse = self.get_list_index_map(self.user_list)
self.movie_map, self.movie_map_reverse = self.get_list_index_map(self.movie_list)
self.movie_type_features = self.get_movie_type_features(self.movies)
def get_movie_type_features(self,movies):
"""
build the movie genre features as a binary movie-by-genre indicator matrix
"""
movie_type_features = np.zeros((len(self.movie_list),len(self.movie_type_list)))
for row in self.movies.itertuples(index=True,name="Pandas"):
movie_id = self.movie_map[getattr(row,'movieId')]
movie_types = getattr(row,'genres').split('|')
for movie_type in movie_types:
if movie_type != '(no genres listed)':
movie_type_index = self.type_map[movie_type]
movie_type_features[movie_id,movie_type_index] = 1
return movie_type_features  # binary movie-by-genre indicator matrix
def get_list_index_map(self,list):
"""
get the index map of a list
"""
index_map = {}
index_map_reverse = {}
for i,item in enumerate(list):
index_map[item] = i
index_map_reverse[i] = item
return index_map, index_map_reverse
def get_movie_type_list(self,genres_list):
"""
get the movie type list
"""
movie_type_list = []
for item in genres_list:
movie_types = item.split('|')
for movie_type in movie_types:
if movie_type not in movie_type_list and movie_type != '(no genres listed)':
movie_type_list.append(movie_type)
return movie_type_list
def get_trainset(self):
"""
get the trainset
@return: (trainset,user_map,movie_map,type_map)
"""
return (self.ratings_train,self.user_map,self.movie_map,self.movie_type_features)
def get_testset(self):
"""
get the testset
@return: (testset,user_map,movie_map,type_map)
"""
return (self.ratings_test,self.user_map,self.movie_map,self.movie_type_features)
def get_movie_name_by_movie_id(self,movie_id):
"""
get the movie name by movie id
"""
return self.movies[self.movies['movieId'] == movie_id]['title'].values[0]
if __name__ == '__main__':
from config import cfg
dataset = Dataset(cfg)
# print(dataset.user_list)
# print(dataset.movie_list)
# print(dataset.movie_type_list)
print(dataset.type_map)
print(dataset.type_map_reverse)
# print(dataset.user_map)
# print(dataset.user_map_reverse)
# print(dataset.movie_map)
# print(dataset.movie_map_reverse)
# genres type list
print(dataset.type_map.keys())
# ['Adventure', 'Animation', 'Children', 'Comedy', 'Fantasy', 'Romance', 'Drama', 'Action', 'Crime', 'Thriller', 'Horror', 'Mystery', 'Sci-Fi', 'Documentary', 'IMAX', 'War', 'Musical', 'Western', 'Film-Noir']
|
Jack-Lio/RecommenderSystem
|
dataset.py
|
dataset.py
|
py
| 4,487 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31101340357
|
from django.urls import path
from . import views
app_name = "public"
urlpatterns = [
path("", views.index, name="index"),
path("about", views.about, name="about"),
path("upload_dataset", views.upload_dataset, name="upload_dataset"),
path("train_model", views.train_model, name="train_model"),
path("test_model", views.test_model, name="test_model"),
]
|
pdagrawal/ml_playground
|
ml_playground/apps/public/urls.py
|
urls.py
|
py
| 374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11735637618
|
"""Add md5 and sha256 columns to File
Revision ID: d128b94f9a63
Revises: 59d249ebf873
Create Date: 2021-10-24 14:54:30.381535
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd128b94f9a63'
down_revision = '59d249ebf873'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('files', sa.Column('md5', sa.LargeBinary(length=16), nullable=True))
op.add_column('files', sa.Column('sha256', sa.LargeBinary(length=32), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('files', 'sha256')
op.drop_column('files', 'md5')
# ### end Alembic commands ###
|
retroherna/rhinventory
|
alembic/versions/d128b94f9a63_add_md5_and_sha256_columns_to_file.py
|
d128b94f9a63_add_md5_and_sha256_columns_to_file.py
|
py
| 806 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38961246021
|
# Quantifiers are used to specify how many times a pattern must occur
import re
p1 = "a+"      # one or more occurrences of 'a'
p2 = "a*"      # zero or more occurrences of 'a'
p3 = "a?"      # zero or one occurrence of 'a'
p4 = "a{2}"    # exactly 2 consecutive occurrences of 'a'
p5 = "a{2,3}"  # at least 2 and at most 3 consecutive occurrences of 'a'
p6="[Kl"
p=re.finditer(p3,"KL38C2280")
count=0
for i in p:
count+=1
print(i.start())  # starting position of the match
print(i.group())  # the matched text
print("Count",count)
|
Aswin2289/LuminarPython
|
LuminarPythonPrograms/RegularExper/Quantifiers.py
|
Quantifiers.py
|
py
| 472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8717939176
|
from threading import Thread
from time import sleep
from .. import Command, BaseTask
from ... import app_manager
class MeasureWeight(BaseTask):
"""
Measures weight and saves it to database.
Extra parameters:
- 'device_id': str - ID of target device,
- 'sleep_period': float - measurement period
"""
def __init__(self, config):
self.__dict__.update(config)
required = ['sleep_period', 'device_id', 'task_id']
self.validate_attributes(required, type(self).__name__)
self.device = app_manager.deviceManager.get_device(self.device_id)
super(MeasureWeight, self).__init__(config)
def start(self):
"""
Start the task.
"""
t = Thread(target=self._run)
t.start()
def _run(self):
while self.is_active:
cmd = Command(self.device_id, "1", [], self.task_id)
self.device.post_command(cmd)
cmd.await_cmd()
cmd.save_data_to_db()
sleep(int(self.sleep_period))
def end(self):
"""
End the task.
"""
self.is_active = False
|
SmartBioTech/DeviceControl
|
app/workspace/tasks/SICS.py
|
SICS.py
|
py
| 1,134 |
python
|
en
|
code
| 2 |
github-code
|
6
|
75261205308
|
import torch
from tqdm import tqdm
def evaluate(model, loader, device):
"""
Evaluation function to calculate loss and accuracy on Val/test dataset
Args:
model (nn.Module): model to be evaluated on the give dataset
loader (DataLoader): Validation/Test dataloader to evaluate the model on.
device (torch.device): The device (CPU/GPU) to perform the evaluation on.
"""
model.to(device)
model.eval()
correct = 0
total = 0
running_loss = 0.0
criterion = torch.nn.CrossEntropyLoss()
with torch.no_grad():
for images, labels in loader:
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
predicted = torch.argmax(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
running_loss += loss.item()
accuracy = 100 * correct / total
avg_loss = running_loss / len(loader)
model.train()
return accuracy, avg_loss, correct
|
iMvijay23/Dinov2SSLImageCL
|
evaluate.py
|
evaluate.py
|
py
| 1,112 |
python
|
en
|
code
| 7 |
github-code
|
6
|
42206728289
|
import torch
from torch import nn
from torch.autograd import Variable
import torch.functional as F
from torch.optim import Adam
from torchvision.models import resnet50
# self
from vis import Vis
import vars
from data_loader import get_data_loader
from test import test
def train(epoch, model, train_loader, criterion, optimizer, vis):
model.train()
for i, (data, label) in enumerate(train_loader):
data = Variable(data).cuda() # gpu
label = Variable(label).cuda() # gpu
optimizer.zero_grad()
output = model(data)
loss = criterion(output, label)
loss.backward()
optimizer.step()
if i % 30 == 0:
status = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch + 1, i * len(data), len(train_loader.dataset),
100. * i / len(train_loader), loss.item())
print(status)
vis.update_train(x=torch.Tensor([epoch + i/len(train_loader)]),
y=torch.Tensor([loss.item()]), status=status)
if __name__ == '__main__':
# load data and init
train_loader, test_loader = get_data_loader()
vis = Vis('bba_race resnet')
# model
model = resnet50()
input_size = model.fc.in_features
model.fc = nn.Linear(input_size, 20) # output 20 category
# load exist
# checkpoints = vars.checkpoint_path + 'res_net50_0.14.pt'
checkpoints = ''
if checkpoints:
model.load_state_dict(torch.load(checkpoints)) # load exist model
model.cuda() # gpu
# criterion, optimizer
criterion = nn.CrossEntropyLoss().cuda() # gpu
optimizer = Adam(model.parameters(), lr=0.01)
epochs = 1
for epoch in range(epochs):
train(epoch, model, train_loader, criterion, optimizer, vis)
# save the model
torch.save(model.state_dict(), vars.checkpoint_path + 'res_net50_{}.pt'.format(epoch))
test(epoch, model, test_loader, criterion, vis)
|
DragonChen-TW/2018_bba_race
|
model/train.py
|
train.py
|
py
| 1,970 |
python
|
en
|
code
| 0 |
github-code
|
6
|