| seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 25893346960 |
import pygame
from pygame.draw import *
from random import randint
pygame.init()
FPS = 60 # frames per second (new circles per second)
number_of_balls=4 # number of ordinary balls
points=0 # score counter
base_points_multiplier=100 # base score multiplier
x_res,y_res=int(1920/1.25), int(1080/1.25) # resolution (cast to int for set_mode and randint)
res=[x_res,y_res]
sp_mult=0.01 # speed multiplier
screen = pygame.display.set_mode((x_res,y_res))
'''create the array of ball colors in pygame color format'''
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
MAGENTA = (255, 0, 255)
CYAN = (0, 255, 255)
BLACK = (0, 0, 0)
COLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]
class ball:
'''stores the ball coordinates and recomputes them for the next frame; pos=[x,y] and spd=[Vx,Vy], where V is the coordinate change per frame'''
def __init__(self):
self.r=randint(y_res//100,y_res//8) # radius
self.pos=[randint(self.r, int(x_res-self.r)),randint(self.r, int(y_res-self.r))] # coordinates
self.spd=[randint(-int(x_res*sp_mult), int(x_res*sp_mult)),randint(-int(y_res*sp_mult), int(y_res*sp_mult))] # velocities
self.color=COLORS[randint(0,len(COLORS)-1)] # color
def new_frame(self): # draw a new frame
pygame.draw.circle(screen, self.color,self.pos,self.r)
for i in range(2):
self.pos[i]+=self.spd[i]
if(self.pos[i]<self.r or self.pos[i]>res[i]-self.r):
self.spd[i]=-self.spd[i]
self.pos[i]+=self.spd[i]
def click_check(event, balls,add):
'''increments the global score counter when the click lands inside a circle;
takes the click event, the list of circles and the score multiplier; the awarded points are inversely proportional to the circle's radius'''
global points
for i in range(len(balls)):
x=balls[i].pos[0]
y=balls[i].pos[1]
if(((event.pos[0]-x)**2+(event.pos[1]-y)**2)<balls[i].r**2):
points += add/balls[i].r
def create_balls(n):
'''creates the balls, n - their number'''
balls=[]
for i in range(n):
balls.append(ball())
return balls
def draw_balls(balls):
'''draws the balls from the list'''
for i in range(len(balls)):
balls[i].new_frame()
def gravitation(balls,a):
'''changes the speed of the balls in the given list, a - acceleration'''
for i in range(len(balls)):
balls[i].spd[1]+=a
'''create balls of the different kinds'''
balls=create_balls(number_of_balls)
gravity_balls=create_balls(int(number_of_balls/2))
pygame.display.update()
clock = pygame.time.Clock()
finished = False
while not finished:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
elif event.type == pygame.MOUSEBUTTONDOWN:
click_check(event,balls,base_points_multiplier) # check for a hit on a regular ball
click_check(event,gravity_balls,2*base_points_multiplier)
print(points)
draw_balls(balls) # draw the balls
gravitation(gravity_balls,2) # apply gravity
draw_balls(gravity_balls)
pygame.display.update()
screen.fill(BLACK)
pygame.quit()
| furs-aka-beast/mipt_inf | 1_sem/Lab8/balls.py | balls.py | py | 3,786 | python | ru | code | 0 | github-code | 6 |
| 11307160967 |
from django.conf.urls import url
from .views import Discount_view, Category_view, Product_view, Product_detail_view, Category_detail_view
#These two added for viewsets
# from django.conf.urls import include
from rest_framework.routers import DefaultRouter
from django.urls import path, include
from django.contrib import admin
from . import views
router = DefaultRouter()
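# Note: this router is instantiated for future viewsets, but no viewsets are registered on it
# and it is not included in the urlpatterns below yet.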
urlpatterns = [
path('api/discounts', Discount_view, name='discount'),
path('api/categories', Category_view, name='category api'),
path('api/categories/<int:category_id>', Category_detail_view, name='category detail'),
path('api/products', Product_view, name='product'),
path('api/products/<int:product_id>', Product_detail_view, name='product detail api'),
path('', views.homepage, name='home'),
path('categories', views.categories, name='category'),
path('categories/<int:category_id>/products', views.products, name='products'),
path('categories/<int:category_id>/products/<int:product_id>', views.product_detail, name='product detail'),
path('products/register', views.product_regi, name="product register"),
path('cart/', views.cart, name='cart'),
path('about/', views.about, name='about'),
path('support/', views.support, name='support'),
path('signin/', views.signin, name='signin'),
path('register/', views.register, name='register'),
path('signout/', views.signout, name='signout'),
path('account/', views.account, name='account'),
path('payment/', views.payment, name='payment'),
path('shipping/', views.shipping, name='shipping'),
path('application/', views.application, name='application'),
path('order/', views.order, name='order')
]
| wjbarng/INFO441-Wholesale | wholesale/urls.py | urls.py | py | 1,708 | python | en | code | 0 | github-code | 6 |
| 22278226365 |
#python program to print number pattern using while loop
n = int(input("Enter number of rows: "))
k = 1
i = 1
while i<=n:
j = 1
while j<=i:
print(k, end="")
j+=1
k+=1
print()
i+=1
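# Illustrative expected output (assuming the corrected loop condition j <= i):
# for n = 5 the program prints the numbers 1..15 row by row:
# 1
# 23
# 456
# 78910
# 1112131415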
| luckyprasu22/python-program | pattern1-15.py | pattern1-15.py | py | 262 | python | en | code | 0 | github-code | 6 |
| 16199644126 |
############################################################################
## Django ORM Standalone Python Template
############################################################################
# Turn off bytecode generation
from datetime import time
import sys
sys.dont_write_bytecode = True
# Django specific settings
import os
project_path = "../"
project_root = "../../"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ctimanager.settings")  # setdefault (not get) so django.setup() below can find the settings
sys.path.append(project_path)
os.chdir(project_path)
import django
django.setup()
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Import your models for use in your script
from content.models import *
############################################################################
## START OF APPLICATION
############################################################################
import requests
import json
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.utils import ChromeType
from urllib.parse import urlparse, parse_qs
from PIL import Image, ImageChops, ImageDraw2, ImageFont
from io import BytesIO
import boto3
SPACES_APIKEY = os.environ.get('SPACES_APIKEY')
SPACES_APISECRET = os.environ.get('SPACES_APISECRET')
"""
class NewsSources(models.Model):
name = models.CharField(max_length=100, blank=True)
title = models.TextField(blank=False)
domain = models.CharField(max_length=100, blank=False)
rss_url = models.URLField()
article_selector = models.CharField(max_length=255)
region = models.CharField(max_length=5, blank=True)
def __str__(self):
return self.name
class News(models.Model):
cryptopanic_id = models.IntegerField(blank=True)
cryptopanic_url = models.URLField(blank=True)
type = models.CharField(max_length=20, blank=False)
domain = models.CharField(max_length=100, blank=True, null=True)
projects = models.ManyToManyField(Project)
# Note! The JSON1 module needs to be enabled in SQLite, if you get an error this might be the problem.
votes = models.JSONField(blank=True, null=True)
article_url = models.URLField(blank=False)
source = models.ForeignKey(NewsSources, on_delete=models.SET_NULL, null=True)
publish_data = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
"""
def resize_and_store(news_id, image_url):
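# Downloads the article image through a SOCKS5 proxy with browser-like headers, scales it so it
# covers a 1200x629 canvas, center-crops it, saves the PNG under static/content/media/ and then
# uploads it to the DigitalOcean Space via upload_image_to_s3().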
try:
if image_url:
file_path = f"{BASE_PATH}/static/content/media/news_image_{news_id}.png"
proxies = {'http': "socks5://84.107.32.223:1080", 'https': "socks5://84.107.32.223:1080"}
headers = {
"Connection": "keep-alive",
"DNT": "1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Dest": "document",
"Referer": "https://www.google.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8" }
response = requests.get(image_url, proxies=proxies, headers=headers)
if response.status_code == 200:
img = Image.open(BytesIO(response.content))
if img.height < 629:
myheight = 629
hpercent = (myheight/float(img.size[1]))
wsize = int((float(img.size[0])*float(hpercent)))
img = img.resize((wsize,myheight), resample=Image.ANTIALIAS)
mywidth = 1200
wpercent = (mywidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((mywidth,hsize), resample=Image.ANTIALIAS)
new_width = 1200
new_height = 629
width, height = img.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
# Crop the center of the image
im = img.crop((left, top, right, bottom))
im.save(file_path, format="png")
print(f"saving: {file_path}")
if upload_image_to_s3(file_path,f"news_image_{news_id}.png"):
print("image uploaded to s3")
else:
print("image not uploaded to s3")
else:
print(f"Response code {response.status_code} message: {response.text}")
except Exception as e:
print(f"Error reading image with error: {e}")
def check_news_source(source):
try:
if NewsSources.objects.filter(title=source['title']).exists():
return True
else:
try:
title = source['title']
except Exception as e:
print("No title found for source {source}")
try:
domain = source['domain']
except Exception as e:
print("No domain found for source {source}")
try:
region = source['region']
except Exception as e:
print("No region found for source {source}")
try:
path = source['path']
except Exception as e:
print("No path found for source {source}")
NewsSources.objects.create(domain=domain, region=region, title=title, path=path)
return True
except Exception as e:
print(f"Trouble checking and adding the news source with error {e}")
return False
def extract_video_id(url):
query = urlparse(url)
if query.hostname == 'youtu.be': return query.path[1:]
if query.hostname in {'www.youtube.com', 'youtube.com'}:
if query.path == '/watch': return parse_qs(query.query)['v'][0]
if query.path[:7] == '/watch/': return query.path.split('/')[1]
if query.path[:7] == '/embed/': return query.path.split('/')[2]
if query.path[:3] == '/v/': return query.path.split('/')[2]
# below is optional for playlists
if query.path[:9] == '/playlist': return parse_qs(query.query)['list'][0]
# returns None for invalid YouTube url
return None
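# Illustrative examples of extract_video_id (the IDs are hypothetical):
# extract_video_id("https://youtu.be/abc123")                -> "abc123"
# extract_video_id("https://www.youtube.com/watch?v=abc123") -> "abc123"
# extract_video_id("https://www.youtube.com/embed/abc123")   -> "abc123"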
def get_real_url(cryptopanic_url,source_domain):
print(f"getting real url for cryptopanic_url: {cryptopanic_url}")
ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36'
os.environ['WDM_LOG_LEVEL'] = '0'
os.environ['WDM_PRINT_FIRST_LINE'] = 'False'
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('ignore-certificate-errors')
chrome_options.add_argument('--proxy-server=socks5://84.107.32.223:1080')
chrome_options.add_argument(f"--user-agent={ua}")
try:
if sys.platform == "darwin":
browser = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
else:
browser = webdriver.Chrome(executable_path="chromedriver", options=chrome_options)
browser.get(cryptopanic_url)
time.sleep(3)
url = browser.find_element(By.XPATH, '//*[@id="detail_pane"]/div[1]/h1/a[2]').get_attribute('href')
print(f"article url: {url}")
browser.quit()
return url
except Exception as e:
print(f"error: {e}")
def get_article_image(article_url):
if 'youtube.com' in article_url or 'youtu.be' in article_url:
video_id = extract_video_id(article_url)
url = f"https://metafetcher.gurustacks.com/video/youtube/{video_id}"
response = requests.get(url)
if response.status_code==200:
if 'standard' in response.json()['images']:
return response.json()['images']['standard']['url']
else:
return None
elif 'twitter.com' in article_url:
url = f"https://metafetcher.gurustacks.com/website/{article_url}"
response = requests.get(url)
if response.status_code==200:
if 'icon_192x192' in response.json()['images']:
return response.json()['images']['icon_192x192']
else:
return None
else:
url = f"https://metafetcher.gurustacks.com/website/{article_url}"
response = requests.get(url)
if response.status_code==200:
if 'image' in response.json()['images']:
return response.json()['images']['image']
else:
return None
def upload_image_to_s3(image_url, image_name):
try:
session = boto3.session.Session()
client = session.client('s3', region_name='ams3',
endpoint_url='https://ams3.digitaloceanspaces.com',
aws_access_key_id=SPACES_APIKEY,
aws_secret_access_key=SPACES_APISECRET)
client.upload_file(image_url, 'cryptapi-news-images', image_name, ExtraArgs={'ACL':'public-read'})
return True
except Exception as e:
print(f"Error uploading file: {e}")
return False
for num in range(1,5):
url = f"https://cryptopanic.com/api/v1/posts/?auth_token={os.environ.get('CRYPTO_PANIC_API_KEY')}&page={num}"
try:
response = requests.get(url)
if response.status_code==200:
for item in response.json()['results']:
if check_news_source(item['source']):
cryptopanic_id = item['id']
if item['source']['domain'] == 'twitter.com':
type = "twitter"
else:
type = item['kind']
title = item['title']
published_at = item['published_at']
cryptopanic_url = item['url']
votes = item['votes']
domain = item['domain']
try:
source_obj = NewsSources.objects.get(title=item['source']['title'])
except Exception as e:
print(f"News Source Not Found with error {e} for {item}")
if News.objects.filter(cryptopanic_id=cryptopanic_id).exists():
try:
news = News.objects.get(cryptopanic_id=cryptopanic_id)
if news.article_url == "":
article_url = get_real_url(cryptopanic_url,item['source']['domain'])
if article_url:
news.article_url = article_url
else:
news.delete()
continue
news.votes = item['votes']
news.title = item['title']
news.save()
print(f"Updating news item {item['title']}")
except Exception as e:
print(f"Failed updating news item with error {e}")
else:
try:
article_url = get_real_url(cryptopanic_url,item['source']['domain'])
if article_url is not None:
article_image = get_article_image(article_url)
if article_image is not None:
news_item = News.objects.create(cryptopanic_id=cryptopanic_id, article_url=article_url, type=type, title=title, image=article_image, domain=domain, published_at=published_at, cryptopanic_url=cryptopanic_url, votes=votes, source=source_obj)
print(f"Adding news item with title {title} and new news_id: {news_item.id}")
# Resize and store image
if article_image:
try:
resize_and_store(news_item.id, news_item.image)
except Exception as e:
print("Failed downloading image for news item")
try:
if 'currencies' in item.keys():
for currency in item['currencies']:
symbol = currency['code'].lower()
if Project.objects.filter(symbol=symbol,status='ACTIVE').exists():
news_item.projects.add(Project.objects.filter(symbol=symbol,status='ACTIVE').first())
print(f"adding {symbol} to news item")
else:
print(f"No project found for currency {symbol}")
except Exception as e:
print(f"Problems adding projects to news item with error {e}")
else:
raise Exception(f"No image found for news item {item['url']}")
else:
raise Exception("Article URL not found")
except Exception as e:
print(f"Failed adding news item with error {e}")
else:
print(f"Problems with the news source.. Skipping..")
else:
print(f"Not Hotdog! {response.status_code}")
time.sleep(5)
except Exception as e:
print("Time out! Skipping")
| barrydaniels-nl/crypto-api | ctimanager/scripts/update_news_items.py | update_news_items.py | py | 15,028 | python | en | code | 0 | github-code | 6 |
| 3345686330 |
import argparse
import subprocess
import os.path
import math
def dispatch(out_file, err_file, cmd, go, num_cores=1, num_nodes=1, max_hours=1, memory_in_gb=16):
"""
Populates 'runscript.sh' file to run 'dqn_original.py' file
on cluster's GPU partition for 'max_hours' hours with 1 node, 1 core, and 32GB memory
"""
with open('runscript.sh', 'w+') as f:
f.write(
f"""#!/bin/bash
#SBATCH -n {num_cores} # Number of cores
#SBATCH -N {num_nodes} # Ensure that all cores are on one machine
#SBATCH -t {format_time(max_hours)} # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -p gpu # Partition to submit to
#SBATCH --gres=gpu # number of GPUs (here 1; see also --gres=gpu:n)
#SBATCH --mem={gb_to_mb(memory_in_gb)} # Memory pool for all cores (see also --mem-per-cpu)
#SBATCH -o {out_file} # File to which STDOUT will be written, %j inserts jobid
#SBATCH -e {err_file} # File to which STDERR will be written, %j inserts jobid
module load Anaconda3/5.0.1-fasrc01 # Load modules
module load cudnn/7.6.5.32_cuda10.1-fasrc01
module load cuda/10.0.130-fasrc01
source activate gpt2 # Switch to correct conda environment
{cmd} # Run code
"""
)
if go:
subprocess.call(['sbatch', 'runscript.sh'])
def format_time(total_hours):
'''Converts hours to D-HH:MM format.'''
days = total_hours // 24
frac_hour, hours = math.modf(total_hours % 24)
minutes = math.ceil(frac_hour * 60.0)
if minutes == 60:
hours += 1
minutes = 0
if hours == 24:
hours = 0
days += 1
return f'{int(days)}-{int(hours):02d}:{int(minutes):02d}'
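# Illustrative examples:
# format_time(1.5)  -> '0-01:30'
# format_time(25.0) -> '1-01:00'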
def gb_to_mb(gb):
'''Converts gb to mb'''
mb = int(gb * 1000)
return mb
def print_red(string):
print('\033[1;31;40m' + string + '\033[0;37;40m')
def print_yellow(string):
print('\033[1;33;40m' + string + '\033[0;37;40m')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('run_name', type=str,
help="""
(str) Base name for output and error files to which SLURM writes results,
and ID for storing checkpoints and samples.
""")
parser.add_argument('dataset', type=str,
help='(str) Path to dataset for training')
parser.add_argument('restore_from', type=str,
help='(str) Either "latest", "fresh", or a path to a checkpoint file')
parser.add_argument('--sample_every', default=100, type=int,
help='(int) How often to generate samples (every N steps)')
parser.add_argument('--save_every', default=1000, type=int,
help='(int) How often to create model checkpoint (every N steps)')
parser.add_argument('--go', action='store_true',
help='(flag) Submits jobs to cluster if present. Default disabled')
parser.add_argument('--num_cores', default=1, type=int,
help='(int) Number of cores to run on')
parser.add_argument('--num_nodes', default=1, type=int,
help='(int) Number of nodes to run on')
parser.add_argument('--hours', default=1., type=float,
help='(float) Wall clock time to request on SLURM')
parser.add_argument('--gb_memory', default=16., type=float,
help='(float) Memory (in GB) to request')
args = parser.parse_args()
basename = args.run_name
out_file = basename + '.txt'
err_file = basename + '.err.txt'
cmd = f'python3 train.py --dataset {args.dataset} --restore_from {args.restore_from} --run_name {args.run_name}\
--sample_every {args.sample_every} --save_every {args.save_every}'
# If file for a configuration exists, skip over that configuration
if os.path.exists(out_file) or os.path.exists(err_file):
print_red(f'{basename} (already exists; skipping)')
else:
# Otherwise, generate and run script on cluster
# Populates 'runscript.sh' file to run specified file
# on cluster's GPU partition with specified number of nodes, cores, and memory
# Dispatches 'runscript.sh' to SLURM if '--go' flag was specified in CLI
print(basename)
dispatch(out_file=out_file,
err_file=err_file,
cmd=cmd,
go=args.go,
num_cores=args.num_cores,
num_nodes=args.num_nodes,
max_hours=args.hours,
memory_in_gb=args.gb_memory)
if not args.go:
print_yellow('''
*** This was just a test! No jobs were actually dispatched.
*** If the output looks correct, re-run with the "--go" argument.''')
print(flush=True)
if __name__ == '__main__':
main()
| osiajod/cs205_project | singlenode_parallel/src/cluster_serialtrain.py | cluster_serialtrain.py | py | 4,870 | python | en | code | 0 | github-code | 6 |
| 35007806774 |
from src.main.python.Solution import Solution
# Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target?
# Find all unique quadruplets in the array which gives the sum of target.
#
# Note:
# Elements in a quadruplet (a,b,c,d) must be in non-descending order. (ie, a ≤ b ≤ c ≤ d)
# The solution set must not contain duplicate quadruplets.
#
# For example, given array S = {1 0 -1 0 -2 2}, and target = 0.
#
# A solution set is:
# (-1, 0, 0, 1)
# (-2, -1, 1, 2)
# (-2, 0, 0, 2)
class Q018(Solution):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
ans = []
if nums:
nums.sort()
a = 0
while a < len(nums)-3:
b = a+1
while b < len(nums)-2:
c, d = b+1, len(nums)-1
while c < d:
sum = nums[a]+nums[b]+nums[c]+nums[d]
if sum == target:
ans.append([nums[a], nums[b], nums[c], nums[d]])
c += 1
while c < d and nums[c] == nums[c-1]:
c += 1
d -= 1
while c < d and nums[d] == nums[d+1]:
d -= 1
elif sum < target:
c += 1
else:
d -= 1
b += 1
while b < len(nums)-2 and nums[b] == nums[b-1]:
b += 1
a += 1
while a < len(nums)-3 and nums[a] == nums[a-1]:
a += 1
return ans
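# Example from the problem statement above (assuming Solution needs no constructor arguments):
# Q018().fourSum([1, 0, -1, 0, -2, 2], 0)
# -> [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]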
| renkeji/leetcode | python/src/main/python/Q018.py | Q018.py | py | 1,844 | python | en | code | 0 | github-code | 6 |
| 7170828604 |
#Answer to Find the Runner-Up Score!
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
j=-100
k=max(arr)
for i in arr:
if(i>j):
if(i!=k):
j=i
print(j)
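# Sample run: for n = 5 and the scores "2 3 6 6 5", the runner-up score printed is 5.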
| CompetitiveCode/hackerrank-python | Practice/Basic Data Types/Find the Runner-Up Score!.py | Find the Runner-Up Score!.py | py | 247 | python | en | code | 1 | github-code | 6 |
| 17177402704 |
"""
Kaming Yip
CS677 A1 Data Science with Python
Apr 3, 2020
Assignment 9.3: Random Forest
"""
from pandas_datareader import data as web
import os
import pandas as pd
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
def main():
def get_stock(ticker, start_date, end_date):
"""
download the historical data from Yahoo Finance
& manipulate the data to create the desired columns
"""
try:
df = web.get_data_yahoo(ticker, start=start_date, end=end_date)
df['Return'] = df['Adj Close'].pct_change()
df['Return'].fillna(0, inplace = True)
df['Return'] = 100.0 * df['Return']
df['Return'] = df['Return'].round(3)
df['Date'] = df.index
df['Date'] = pd.to_datetime(df['Date'])
df['Month'] = df['Date'].dt.month
df['Year'] = df['Date'].dt.year
df['Day'] = df['Date'].dt.day
for col in ['Open', 'High', 'Low', 'Close', 'Adj Close']:
df[col] = df[col].round(2)
df['Weekday'] = df['Date'].dt.weekday_name
df['Week_Number'] = df['Date'].dt.strftime('%U')
df['Year_Week'] = df['Date'].dt.strftime('%Y-%U')
col_list = ['Date', 'Year', 'Month', 'Day', 'Weekday',
'Week_Number', 'Year_Week', 'Open',
'High', 'Low', 'Close', 'Volume', 'Adj Close',
'Return']
num_lines = len(df)
df = df[col_list]
print('read', num_lines, 'lines of data for ticker:' , ticker)
return df
except Exception as error:
print(error)
return None
# design the selected stock name and time frame
try:
ticker='YELP'
input_dir = os.getcwd()
output_file = os.path.join(input_dir, ticker + '.csv')
df = get_stock(ticker, start_date='2016-01-01', end_date='2019-12-31')
df.to_csv(output_file, index=False)
print('wrote ' + str(len(df)) + ' lines to file: ' + ticker + '.csv', end = "\n\n" + "-" * 50 + "\n\n")
except Exception as e:
print(e)
print('failed to get Yahoo stock data for ticker: ', ticker, end = "\n\n" + "-" * 50 + "\n\n")
def weekly_return_volatility(data, start_date, end_date):
"""
calculate the weekly mean return and volatility
& return a new dataframe containing this info
"""
try:
df_2 = data[data['Date'] >= start_date]
df_2 = df_2[df_2['Date'] <= end_date]
df_2 = df_2[['Year', 'Week_Number', 'Open', 'Adj Close', 'Return']]
df_2.index = range(len(df_2))
df_grouped = df_2.groupby(['Year', 'Week_Number'])['Return'].agg([np.mean, np.std])
df_grouped.reset_index(['Year', 'Week_Number'], inplace=True)
df_grouped.rename(columns={'mean': 'mean_return', 'std':'volatility'}, inplace=True)
df_grouped.fillna(0, inplace=True)
df_grouped["Open"] = df_2.groupby(["Year", "Week_Number"])["Open"].head(1).\
reset_index(drop = True).copy()
df_grouped["Adj Close"] = df_2.groupby(["Year", "Week_Number"])["Adj Close"].tail(1).\
reset_index(drop = True).copy()
return df_grouped
except Exception as error:
print(error)
return None
# create the weekly dataframe with mean return and volatility values
try:
df_weekly = weekly_return_volatility(df, start_date='2018-01-01', end_date='2019-12-31')
except Exception as e:
print("Error in weekly_return_volatility: ", end = " ")
print(e)
def weekly_label(data, year):
"""
label each week "Green" when its mean return is at or above the year's median and its volatility is at or below the median, otherwise "Red"
"""
try:
df_label = data[data["Year"] == year].copy()
mean_return_percent50 = np.percentile(df_label["mean_return"], 50)
volatility_percent50 = np.percentile(df_label["volatility"], 50)
df_label["True Label"] = np.where((df_label["mean_return"] >= mean_return_percent50) & \
(df_label["volatility"] <= volatility_percent50), "Green", "Red")
return df_label
except Exception as error:
print(error)
return None
try:
df_labeling = pd.DataFrame()
for year in [2018, 2019]:
df_year_label = weekly_label(df_weekly, year)
label_count = df_year_label.groupby("True Label")["True Label"].size().to_frame(name = "Freq")
print("Label Count for Year {0}".format(year))
print(tabulate(label_count, headers = "keys", numalign = "right"), end = "\n\n")
df_labeling = df_labeling.append(df_year_label, ignore_index = True)
df_labeling["Week_Number"] = df_labeling["Week_Number"].astype(int)
except Exception as e:
print("Error in weekly_label:", end = " ")
print(e)
def random_forest(train_data, test_data, predictor, N, d):
# train the Random Forest model by stock data in year 1
train_X = train_data[predictor].values
le = LabelEncoder()
train_Y = le.fit_transform(train_data["True Label"].values)
model = RandomForestClassifier(n_estimators = N, max_depth = d,
criterion = "entropy", random_state = 3)
model.fit(train_X, train_Y)
# predict the labels in year 2
test_X = test_data[predictor].values
test_Y = le.fit_transform(test_data["True Label"].values)
pred_Y = model.predict(test_X)
error_rate = np.mean(pred_Y != test_Y)
pred_Y = le.inverse_transform(pred_Y)
return pred_Y, error_rate
def designed_confusion_matrix(actual, pred):
cm = confusion_matrix(actual, pred)
list_of_tuples = list(zip(cm[0], cm[1]))
designed_cm = pd.DataFrame(list_of_tuples,
columns = ["Actual Green", "Actual Red"],
index = ["Predicted Green", "Predicted Red"])
diagonal_sum = cm.trace()
sum_of_all_elements = cm.sum()
accuracy = diagonal_sum / sum_of_all_elements
TPR = cm[0,0]/(cm[0,0] + cm[0,1])
TNR = cm[1,1]/(cm[1,0] + cm[1,1])
return designed_cm, accuracy, TPR, TNR
def printout(actual, pred, year):
cm, accuracy, TPR, TNR = designed_confusion_matrix(actual, pred)
print(" * The Confusion Matrix for Year {0} * ".format(year),
cm,
"The accuracy of this model is {0:.3f}.\n".format(accuracy) +\
"The true positive rate of this model is {0:.3f}.\n".format(TPR) +\
"The true negative rate of this model is {0:.3f}.\n".format(TNR),
sep = "\n\n", end = "\n\n")
def trade_with_labels(data, col_name):
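# Label-following strategy: start with $100, enter a long position at the weekly Open when the
# label in col_name is "Green", sell at the Adj Close once the next week's label turns "Red",
# and record the running balance for every week.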
money = 100.0
shares = 0.0
position = "No"
balance = []
df_trade_labels = data.copy()
for i in range(len(df_trade_labels) - 1):
if i == 0:
label = df_trade_labels.iloc[i][col_name]
if label == "Green":
shares = money / df_trade_labels.iloc[i]["Open"]
money = 0.0
position = "Long"
balance.append(shares * df_trade_labels.iloc[i]["Adj Close"])
else:
balance.append(money)
else:
label = df_trade_labels.iloc[i+1][col_name]
if label == "Red":
if position == "Long":
money = shares * df_trade_labels.iloc[i]["Adj Close"]
shares = 0.0
position = "No"
balance.append(money)
else:
if position == "No":
shares = money / df_trade_labels.iloc[i+1]["Open"]
money = 0.0
position = "Long"
balance.append(shares * df_trade_labels.iloc[i]["Adj Close"])
if position == "Long":
balance.append(shares * df_trade_labels.iloc[-1]["Adj Close"])
else:
balance.append(money)
return balance
def script_text(data, year, col_name):
label_text_max = "{0} Week {1}\nmax ${2}".\
format(year,
data.iloc[data[data["Year"] == year][col_name].idxmax()]["Week_Number"],
round(data[data["Year"] == year][col_name].max(), 2))
label_x_max = data[data["Year"] == year][col_name].idxmax()
label_y_max = round(data[data["Year"] == year][col_name].max(), 2)
label_text_min = "{0} Week {1}\nmin ${2}".\
format(year,
data.iloc[data[data["Year"] == year][col_name].idxmin()]["Week_Number"],
round(data[data["Year"] == year][col_name].min(), 2))
label_x_min = data[data["Year"] == year][col_name].idxmin()
label_y_min = round(data[data["Year"] == year][col_name].min(), 2)
label_text_final = "{0} Final:\n${1}".format(year, round(data[data["Year"] == year].iloc[-1][col_name], 2))
label_x_final = data[data["Year"] == year].tail(1).index.values
label_y_final = round(data[data["Year"] == year].iloc[-1][col_name], 2)
return label_text_max, label_x_max, label_y_max,\
label_text_min, label_x_min, label_y_min,\
label_text_final, label_x_final, label_y_final
def buy_n_hold(data):
money = 100.0
shares = 0.0
balance = []
df_buy_hold = data.copy()
for i in range(len(df_buy_hold)):
if i == 0:
shares = money / df_buy_hold.iloc[i]["Open"]
balance.append(shares * df_buy_hold.iloc[i]["Adj Close"])
return balance
########## Q1 ##########
print("\n" + "#" * 35 + " Q1 " + "#" * 35 + "\n")
try:
df_2018 = df_labeling.loc[df_labeling["Year"] == 2018].copy().reset_index(drop = True)
df_2019 = df_labeling.loc[df_labeling["Year"] == 2019].copy().reset_index(drop = True)
predictor = ["mean_return", "volatility"]
Y_2019 = df_2019[["True Label"]].values
N_list = list(range(1, 11))
d_list = list(range(1, 6))
x_list = []
y_list = []
size_list = []
results = pd.DataFrame(columns = N_list, index = d_list)
best_combo = [0, 0, float("inf")]
for N in N_list:
for d in d_list:
x_list.append(N)
y_list.append(d)
pred_Y, error_rate = random_forest(df_2018, df_2019, predictor, N, d)
results.loc[d, N] = error_rate
size_list.append(error_rate)
if error_rate < best_combo[2]:
best_combo = [N, d, error_rate]
else:
pass
results = results.astype(float)
print(" " * 10 + " * The Error Rate Results of Different Random Forests * ",
tabulate(results.round(3), headers = "keys", numalign = "left"),
sep = "\n\n", end = "\n\n")
min_size, max_size = min(size_list), max(size_list)
for i, ele in enumerate(size_list):
size_list[i] = (ele - min_size) / (max_size - min_size) * 800
plt.figure(figsize = (8, 4))
plt.scatter(x_list, y_list, marker = ".", s = size_list)
plt.title("The Error Rates with Different Combinations of N and d")
plt.xlabel("Number of Trees (N)")
plt.xticks(N_list)
plt.ylabel("Max Depth of Each Subtree (d)")
plt.yticks(d_list)
plt.show()
print("\nAs displayed above, the best combination is N = {0:d}, d = {1:d},".\
format(best_combo[0], best_combo[1]),
"with the minimal error rate of {0:.3f}.".format(best_combo[2]),
sep = "\n")
except Exception as e:
print("Error in Question 1:", end = " ")
print(e)
########## Q2 & Q3 ##########
print("\n" + "#" * 35 + " Q2 & Q3 " + "#" * 35 + "\n")
try:
optimal_N, optimal_d = best_combo[0], best_combo[1]
pred_Y, error = random_forest(df_2018, df_2019, predictor, optimal_N, optimal_d)
printout(Y_2019, pred_Y, 2019)
except Exception as e:
print("Error in Question 2:", end = " ")
print(e)
########## Q4 ##########
print("\n" + "#" * 35 + " Q4 " + "#" * 35 + "\n")
try:
df_trading = df_labeling[df_labeling["Year"] == 2019].copy().reset_index(drop = True)
df_trading["True Label Balance"] = trade_with_labels(df_trading, "True Label")
df_trading["Buy and Hold Balance"] = buy_n_hold(df_trading)
df_trading["Random Forest Label"] = pred_Y
df_trading["Random Forest Balance"] = trade_with_labels(df_trading, "Random Forest Label")
fig, ax = plt.subplots(figsize = (9, 5))
label_text_max_2019, label_x_max_2019, label_y_max_2019,\
label_text_min_2019, label_x_min_2019, label_y_min_2019,\
label_text_final_2019, label_x_final_2019, label_y_final_2019 =\
script_text(df_trading, 2019, "True Label Balance")
forest_text_max_2019, forest_x_max_2019, forest_y_max_2019,\
forest_text_min_2019, forest_x_min_2019, forest_y_min_2019,\
forest_text_final_2019, forest_x_final_2019, forest_y_final_2019 =\
script_text(df_trading, 2019, "Random Forest Balance")
buy_hold_text_max_2019, buy_hold_x_max_2019, buy_hold_y_max_2019,\
buy_hold_text_min_2019, buy_hold_x_min_2019, buy_hold_y_min_2019,\
buy_hold_text_final_2019, buy_hold_x_final_2019, buy_hold_y_final_2019 =\
script_text(df_trading, 2019, "Buy and Hold Balance")
# Trading with True Label
ax.plot(df_trading.index, "True Label Balance", data = df_trading, color = "blue")
ax.annotate(label_text_max_2019, xy = (label_x_max_2019, label_y_max_2019), xycoords = "data",
xytext = (label_x_max_2019+5, label_y_max_2019+5), color = "blue",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
ha = "left", va = "bottom")
ax.annotate(label_text_min_2019, xy = (label_x_min_2019, label_y_min_2019), xycoords = "data",
xytext = (label_x_min_2019+5, label_y_min_2019+17), color = "blue",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
ha = "left", va = "bottom")
ax.annotate(label_text_final_2019, xy = (label_x_final_2019, label_y_final_2019), xycoords = "data",
xytext = (label_x_final_2019+5, label_y_final_2019-5), color = "blue",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
ha = "left", va = "bottom")
# Buy and Hold
ax.plot(df_trading.index, "Buy and Hold Balance", data = df_trading, color = "red")
ax.annotate(buy_hold_text_max_2019, xy = (buy_hold_x_max_2019, buy_hold_y_max_2019), xycoords = "data",
xytext = (buy_hold_x_max_2019+5, buy_hold_y_max_2019+11), color = "red",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
ha = "left", va = "bottom")
ax.annotate(buy_hold_text_min_2019, xy = (buy_hold_x_min_2019, buy_hold_y_min_2019), xycoords = "data",
xytext = (buy_hold_x_min_2019+4, buy_hold_y_min_2019+2), color = "red",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
ha = "left", va = "bottom")
ax.annotate(buy_hold_text_final_2019, xy = (buy_hold_x_final_2019, buy_hold_y_final_2019), xycoords = "data",
xytext = (buy_hold_x_final_2019+5, buy_hold_y_final_2019-2), color = "red",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
ha = "left", va = "bottom")
# Trading with Decision Tree Label
ax.plot(df_trading.index, "Random Forest Balance", data = df_trading, color = "green")
ax.annotate(forest_text_max_2019, xy = (forest_x_max_2019, forest_y_max_2019), xycoords = "data",
xytext = (forest_x_max_2019+5, forest_y_max_2019+5), color = "green",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
ha = "left", va = "bottom")
ax.annotate(forest_text_min_2019, xy = (forest_x_min_2019, forest_y_min_2019), xycoords = "data",
xytext = (forest_x_min_2019+5, forest_y_min_2019+27), color = "green",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
ha = "left", va = "bottom")
ax.annotate(forest_text_final_2019, xy = (forest_x_final_2019, forest_y_final_2019), xycoords = "data",
xytext = (forest_x_final_2019+5, forest_y_final_2019-15), color = "green",
arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
ha = "left", va = "bottom")
plt.title("* Year 2019 *\n" + "Performance against Different Investing Strategies", loc = "center")
plt.xlabel("Week Number")
plt.xticks(np.arange(0, 60, 5))
plt.ylabel("Total Balance($)")
plt.legend()
plt.show()
print("\nAs displayed in the plot above, the {0} strategy results in a".\
format("buy-and-hold" if buy_hold_y_final_2019 > forest_y_final_2019 else "Random Forest Classifier"),
"larger amount as ${0} at the end of the year 2019.".\
format(buy_hold_y_final_2019 if buy_hold_y_final_2019 > forest_y_final_2019 else forest_y_final_2019),
sep = "\n")
except Exception as e:
print("Error in Question 4:", end = " ")
print(e)
main()
| KamingYip/Trading_Strategies_with_Stock_Data | Random Forest.py | Random Forest.py | py | 18,943 | python | en | code | 3 | github-code | 6 |
| 36363712042 |
import pyodbc
from db_connect_oop import *
class NWEmployees(MSDBConnection):
def all_employees(self):
query = 'select * from employees'
data = self._MSDBConnection__sql_query(query)
while True:
record = data.fetchone()
if record is None:
break
print(record)
def one_employee(self):
employee_id = input('Input employee id: ')
query = f"select * from employees where EmployeeID = '{employee_id}'"
employee_id_info = self._MSDBConnection__sql_query(query).fetchone()
return employee_id_info
def employee_search(self):
employee_name = input('Insert employee name: ')
query = f"select * from employees where FirstName like '%{employee_name}%' or LastName like '%{employee_name}%'"
employee_name_info = self._MSDBConnection__sql_query(query).fetchone()
return employee_name_info
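# Note: the methods above interpolate raw user input directly into SQL strings, which is open to
# SQL injection. A minimal sketch of a parameterized variant is shown below; it assumes direct
# access to a pyodbc cursor, since the internals of MSDBConnection are not shown in this file.
def one_employee_safe(cursor, employee_id):
    """Hypothetical helper: same lookup as one_employee, but with a '?' placeholder."""
    # pyodbc binds the parameters itself, so employee_id is never spliced into the SQL text
    cursor.execute("select * from employees where EmployeeID = ?", (employee_id,))
    return cursor.fetchone()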
| dilanmorar/pyodbc_connection | db_employees_oop.py | db_employees_oop.py | py | 931 | python | en | code | 0 | github-code | 6 |
| 28989994372 |
if True:
from PyQt5.QtCore import pyqtSlot, QSettings
from PyQt5.QtWidgets import QApplication, QDialog, QDialogButtonBox, QTableWidgetItem
from PyQt5.QtXml import QDomDocument
else:
from PyQt4.QtCore import pyqtSlot, QSettings
from PyQt4.QtGui import QApplication, QDialog, QDialogButtonBox, QTableWidgetItem
from PyQt4.QtXml import QDomDocument
# ------------------------------------------------------------------------------------------------------------
# Imports (Custom Stuff)
import ui_catarina
import ui_catarina_addgroup
import ui_catarina_removegroup
import ui_catarina_renamegroup
import ui_catarina_addport
import ui_catarina_removeport
import ui_catarina_renameport
import ui_catarina_connectports
import ui_catarina_disconnectports
from shared_canvasjack import *
from shared_settings import *
# ------------------------------------------------------------------------------------------------------------
# Try Import OpenGL
try:
from PyQt5.QtOpenGL import QGLWidget
hasGL = True
except:
hasGL = False
# ------------------------------------------------------------------------------------------------------------
# Static Variables
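# List-index constants for the group, group-position, port and connection entries used by the dialogs below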
iGroupId = 0
iGroupName = 1
iGroupSplit = 2
iGroupIcon = 3
iGroupPosId = 0
iGroupPosX_o = 1
iGroupPosY_o = 2
iGroupPosX_i = 3
iGroupPosY_i = 4
iPortGroup = 0
iPortId = 1
iPortName = 2
iPortMode = 3
iPortType = 4
iConnId = 0
iConnOutput = 1
iConnInput = 2
# ------------------------------------------------------------------------------------------------------------
# Add Group Dialog
class CatarinaAddGroupW(QDialog, ui_catarina_addgroup.Ui_CatarinaAddGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list_names = []
for group in group_list:
self.m_group_list_names.append(group[iGroupName])
self.accepted.connect(self.slot_setReturn)
self.le_group_name.textChanged.connect(self.slot_checkText)
self.ret_group_name = ""
self.ret_group_split = False
@pyqtSlot(str)
def slot_checkText(self, text):
check = bool(text and text not in self.m_group_list_names)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
self.ret_group_name = self.le_group_name.text()
self.ret_group_split = self.cb_split.isChecked()
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Remove Group Dialog
class CatarinaRemoveGroupW(QDialog, ui_catarina_removegroup.Ui_CatarinaRemoveGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
index = 0
for group in group_list:
twi_group_id = QTableWidgetItem(str(group[iGroupId]))
twi_group_name = QTableWidgetItem(group[iGroupName])
twi_group_split = QTableWidgetItem("Yes" if (group[iGroupSplit]) else "No")
self.tw_group_list.insertRow(index)
self.tw_group_list.setItem(index, 0, twi_group_id)
self.tw_group_list.setItem(index, 1, twi_group_name)
self.tw_group_list.setItem(index, 2, twi_group_split)
index += 1
self.accepted.connect(self.slot_setReturn)
self.tw_group_list.cellDoubleClicked.connect(self.accept)
self.tw_group_list.currentCellChanged.connect(self.slot_checkCell)
self.ret_group_id = -1
@pyqtSlot(int)
def slot_checkCell(self, row):
check = bool(row >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.tw_group_list.rowCount() >= 0:
self.ret_group_id = int(self.tw_group_list.item(self.tw_group_list.currentRow(), 0).text())
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Rename Group Dialog
class CatarinaRenameGroupW(QDialog, ui_catarina_renamegroup.Ui_CatarinaRenameGroupW):
def __init__(self, parent, group_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list_names = []
for group in group_list:
self.cb_group_to_rename.addItem("%i - %s" % (group[iGroupId], group[iGroupName]))
self.m_group_list_names.append(group[iGroupName])
self.accepted.connect(self.slot_setReturn)
self.cb_group_to_rename.currentIndexChanged[int].connect(self.slot_checkItem)
self.le_new_group_name.textChanged.connect(self.slot_checkText)
self.ret_group_id = -1
self.ret_new_group_name = ""
@pyqtSlot(int)
def slot_checkItem(self, ignored):
self.slot_checkText(self.le_new_group_name.text())
@pyqtSlot(str)
def slot_checkText(self, text):
if self.cb_group_to_rename.count() > 0:
group_name = self.cb_group_to_rename.currentText().split(" - ", 1)[1]
check = bool(text and text != group_name and text not in self.m_group_list_names)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
self.ret_group_id = int(self.cb_group_to_rename.currentText().split(" - ", 1)[0])
self.ret_new_group_name = self.le_new_group_name.text()
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Add Port Dialog
class CatarinaAddPortW(QDialog, ui_catarina_addport.Ui_CatarinaAddPortW):
def __init__(self, parent, group_list, port_id):
QDialog.__init__(self, parent)
self.setupUi(self)
self.sb_port_id.setValue(port_id)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
for group in group_list:
self.cb_group.addItem("%i - %s" % (group[iGroupId], group[iGroupName]))
self.accepted.connect(self.slot_setReturn)
self.le_port_name.textChanged.connect(self.slot_checkText)
self.ret_group_id = -1
self.ret_port_name = ""
self.ret_port_mode = patchcanvas.PORT_MODE_NULL
self.ret_port_type = patchcanvas.PORT_TYPE_NULL
@pyqtSlot(str)
def slot_checkText(self, text):
check = bool(text)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.cb_group.count() > 0:
self.ret_group_id = int(self.cb_group.currentText().split(" ", 1)[0])
self.ret_port_name = self.le_port_name.text()
self.ret_port_mode = patchcanvas.PORT_MODE_INPUT if self.rb_flags_input.isChecked() else patchcanvas.PORT_MODE_OUTPUT
self.ret_port_type = self.cb_port_type.currentIndex() + 1 # 1, 2, 3 or 4 for patchcanvas types
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Remove Port Dialog
class CatarinaRemovePortW(QDialog, ui_catarina_removeport.Ui_CatarinaRemovePortW):
def __init__(self, parent, group_list, port_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.tw_port_list.setColumnWidth(0, 25)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list = group_list
self.m_port_list = port_list
self.accepted.connect(self.slot_setReturn)
self.tw_port_list.cellDoubleClicked.connect(self.accept)
self.tw_port_list.currentCellChanged.connect(self.slot_checkCell)
self.rb_input.clicked.connect(self.slot_reAddPorts)
self.rb_output.clicked.connect(self.slot_reAddPorts)
self.rb_audio_jack.clicked.connect(self.slot_reAddPorts)
self.rb_midi_jack.clicked.connect(self.slot_reAddPorts)
self.rb_midi_a2j.clicked.connect(self.slot_reAddPorts)
self.rb_midi_alsa.clicked.connect(self.slot_reAddPorts)
self.ret_port_id = -1
self.reAddPorts()
def reAddPorts(self):
self.tw_port_list.clearContents()
for x in range(self.tw_port_list.rowCount()):
self.tw_port_list.removeRow(0)
port_mode = patchcanvas.PORT_MODE_INPUT if (self.rb_input.isChecked()) else patchcanvas.PORT_MODE_OUTPUT
if self.rb_audio_jack.isChecked():
port_type = patchcanvas.PORT_TYPE_AUDIO_JACK
elif self.rb_midi_jack.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_JACK
elif self.rb_midi_a2j.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_A2J
elif self.rb_midi_alsa.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_ALSA
else:
print("CatarinaRemovePortW::reAddPorts() - Invalid port type")
return
index = 0
for port in self.m_port_list:
if port[iPortMode] == port_mode and port[iPortType] == port_type:
port_name = port[iPortName]
group_name = self.findPortGroupName(port[iPortGroup])
tw_port_id = QTableWidgetItem(str(port[iPortId]))
tw_port_name = QTableWidgetItem("%s:%s" % (group_name, port_name))
self.tw_port_list.insertRow(index)
self.tw_port_list.setItem(index, 0, tw_port_id)
self.tw_port_list.setItem(index, 1, tw_port_name)
index += 1
def findPortGroupName(self, group_id):
for group in self.m_group_list:
if group[iGroupId] == group_id:
return group[iGroupName]
return ""
@pyqtSlot()
def slot_reAddPorts(self):
self.reAddPorts()
@pyqtSlot(int)
def slot_checkCell(self, row):
check = bool(row >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.tw_port_list.rowCount() > 0:
self.ret_port_id = int(self.tw_port_list.item(self.tw_port_list.currentRow(), 0).text())
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Rename Port Dialog
class CatarinaRenamePortW(QDialog, ui_catarina_renameport.Ui_CatarinaRenamePortW):
def __init__(self, parent, group_list, port_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.tw_port_list.setColumnWidth(0, 25)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list = group_list
self.m_port_list = port_list
self.accepted.connect(self.slot_setReturn)
self.tw_port_list.currentCellChanged.connect(self.slot_checkCell)
self.le_new_name.textChanged.connect(self.slot_checkText)
self.rb_input.clicked.connect(self.slot_reAddPorts)
self.rb_output.clicked.connect(self.slot_reAddPorts)
self.rb_audio_jack.clicked.connect(self.slot_reAddPorts)
self.rb_midi_jack.clicked.connect(self.slot_reAddPorts)
self.rb_midi_a2j.clicked.connect(self.slot_reAddPorts)
self.rb_midi_alsa.clicked.connect(self.slot_reAddPorts)
self.ret_port_id = -1
self.ret_new_port_name = ""
self.reAddPorts()
def reAddPorts(self):
self.tw_port_list.clearContents()
for x in range(self.tw_port_list.rowCount()):
self.tw_port_list.removeRow(0)
port_mode = patchcanvas.PORT_MODE_INPUT if (self.rb_input.isChecked()) else patchcanvas.PORT_MODE_OUTPUT
if self.rb_audio_jack.isChecked():
port_type = patchcanvas.PORT_TYPE_AUDIO_JACK
elif self.rb_midi_jack.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_JACK
elif self.rb_midi_a2j.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_A2J
elif self.rb_midi_alsa.isChecked():
port_type = patchcanvas.PORT_TYPE_MIDI_ALSA
else:
print("CatarinaRenamePortW::reAddPorts() - Invalid port type")
return
index = 0
for port in self.m_port_list:
if port[iPortMode] == port_mode and port[iPortType] == port_type:
port_name = port[iPortName]
group_name = self.findPortGroupName(port[iPortGroup])
tw_port_id = QTableWidgetItem(str(port[iPortId]))
tw_port_name = QTableWidgetItem("%s:%s" % (group_name, port_name))
self.tw_port_list.insertRow(index)
self.tw_port_list.setItem(index, 0, tw_port_id)
self.tw_port_list.setItem(index, 1, tw_port_name)
index += 1
self.tw_port_list.setCurrentCell(0, 0)
def findPortGroupName(self, group_id):
for group in self.m_group_list:
if group[iGroupId] == group_id:
return group[iGroupName]
return ""
@pyqtSlot()
def slot_reAddPorts(self):
self.reAddPorts()
@pyqtSlot()
def slot_checkCell(self):
self.slot_checkText(self.le_new_name.text())
@pyqtSlot(str)
def slot_checkText(self, text):
check = bool(text and self.tw_port_list.currentRow() >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.tw_port_list.rowCount() > 0:
self.ret_port_id = int(self.tw_port_list.item(self.tw_port_list.currentRow(), 0).text())
self.ret_new_port_name = self.le_new_name.text()
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Connect Ports Dialog
class CatarinaConnectPortsW(QDialog, ui_catarina_connectports.Ui_CatarinaConnectPortsW):
def __init__(self, parent, group_list, port_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list = group_list
self.m_port_list = port_list
self.m_ports_audio_jack = []
self.m_ports_midi_jack = []
self.m_ports_midi_a2j = []
self.m_ports_midi_alsa = []
for port in self.m_port_list:
if port[iPortType] == patchcanvas.PORT_TYPE_AUDIO_JACK:
self.m_ports_audio_jack.append(port)
elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_JACK:
self.m_ports_midi_jack.append(port)
elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_A2J:
self.m_ports_midi_a2j.append(port)
elif port[iPortType] == patchcanvas.PORT_TYPE_MIDI_ALSA:
self.m_ports_midi_alsa.append(port)
self.accepted.connect(self.slot_setReturn)
self.rb_audio_jack.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_jack.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_a2j.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_alsa.clicked.connect(self.slot_portTypeChanged)
self.lw_outputs.currentRowChanged.connect(self.slot_checkOutSelection)
self.lw_inputs.currentRowChanged.connect(self.slot_checkInSelection)
self.ret_port_out_id = -1
self.ret_port_in_id = -1
self.slot_portTypeChanged()
def showPorts(self, ports):
self.lw_outputs.clear()
self.lw_inputs.clear()
for port in ports:
if port[iPortMode] == patchcanvas.PORT_MODE_INPUT:
self.lw_inputs.addItem("%i - %s:%s" % (port[iPortId], self.findPortGroupName(port[iPortGroup]), port[iPortName]))
elif port[iPortMode] == patchcanvas.PORT_MODE_OUTPUT:
self.lw_outputs.addItem("%i - %s:%s" % (port[iPortId], self.findPortGroupName(port[iPortGroup]), port[iPortName]))
def findPortGroupName(self, group_id):
for group in self.m_group_list:
if group[iGroupId] == group_id:
return group[iGroupName]
return ""
def checkSelection(self, out_row, in_row):
check = bool(out_row >= 0 and in_row >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_portTypeChanged(self):
if self.rb_audio_jack.isChecked():
ports = self.m_ports_audio_jack
elif self.rb_midi_jack.isChecked():
ports = self.m_ports_midi_jack
elif self.rb_midi_a2j.isChecked():
ports = self.m_ports_midi_a2j
elif self.rb_midi_alsa.isChecked():
ports = self.m_ports_midi_alsa
else:
print("CatarinaConnectPortstW::portTypeChanged() - Invalid port type")
return
self.showPorts(ports)
@pyqtSlot(int)
def slot_checkOutSelection(self, row):
self.checkSelection(row, self.lw_inputs.currentRow())
@pyqtSlot(int)
def slot_checkInSelection(self, row):
self.checkSelection(self.lw_outputs.currentRow(), row)
@pyqtSlot()
def slot_setReturn(self):
if self.lw_outputs.currentRow() >= 0 and self.lw_inputs.currentRow() >= 0:
self.ret_port_out_id = int(self.lw_outputs.currentItem().text().split(" - ", 1)[0])
self.ret_port_in_id = int(self.lw_inputs.currentItem().text().split(" - ", 1)[0])
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Disconnect Ports Dialog
class CatarinaDisconnectPortsW(QDialog, ui_catarina_disconnectports.Ui_CatarinaDisconnectPortsW):
def __init__(self, parent, group_list, port_list, connection_list):
QDialog.__init__(self, parent)
self.setupUi(self)
self.tw_connections.setColumnWidth(0, 225)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.m_group_list = group_list
self.m_port_list = port_list
self.m_connection_list = connection_list
self.accepted.connect(self.slot_setReturn)
self.tw_connections.cellDoubleClicked.connect(self.accept)
self.tw_connections.currentCellChanged.connect(self.slot_checkSelection)
self.rb_audio_jack.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_jack.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_a2j.clicked.connect(self.slot_portTypeChanged)
self.rb_midi_alsa.clicked.connect(self.slot_portTypeChanged)
self.ret_port_out_id = -1
self.ret_port_in_id = -1
self.slot_portTypeChanged()
def showPorts(self, ptype):
self.tw_connections.clearContents()
for x in range(self.tw_connections.rowCount()):
self.tw_connections.removeRow(0)
index = 0
for connection in self.m_connection_list:
if self.findPortType(connection[iConnOutput]) == ptype:
port_out_id = connection[iConnOutput]
port_out_name = self.findPortName(port_out_id)
port_in_id = connection[iConnInput]
port_in_name = self.findPortName(port_in_id)
tw_port_out = QTableWidgetItem("%i - %s" % (port_out_id, port_out_name))
tw_port_in = QTableWidgetItem("%i - %s" % (port_in_id, port_in_name))
self.tw_connections.insertRow(index)
self.tw_connections.setItem(index, 0, tw_port_out)
self.tw_connections.setItem(index, 1, tw_port_in)
index += 1
def findPortName(self, port_id):
for port in self.m_port_list:
if port[iPortId] == port_id:
return "%s:%s" % (self.findPortGroupName(port[iPortGroup]), port[iPortName])
return ""
def findPortType(self, port_id):
for port in self.m_port_list:
if port[iPortId] == port_id:
return port[iPortType]
return patchcanvas.PORT_TYPE_NULL
def findPortGroupName(self, group_id):
for group in self.m_group_list:
if group[iGroupId] == group_id:
return group[iGroupName]
return ""
@pyqtSlot()
def slot_portTypeChanged(self):
if self.rb_audio_jack.isChecked():
ptype = patchcanvas.PORT_TYPE_AUDIO_JACK
elif self.rb_midi_jack.isChecked():
ptype = patchcanvas.PORT_TYPE_MIDI_JACK
elif self.rb_midi_a2j.isChecked():
ptype = patchcanvas.PORT_TYPE_MIDI_A2J
elif self.rb_midi_alsa.isChecked():
ptype = patchcanvas.PORT_TYPE_MIDI_ALSA
else:
print("CatarinaDisconnectPortstW::portTypeChanged() - Invalid port type")
return
self.showPorts(ptype)
@pyqtSlot(int)
def slot_checkSelection(self, row):
check = bool(row >= 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(check)
@pyqtSlot()
def slot_setReturn(self):
if self.tw_connections.currentRow() >= 0:
self.ret_port_out_id = int(self.tw_connections.item(self.tw_connections.currentRow(), 0).text().split(" - ", 1)[0])
self.ret_port_in_id = int(self.tw_connections.item(self.tw_connections.currentRow(), 1).text().split(" - ", 1)[0])
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Main Window
class CatarinaMainW(AbstractCanvasJackClass):
def __init__(self, parent=None):
AbstractCanvasJackClass.__init__(self, "Catarina", ui_catarina.Ui_CatarinaMainW, parent)
self.loadSettings(True)
# -------------------------------------------------------------
# Set-up GUI
setIcons(self, ("canvas",))
self.ui.act_project_new.setIcon(getIcon("document-new"))
self.ui.act_project_open.setIcon(getIcon("document-open"))
self.ui.act_project_save.setIcon(getIcon("document-save"))
self.ui.act_project_save_as.setIcon(getIcon("document-save-as"))
self.ui.b_project_new.setIcon(getIcon("document-new"))
self.ui.b_project_open.setIcon(getIcon("document-open"))
self.ui.b_project_save.setIcon(getIcon("document-save"))
self.ui.b_project_save_as.setIcon(getIcon("document-save-as"))
self.ui.act_patchbay_add_group.setIcon(getIcon("list-add"))
self.ui.act_patchbay_remove_group.setIcon(getIcon("edit-delete"))
self.ui.act_patchbay_rename_group.setIcon(getIcon("edit-rename"))
self.ui.act_patchbay_add_port.setIcon(getIcon("list-add"))
self.ui.act_patchbay_remove_port.setIcon(getIcon("list-remove"))
self.ui.act_patchbay_rename_port.setIcon(getIcon("edit-rename"))
self.ui.act_patchbay_connect_ports.setIcon(getIcon("network-connect"))
self.ui.act_patchbay_disconnect_ports.setIcon(getIcon("network-disconnect"))
self.ui.b_group_add.setIcon(getIcon("list-add"))
self.ui.b_group_remove.setIcon(getIcon("edit-delete"))
self.ui.b_group_rename.setIcon(getIcon("edit-rename"))
self.ui.b_port_add.setIcon(getIcon("list-add"))
self.ui.b_port_remove.setIcon(getIcon("list-remove"))
self.ui.b_port_rename.setIcon(getIcon("edit-rename"))
self.ui.b_ports_connect.setIcon(getIcon("network-connect"))
self.ui.b_ports_disconnect.setIcon(getIcon("network-disconnect"))
self.scene = patchcanvas.PatchScene(self, self.ui.graphicsView)
self.ui.graphicsView.setScene(self.scene)
self.ui.graphicsView.setRenderHint(QPainter.Antialiasing, bool(self.fSavedSettings["Canvas/Antialiasing"] == patchcanvas.ANTIALIASING_FULL))
self.ui.graphicsView.setRenderHint(QPainter.TextAntialiasing, self.fSavedSettings["Canvas/TextAntialiasing"])
if self.fSavedSettings["Canvas/UseOpenGL"] and hasGL:
self.ui.graphicsView.setViewport(QGLWidget(self.ui.graphicsView))
self.ui.graphicsView.setRenderHint(QPainter.HighQualityAntialiasing, self.fSavedSettings["Canvas/HighQualityAntialiasing"])
p_options = patchcanvas.options_t()
p_options.theme_name = self.fSavedSettings["Canvas/Theme"]
p_options.auto_hide_groups = self.fSavedSettings["Canvas/AutoHideGroups"]
p_options.use_bezier_lines = self.fSavedSettings["Canvas/UseBezierLines"]
p_options.antialiasing = self.fSavedSettings["Canvas/Antialiasing"]
p_options.eyecandy = self.fSavedSettings["Canvas/EyeCandy"]
p_features = patchcanvas.features_t()
p_features.group_info = False
p_features.group_rename = True
p_features.port_info = True
p_features.port_rename = True
p_features.handle_group_pos = True
patchcanvas.setOptions(p_options)
patchcanvas.setFeatures(p_features)
patchcanvas.init("Catarina", self.scene, self.canvasCallback, DEBUG)
self.ui.act_project_new.triggered.connect(self.slot_projectNew)
self.ui.act_project_open.triggered.connect(self.slot_projectOpen)
self.ui.act_project_save.triggered.connect(self.slot_projectSave)
self.ui.act_project_save_as.triggered.connect(self.slot_projectSaveAs)
self.ui.b_project_new.clicked.connect(self.slot_projectNew)
self.ui.b_project_open.clicked.connect(self.slot_projectOpen)
self.ui.b_project_save.clicked.connect(self.slot_projectSave)
self.ui.b_project_save_as.clicked.connect(self.slot_projectSaveAs)
self.ui.act_patchbay_add_group.triggered.connect(self.slot_groupAdd)
self.ui.act_patchbay_remove_group.triggered.connect(self.slot_groupRemove)
self.ui.act_patchbay_rename_group.triggered.connect(self.slot_groupRename)
self.ui.act_patchbay_add_port.triggered.connect(self.slot_portAdd)
self.ui.act_patchbay_remove_port.triggered.connect(self.slot_portRemove)
self.ui.act_patchbay_rename_port.triggered.connect(self.slot_portRename)
self.ui.act_patchbay_connect_ports.triggered.connect(self.slot_connectPorts)
self.ui.act_patchbay_disconnect_ports.triggered.connect(self.slot_disconnectPorts)
self.ui.b_group_add.clicked.connect(self.slot_groupAdd)
self.ui.b_group_remove.clicked.connect(self.slot_groupRemove)
self.ui.b_group_rename.clicked.connect(self.slot_groupRename)
self.ui.b_port_add.clicked.connect(self.slot_portAdd)
self.ui.b_port_remove.clicked.connect(self.slot_portRemove)
self.ui.b_port_rename.clicked.connect(self.slot_portRename)
self.ui.b_ports_connect.clicked.connect(self.slot_connectPorts)
self.ui.b_ports_disconnect.clicked.connect(self.slot_disconnectPorts)
self.setCanvasConnections()
self.ui.act_settings_configure.triggered.connect(self.slot_configureCatarina)
self.ui.act_help_about.triggered.connect(self.slot_aboutCatarina)
self.ui.act_help_about_qt.triggered.connect(app.aboutQt)
self.SIGUSR1.connect(self.slot_projectSave)
# Dummy timer to keep events active
self.fUpdateTimer = self.startTimer(1000)
# Start Empty Project
self.slot_projectNew()
def canvasCallback(self, action, value1, value2, value_str):
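        # PatchCanvas GUI callback: each branch below mirrors a user action done directly
        # on the canvas into m_group_list / m_port_list / m_connection_list, so the
        # in-memory project stays in sync with what is displayed.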
if action == patchcanvas.ACTION_GROUP_INFO:
pass
elif action == patchcanvas.ACTION_GROUP_RENAME:
group_id = value1
new_group_name = value_str
for group in self.m_group_list:
if group[iGroupName] == new_group_name:
QMessageBox.warning(self, self.tr("Warning"), self.tr("There is already a group with this name"))
return
patchcanvas.renameGroup(group_id, new_group_name)
for group in self.m_group_list:
if group[iGroupId] == group_id:
group[iGroupName] = new_group_name
break
elif action == patchcanvas.ACTION_GROUP_SPLIT:
group_id = value1
patchcanvas.splitGroup(group_id)
for group in self.m_group_list:
if group[iGroupId] == group_id:
group[iGroupSplit] = True
break
elif action == patchcanvas.ACTION_GROUP_JOIN:
group_id = value1
patchcanvas.joinGroup(group_id)
for group in self.m_group_list:
if group[iGroupId] == group_id:
group[iGroupSplit] = False
break
elif action == patchcanvas.ACTION_PORT_INFO:
port_id = value1
group_id = 0
group_name = ""
port_name = ""
port_mode = patchcanvas.PORT_MODE_NULL
port_type = patchcanvas.PORT_TYPE_NULL
for port in self.m_port_list:
if port[iPortId] == port_id:
group_id = port[iPortGroup]
port_name = port[iPortName]
port_mode = port[iPortMode]
port_type = port[iPortType]
break
for group in self.m_group_list:
if group[iGroupId] == group_id:
group_name = group[iGroupName]
break
if port_mode == patchcanvas.PORT_MODE_INPUT:
mode_text = self.tr("Input")
elif port_mode == patchcanvas.PORT_MODE_OUTPUT:
mode_text = self.tr("Output")
else:
mode_text = self.tr("Unknown")
if port_type == patchcanvas.PORT_TYPE_AUDIO_JACK:
type_text = self.tr("JACK Audio")
elif port_type == patchcanvas.PORT_TYPE_MIDI_JACK:
type_text = self.tr("JACK MIDI")
elif port_type == patchcanvas.PORT_TYPE_MIDI_A2J:
type_text = self.tr("A2J MIDI")
elif port_type == patchcanvas.PORT_TYPE_MIDI_ALSA:
type_text = self.tr("ALSA MIDI")
else:
type_text = self.tr("Unknown")
port_full_name = group_name + ":" + port_name
            info = self.tr(""
                "<table>"
                "<tr><td align='right'><b>Group Name:</b></td><td> %s</td></tr>"
                "<tr><td align='right'><b>Group ID:</b></td><td> %s</td></tr>"
                "<tr><td align='right'><b>Port Name:</b></td><td> %s</td></tr>"
                "<tr><td align='right'><b>Port ID:</b></td><td> %s</td></tr>"
                "<tr><td align='right'><b>Full Port Name:</b></td><td> %s</td></tr>"
                "<tr><td colspan='2'> </td></tr>"
                "<tr><td align='right'><b>Port Mode:</b></td><td> %s</td></tr>"
                "<tr><td align='right'><b>Port Type:</b></td><td> %s</td></tr>"
                "</table>"
            ) % (group_name, group_id, port_name, port_id, port_full_name, mode_text, type_text)
QMessageBox.information(self, self.tr("Port Information"), info)
elif action == patchcanvas.ACTION_PORT_RENAME:
port_id = value1
new_port_name = value_str
patchcanvas.renamePort(port_id, new_port_name)
for port in self.m_port_list:
if port[iPortId] == port_id:
port[iPortName] = new_port_name
break
elif action == patchcanvas.ACTION_PORTS_CONNECT:
connection_id = self.m_last_connection_id
port_out_id = value1
port_in_id = value2
patchcanvas.connectPorts(connection_id, port_out_id, port_in_id)
conn_obj = [None, None, None]
conn_obj[iConnId] = connection_id
conn_obj[iConnOutput] = port_out_id
conn_obj[iConnInput] = port_in_id
self.m_connection_list.append(conn_obj)
self.m_last_connection_id += 1
elif action == patchcanvas.ACTION_PORTS_DISCONNECT:
connection_id = value1
patchcanvas.disconnectPorts(connection_id)
for connection in self.m_connection_list:
if connection[iConnId] == connection_id:
self.m_connection_list.remove(connection)
break
def initPorts(self):
for group in self.m_group_list:
patchcanvas.addGroup(group[iGroupId], group[iGroupName], patchcanvas.SPLIT_YES if (group[iGroupSplit]) else patchcanvas.SPLIT_NO, group[iGroupIcon])
for group_pos in self.m_group_list_pos:
patchcanvas.setGroupPosFull(group_pos[iGroupPosId], group_pos[iGroupPosX_o], group_pos[iGroupPosY_o], group_pos[iGroupPosX_i], group_pos[iGroupPosY_i])
for port in self.m_port_list:
patchcanvas.addPort(port[iPortGroup], port[iPortId], port[iPortName], port[iPortMode], port[iPortType])
for connection in self.m_connection_list:
patchcanvas.connectPorts(connection[iConnId], connection[iConnOutput], connection[iConnInput])
self.m_group_list_pos = []
patchcanvas.updateZValues()
def saveFile(self, path):
content = ("<?xml version='1.0' encoding='UTF-8'?>\n"
"<!DOCTYPE CATARINA>\n"
"<CATARINA VERSION='%s'>\n" % VERSION)
content += " <Groups>\n"
for i in range(len(self.m_group_list)):
group = self.m_group_list[i]
group_id = group[iGroupId]
group_name = group[iGroupName]
group_split = group[iGroupSplit]
group_icon = group[iGroupIcon]
group_pos_i = patchcanvas.getGroupPos(group_id, patchcanvas.PORT_MODE_INPUT)
group_pos_o = patchcanvas.getGroupPos(group_id, patchcanvas.PORT_MODE_OUTPUT)
content += " <g%i> <name>%s</name> <data>%i:%i:%i:%f:%f:%f:%f</data> </g%i>\n" % (i, group_name, group_id, group_split, group_icon, group_pos_o.x(), group_pos_o.y(), group_pos_i.x(), group_pos_i.y(), i)
content += " </Groups>\n"
content += " <Ports>\n"
for i in range(len(self.m_port_list)):
port = self.m_port_list[i]
content += " <p%i> <name>%s</name> <data>%i:%i:%i:%i</data> </p%i>\n" % (i, port[iPortName], port[iPortGroup], port[iPortId], port[iPortMode], port[iPortType], i)
content += " </Ports>\n"
content += " <Connections>\n"
for i in range(len(self.m_connection_list)):
connection = self.m_connection_list[i]
content += " <c%i>%i:%i:%i</c%i>\n" % (i, connection[iConnId], connection[iConnOutput], connection[iConnInput], i)
content += " </Connections>\n"
content += "</CATARINA>\n"
try:
fd = uopen(path, "w")
fd.write(content)
fd.close()
except:
QMessageBox.critical(self, self.tr("Error"), self.tr("Failed to save file"))
def loadFile(self, path):
if not os.path.exists(path):
QMessageBox.critical(self, self.tr("Error"), self.tr("The file '%s' does not exist" % path))
self.m_save_path = None
return
try:
fd = uopen(path, "r")
readState = fd.read()
fd.close()
except:
QMessageBox.critical(self, self.tr("Error"), self.tr("Failed to load file"))
self.m_save_path = None
return
self.m_save_path = path
self.m_group_list = []
self.m_group_list_pos = []
self.m_port_list = []
self.m_connection_list = []
self.m_last_group_id = 1
self.m_last_port_id = 1
self.m_last_connection_id = 1
xml = QDomDocument()
xml.setContent(readState.encode("utf-8"))
content = xml.documentElement()
if content.tagName() != "CATARINA":
QMessageBox.critical(self, self.tr("Error"), self.tr("Not a valid Catarina file"))
return
# Get values from XML - the big code
node = content.firstChild()
while not node.isNull():
if node.toElement().tagName() == "Groups":
group_name = ""
groups = node.toElement().firstChild()
while not groups.isNull():
group = groups.toElement().firstChild()
while not group.isNull():
tag = group.toElement().tagName()
text = group.toElement().text()
if tag == "name":
group_name = text
elif tag == "data":
group_data = text.split(":")
if len(group_data) == 7 and group_data[0].isdigit() and group_data[1].isdigit() and group_data[2].isdigit() and isNumber(group_data[3]) and isNumber(group_data[4]) and isNumber(group_data[5]) and isNumber(group_data[6]):
group_obj = [None, None, None, None]
group_obj[iGroupId] = int(group_data[0])
group_obj[iGroupName] = group_name
group_obj[iGroupSplit] = int(group_data[1])
group_obj[iGroupIcon] = int(group_data[2])
group_pos_obj = [None, None, None, None, None]
group_pos_obj[iGroupPosId] = int(group_data[0])
group_pos_obj[iGroupPosX_o] = float(group_data[3])
group_pos_obj[iGroupPosY_o] = float(group_data[4])
group_pos_obj[iGroupPosX_i] = float(group_data[5])
group_pos_obj[iGroupPosY_i] = float(group_data[6])
self.m_group_list.append(group_obj)
self.m_group_list_pos.append(group_pos_obj)
group_id = group_obj[iGroupId]
if group_id > self.m_last_group_id:
self.m_last_group_id = group_id + 1
group = group.nextSibling()
groups = groups.nextSibling()
elif node.toElement().tagName() == "Ports":
port_name = ""
ports = node.toElement().firstChild()
while not ports.isNull():
port = ports.toElement().firstChild()
while not port.isNull():
tag = port.toElement().tagName()
text = port.toElement().text()
if tag == "name":
port_name = text
elif tag == "data":
port_data = text.split(":")
if len(port_data) == 4 and port_data[0].isdigit() and port_data[1].isdigit() and port_data[2].isdigit() and port_data[3].isdigit():
new_port = [None, None, None, None, None]
new_port[iPortGroup] = int(port_data[0])
new_port[iPortId] = int(port_data[1])
new_port[iPortName] = port_name
new_port[iPortMode] = int(port_data[2])
new_port[iPortType] = int(port_data[3])
self.m_port_list.append(new_port)
if new_port[iPortId] > self.m_last_port_id:
self.m_last_port_id = new_port[iPortId] + 1
port = port.nextSibling()
ports = ports.nextSibling()
elif node.toElement().tagName() == "Connections":
conns = node.toElement().firstChild()
while not conns.isNull():
conn_data = conns.toElement().text().split(":")
if len(conn_data) == 3 and conn_data[0].isdigit() and conn_data[1].isdigit() and conn_data[2].isdigit():
conn_obj = [None, None, None]
conn_obj[iConnId] = int(conn_data[0])
conn_obj[iConnOutput] = int(conn_data[1])
conn_obj[iConnInput] = int(conn_data[2])
connection_id = conn_obj[iConnId]
self.m_connection_list.append(conn_obj)
if connection_id >= self.m_last_connection_id:
self.m_last_connection_id = connection_id + 1
conns = conns.nextSibling()
node = node.nextSibling()
self.m_last_group_id += 1
self.m_last_port_id += 1
self.m_last_connection_id += 1
patchcanvas.clear()
self.initPorts()
self.scene.zoom_fit()
self.scene.zoom_reset()
@pyqtSlot()
def slot_projectNew(self):
self.m_group_list = []
self.m_group_list_pos = []
self.m_port_list = []
self.m_connection_list = []
self.m_last_group_id = 1
self.m_last_port_id = 1
self.m_last_connection_id = 1
self.m_save_path = None
patchcanvas.clear()
@pyqtSlot()
def slot_projectOpen(self):
path, _ = QFileDialog.getOpenFileName(self, self.tr("Load State"), filter=self.tr("Catarina XML Document (*.xml)"))
if path:
self.loadFile(path)
@pyqtSlot()
def slot_projectSave(self):
if self.m_save_path:
self.saveFile(self.m_save_path)
else:
self.slot_projectSaveAs()
@pyqtSlot()
def slot_projectSaveAs(self):
path, _ = QFileDialog.getSaveFileName(self, self.tr("Save State"), filter=self.tr("Catarina XML Document (*.xml)"))
if path:
self.m_save_path = path
self.saveFile(path)
@pyqtSlot()
def slot_groupAdd(self):
dialog = CatarinaAddGroupW(self, self.m_group_list)
if dialog.exec_():
group_id = self.m_last_group_id
group_name = dialog.ret_group_name
group_split = dialog.ret_group_split
group_splitR = patchcanvas.SPLIT_YES if group_split else patchcanvas.SPLIT_NO
group_icon = patchcanvas.ICON_HARDWARE if group_split else patchcanvas.ICON_APPLICATION
patchcanvas.addGroup(group_id, group_name, group_splitR, group_icon)
group_obj = [None, None, None, None]
group_obj[iGroupId] = group_id
group_obj[iGroupName] = group_name
group_obj[iGroupSplit] = group_split
group_obj[iGroupIcon] = group_icon
self.m_group_list.append(group_obj)
self.m_last_group_id += 1
@pyqtSlot()
def slot_groupRemove(self):
if len(self.m_group_list) > 0:
dialog = CatarinaRemoveGroupW(self, self.m_group_list)
if dialog.exec_():
group_id = dialog.ret_group_id
# Remove port connections first
for port in self.m_port_list:
if port[iPortGroup] == group_id:
port_id = port[iPortId]
h = 0
for i in range(len(self.m_connection_list)):
connection = self.m_connection_list[i-h]
if connection[iConnOutput] == port_id or connection[iConnInput] == port_id:
patchcanvas.disconnectPorts(connection[iConnId])
self.m_connection_list.pop(i-h)
h += 1
# Remove ports
h = 0
for i in range(len(self.m_port_list)):
port = self.m_port_list[i-h]
if port[iPortGroup] == group_id:
port_id = port[iPortId]
patchcanvas.removePort(port[iPortId])
self.m_port_list.pop(i-h)
h += 1
# Now remove group
patchcanvas.removeGroup(group_id)
for group in self.m_group_list:
if group[iGroupId] == group_id:
self.m_group_list.remove(group)
break
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
@pyqtSlot()
def slot_groupRename(self):
if len(self.m_group_list) > 0:
dialog = CatarinaRenameGroupW(self, self.m_group_list)
if dialog.exec_():
group_id = dialog.ret_group_id
new_group_name = dialog.ret_new_group_name
patchcanvas.renameGroup(group_id, new_group_name)
for group in self.m_group_list:
if group[iGroupId] == group_id:
group[iGroupName] = new_group_name
break
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
@pyqtSlot()
def slot_portAdd(self):
if len(self.m_group_list) > 0:
dialog = CatarinaAddPortW(self, self.m_group_list, self.m_last_port_id)
if dialog.exec_():
group_id = dialog.ret_group_id
port_name = dialog.ret_port_name
port_mode = dialog.ret_port_mode
port_type = dialog.ret_port_type
patchcanvas.addPort(group_id, self.m_last_port_id, port_name, port_mode, port_type)
new_port = [None, None, None, None, None]
new_port[iPortGroup] = group_id
new_port[iPortId] = self.m_last_port_id
new_port[iPortName] = port_name
new_port[iPortMode] = port_mode
new_port[iPortType] = port_type
self.m_port_list.append(new_port)
self.m_last_port_id += 1
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Group first!"))
@pyqtSlot()
def slot_portRemove(self):
if len(self.m_port_list) > 0:
dialog = CatarinaRemovePortW(self, self.m_group_list, self.m_port_list)
if dialog.exec_():
port_id = dialog.ret_port_id
h = 0
for i in range(len(self.m_connection_list)):
connection = self.m_connection_list[i-h]
if connection[iConnOutput] == port_id or connection[iConnInput] == port_id:
patchcanvas.disconnectPorts(connection[iConnId])
self.m_connection_list.pop(i-h)
h += 1
patchcanvas.removePort(port_id)
for port in self.m_port_list:
if port[iPortId] == port_id:
self.m_port_list.remove(port)
break
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Port first!"))
@pyqtSlot()
def slot_portRename(self):
if len(self.m_port_list) > 0:
dialog = CatarinaRenamePortW(self, self.m_group_list, self.m_port_list)
if dialog.exec_():
port_id = dialog.ret_port_id
new_port_name = dialog.ret_new_port_name
patchcanvas.renamePort(port_id, new_port_name)
for port in self.m_port_list:
if port[iPortId] == port_id:
port[iPortName] = new_port_name
break
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add a Port first!"))
@pyqtSlot()
def slot_connectPorts(self):
if len(self.m_port_list) > 0:
dialog = CatarinaConnectPortsW(self, self.m_group_list, self.m_port_list)
if dialog.exec_():
connection_id = self.m_last_connection_id
port_out_id = dialog.ret_port_out_id
port_in_id = dialog.ret_port_in_id
for connection in self.m_connection_list:
if connection[iConnOutput] == port_out_id and connection[iConnInput] == port_in_id:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Ports already connected!"))
return
patchcanvas.connectPorts(connection_id, port_out_id, port_in_id)
conn_obj = [None, None, None]
conn_obj[iConnId] = connection_id
conn_obj[iConnOutput] = port_out_id
conn_obj[iConnInput] = port_in_id
self.m_connection_list.append(conn_obj)
self.m_last_connection_id += 1
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please add some Ports first!"))
@pyqtSlot()
def slot_disconnectPorts(self):
if len(self.m_connection_list) > 0:
dialog = CatarinaDisconnectPortsW(self, self.m_group_list, self.m_port_list, self.m_connection_list)
if dialog.exec_():
connection_id = -1
port_out_id = dialog.ret_port_out_id
port_in_id = dialog.ret_port_in_id
for connection in self.m_connection_list:
if connection[iConnOutput] == port_out_id and connection[iConnInput] == port_in_id:
connection_id = connection[iConnId]
self.m_connection_list.remove(connection)
break
patchcanvas.disconnectPorts(connection_id)
else:
QMessageBox.warning(self, self.tr("Warning"), self.tr("Please make some Connections first!"))
@pyqtSlot()
def slot_configureCatarina(self):
dialog = SettingsW(self, "catarina", hasGL)
if dialog.exec_():
self.loadSettings(False)
patchcanvas.clear()
p_options = patchcanvas.options_t()
p_options.theme_name = self.fSavedSettings["Canvas/Theme"]
p_options.auto_hide_groups = self.fSavedSettings["Canvas/AutoHideGroups"]
p_options.use_bezier_lines = self.fSavedSettings["Canvas/UseBezierLines"]
p_options.antialiasing = self.fSavedSettings["Canvas/Antialiasing"]
p_options.eyecandy = self.fSavedSettings["Canvas/EyeCandy"]
patchcanvas.setOptions(p_options)
patchcanvas.init("Catarina", self.scene, self.canvasCallback, DEBUG)
self.initPorts()
@pyqtSlot()
def slot_aboutCatarina(self):
QMessageBox.about(self, self.tr("About Catarina"), self.tr("<h3>Catarina</h3>"
"<br>Version %s"
"<br>Catarina is a testing ground for the 'PatchCanvas' module.<br>"
"<br>Copyright (C) 2010-2022 falkTX") % VERSION)
def saveSettings(self):
settings = QSettings()
settings.setValue("Geometry", self.saveGeometry())
settings.setValue("ShowToolbar", self.ui.frame_toolbar.isVisible())
def loadSettings(self, geometry):
settings = QSettings()
if geometry:
self.restoreGeometry(settings.value("Geometry", b""))
showToolbar = settings.value("ShowToolbar", True, type=bool)
self.ui.act_settings_show_toolbar.setChecked(showToolbar)
self.ui.frame_toolbar.setVisible(showToolbar)
self.fSavedSettings = {
"Canvas/Theme": settings.value("Canvas/Theme", patchcanvas.getDefaultThemeName(), type=str),
"Canvas/AutoHideGroups": settings.value("Canvas/AutoHideGroups", False, type=bool),
"Canvas/UseBezierLines": settings.value("Canvas/UseBezierLines", True, type=bool),
"Canvas/EyeCandy": settings.value("Canvas/EyeCandy", patchcanvas.EYECANDY_SMALL, type=int),
"Canvas/UseOpenGL": settings.value("Canvas/UseOpenGL", False, type=bool),
"Canvas/Antialiasing": settings.value("Canvas/Antialiasing", patchcanvas.ANTIALIASING_SMALL, type=int),
"Canvas/TextAntialiasing": settings.value("Canvas/TextAntialiasing", True, type=bool),
"Canvas/HighQualityAntialiasing": settings.value("Canvas/HighQualityAntialiasing", False, type=bool)
}
def timerEvent(self, event):
if event.timerId() == self.fUpdateTimer:
self.update()
QMainWindow.timerEvent(self, event)
def closeEvent(self, event):
self.saveSettings()
patchcanvas.clear()
QMainWindow.closeEvent(self, event)
#--------------- main ------------------
if __name__ == '__main__':
# App initialization
app = QApplication(sys.argv)
app.setApplicationName("Catarina")
app.setApplicationVersion(VERSION)
app.setOrganizationName("Cadence")
app.setWindowIcon(QIcon(":/scalable/catarina.svg"))
# Show GUI
gui = CatarinaMainW()
# Set-up custom signal handling
setUpSignals(gui)
gui.show()
if len(app.arguments()) > 1:
if not app.arguments()[0].endswith("Python.exe"):
gui.loadFile(app.arguments()[1])
elif len(app.arguments()) > 2:
gui.loadFile(app.arguments()[2])
# App-Loop
sys.exit(app.exec_())
|
falkTX/Cadence
|
src/catarina.py
|
catarina.py
|
py
| 54,241 |
python
|
en
|
code
| 361 |
github-code
|
6
|
19887674480
|
#
# -*- coding: utf-8 -*-
# OpenPGPpy OpenPGPcard : OpenPGP smartcard communication library for Python
# Copyright (C) 2020-2022 BitLogiK
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from logging import getLogger
import time
from .der_coding import encode_der, decode_do
try:
from smartcard.System import readers
from smartcard.util import toBytes, toHexString
from smartcard.Exceptions import CardConnectionException
except ModuleNotFoundError as exc:
raise ModuleNotFoundError("pyscard not installed ?") from exc
logger = getLogger(__name__)
# Exception classes for OpenPGPcard
class PGPBaseException(Exception):
pass
class PGPCardException(PGPBaseException):
def __init__(self, sw_byte1, sw_byte2):
self.sw_byte1 = sw_byte1
self.sw_byte2 = sw_byte2
self.sw_code = (sw_byte1 << 8) | sw_byte2
self.message = "Error status : 0x%02X%02X" % (sw_byte1, sw_byte2)
super().__init__(self.message)
class ConnectionException(PGPBaseException):
pass
class BadInputException(PGPBaseException):
pass
class DataException(PGPBaseException):
pass
class PinException(PGPBaseException):
def __init__(self, num_retries):
self.retries_left = num_retries
if num_retries >= 2:
self.message = f"Wrong PIN. {num_retries} tries left"
else:
self.message = f"Wrong PIN. {num_retries} try left"
super().__init__(self.message)
HEX_SYMBOLS = "0123456789abcdefABCDEF"
APDU_SHORT = 256
APDU_LONG = 65536
# Utils helpers
def ishex(istring):
return all(c in HEX_SYMBOLS for c in istring)
def check_hex(func):
"""Decorator to check the first method argument
is 2/4 string hex (a DO short address)
Expands the hex string from 2 to 4 hex chars (adds leading 0)
"""
def func_wrapper(*args):
        if len(args) < 2:
            raise BadInputException(
                "First argument must be filehex : 1 or 2 bytes hex string"
            )
        if not isinstance(args[1], str):
            raise BadInputException("filehex provided must be a string")
args_list = [*args]
if len(args_list[1]) == 2:
# A single byte address : param_1=0
args_list[1] = "00" + args_list[1]
if len(args_list[1]) != 4 or not ishex(args_list[1]):
raise BadInputException("filehex provided must be 2 or 4 hex chars")
return func(*args_list)
return func_wrapper
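# Example: a method decorated with @check_hex and called as card.get_data("C4") receives
# filehex expanded to "00C4"; anything that is not 2 or 4 hex characters raises
# BadInputException.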
def to_list(binstr):
return toBytes(binstr.hex())
def print_list(liststr):
"""Output a list pretty in the debug logger."""
for item in liststr:
logger.debug(" - %s", item)
# Core class OpenPGPcard
class OpenPGPcard:
AppID = toBytes("D27600012401")
default_manufacturer_name = "- unknown -"
manufacturer_list = {
0x0001: "PPC Card Systems",
0x0002: "Prism",
0x0003: "OpenFortress",
0x0004: "Wewid",
0x0005: "ZeitControl",
0x0006: "Yubico",
0x0007: "OpenKMS",
0x0008: "LogoEmail",
0x0009: "Fidesmo",
0x000A: "Dangerous Things",
0x000B: "Feitian Technologies",
0x002A: "Magrathea",
0x0042: "GnuPG",
0x1337: "Warsaw Hackerspace",
0x2342: "Warpzone",
0x2C97: "Ledger",
0x4354: "Confidential Technologies",
0x5443: "TIF-IT",
0x63AF: "Trustica",
0xAFAF: "ANSSI",
0xBA53: "c-base",
0xBD0E: "Paranoidlabs",
0xF517: "FSIJ",
0xF5EC: "F-Secure",
}
def __init__(self, reader_index=None):
applet_detected = False
readers_list = readers()
if len(readers_list) > 0:
if reader_index is None:
logger.debug("Trying to reach the OpenPGP app in all readers")
logger.debug("Available readers :")
print_list(readers_list)
if reader_index is not None:
if not isinstance(reader_index, int):
raise ValueError("reader_index must be int.")
if reader_index < 0:
raise ValueError("reader_index is a positive index, starts at 0.")
if len(readers_list) > reader_index:
readers_list = readers_list[reader_index : reader_index + 1]
else:
raise ConnectionException("Reader index out of readers detected")
logger.debug("Using reader index #%i", reader_index)
for reader in readers_list:
applet_detected = False
try:
logger.debug("Trying with reader : %s", reader)
self.connection = reader.createConnection()
self.connection.connect()
apdu_select = [0x00, 0xA4, 0x04, 0x00]
self.send_apdu(apdu_select, OpenPGPcard.AppID)
applet_detected = hasattr(self, "connection")
except Exception:
logger.debug("Fail with this reader")
if reader_index is not None:
raise ConnectionException("No OpenPGP applet on this reader.")
continue
if applet_detected:
logger.debug("An OpenPGP applet detected, using %s", reader.name)
self.name = reader.name
break
if applet_detected:
self.longer = 0
# Read device info
self.get_identifier()
self.get_application_data()
self.get_length()
self.get_features()
else:
raise ConnectionException("Can't find any OpenPGP device connected.")
# The object has the following attributes :
# self.name = str, name of the device (or the card reader used)
# self.pgpvermaj = int, OpenPGP application major version (3)
# self.pgpvermin = int, OpenPGP application minor version
# self.pgpverstr = string, OpenPGP application "maj.min"
# self.manufacturer_id = string, hex string of the manufacturer ID "0xXXXX"
# self.manufacturer = string, name of the manufacturer (or "- unknown -")
# self.serial = int, serial number
# self.max_cmd : int, maximum command length
# self.max_rsp : int, maximum response length
# self.display : bool, has a display ?
# self.bio : bool, has a biometric sensor ?
# self.button : bool, has a button ?
# self.keypad : bool, has a keypad ?
# self.led : bool, has a LED ?
# self.speaker : bool, has a speaker ?
# self.mic : bool, has a microphone ?
        # self.touchscreen : bool, has a touchscreen ?
def __del__(self):
"""Disconnect device."""
if hasattr(self, "connection"):
del self.connection
def send_apdu(self, apdu_header, cmd_data, exp_resp_len=0):
"""send APDU 7816-4 with extended length
        apdu_header : [ CLA, INS, P1, P2 ] ISO7816 APDU header,
without length info (Lc nor Le)
cmd_data field : bytes list of the command data
exp_resp_len : Expected response length, must be set to 65536
        when expecting a long answer (with a short command)
"""
len_data = len(cmd_data)
# Lc is 1 or 3 bytes
if len_data < APDU_SHORT and exp_resp_len <= APDU_SHORT:
# Standard APDU : Lc 1 byte : short command and short response
apdu = apdu_header + [len_data] + cmd_data
elif len_data < APDU_LONG:
# Extended APDU : Lc 3 bytes : extended command and extended response
apdu = apdu_header + [0, len_data >> 8, len_data & 255] + cmd_data
else:
raise DataException("Command data too large")
if exp_resp_len > 0:
# Le present
if exp_resp_len < APDU_SHORT:
# Le fixed and short
apdu += [exp_resp_len]
elif exp_resp_len == APDU_SHORT:
# Le short : max response len 255 bytes
apdu += [0]
elif exp_resp_len < APDU_LONG:
# Le fixed and long
apdu += [exp_resp_len >> 8, exp_resp_len & 255]
elif exp_resp_len == APDU_LONG:
# Le long : max response len 65535 bytes
apdu += [0, 0]
else:
raise DataException("Expected data response too large")
logger.debug(
f" Sending 0x{apdu_header[1]:X} command with {len_data} bytes data"
)
if exp_resp_len > 0:
logger.debug(f" with Le={exp_resp_len}")
logger.debug(f"-> {toHexString(apdu)}")
t_env = time.time()
try:
data, sw_byte1, sw_byte2 = self.connection.transmit(apdu)
        except CardConnectionException as exc:
            raise ConnectionException(
                "Error when communicating with the OpenPGP device."
            ) from exc
t_ans = (time.time() - t_env) * 1000
logger.debug(
" Received %i bytes data : SW 0x%02X%02X - duration: %.1f ms"
% (len(data), sw_byte1, sw_byte2, t_ans)
)
if len(data) > 0:
logger.debug(f"<- {toHexString(data)}")
while sw_byte1 == 0x61:
t_env = time.time()
datacompl, sw_byte1, sw_byte2 = self.connection.transmit(
[0x00, 0xC0, 0, 0, 0]
)
t_ans = (time.time() - t_env) * 1000
logger.debug(
" Received remaining %i bytes : 0x%02X%02X - duration: %.1f ms"
% (len(datacompl), sw_byte1, sw_byte2, t_ans)
)
logger.debug(f"<- {toHexString(datacompl)}")
data += datacompl
if sw_byte1 == 0x63 and sw_byte2 & 0xF0 == 0xC0:
raise PinException(sw_byte2 - 0xC0)
if sw_byte1 != 0x90 or sw_byte2 != 0x00:
raise PGPCardException(sw_byte1, sw_byte2)
return data
@check_hex
def select_data(self, filehex, param_1=0, param_2=4):
"""Select a data object : filehex is 2 bytes (4 string hex)."""
apdu_command = [
0x00,
0xA5,
param_1,
param_2,
]
data = toBytes("60 04 5C 02" + filehex)
self.send_apdu(apdu_command, data)
@check_hex
def get_data(self, filehex, data_hex=""):
"""Binary read / ISO read the object"""
logger.debug(f"Read Data {data_hex} in 0x{filehex}")
param_1 = int(filehex[0:2], 16)
param_2 = int(filehex[2:4], 16)
apdu_command = [0x00, 0xCA, param_1, param_2]
if len(data_hex) == 2:
data_hex = "00" + data_hex
dataresp = self.send_apdu(apdu_command, toBytes(data_hex), self.longer)
return dataresp
def get_next_data(self, param_1=0, param_2=0, data_hex=""):
"""Continue read."""
logger.debug("Read next data %s", data_hex)
apdu_command = [0x00, 0xCC, param_1, param_2]
blkdata = self.send_apdu(apdu_command, toBytes(data_hex))
return blkdata
@check_hex
def put_data(self, filehex, data_hex=""):
logger.debug(f"Put data {data_hex} in 0x{filehex}")
param_1 = int(filehex[0:2], 16)
param_2 = int(filehex[2:4], 16)
apdu_command = [0x00, 0xDA, param_1, param_2] # or 0xDB command
blkdata = self.send_apdu(apdu_command, toBytes(data_hex))
return blkdata
def get_identifier(self):
"""Full application identifier"""
resp = self.get_data("4F")
if len(resp) != 16:
raise DataException("Application identifier data shall be 16 bytes long.")
if resp[:6] != OpenPGPcard.AppID:
raise DataException(
"Start of application identifier data shall be the OpenGPG AID."
)
self.pgpvermaj = resp[6]
self.pgpvermin = resp[7]
self.pgpverstr = f"{resp[6]}.{resp[7]}"
self.manufacturer_id = f"0x{resp[8]:02X}{resp[9]:02X}"
manufacturer_id_int = int(self.manufacturer_id, 16)
if manufacturer_id_int in OpenPGPcard.manufacturer_list:
self.manufacturer = OpenPGPcard.manufacturer_list[manufacturer_id_int]
else:
self.manufacturer = OpenPGPcard.default_manufacturer_name
self.serial = int.from_bytes(resp[10:14], "big")
if self.pgpvermaj >= 3:
self.longer = APDU_LONG
logger.debug(f"PGP version : {self.pgpverstr}")
logger.debug(f"Manufacturer : {self.manufacturer} ({self.manufacturer_id})")
logger.debug(f"Serial : {self.serial}")
def get_length(self):
"""Extended length info DO 7F66 : 0202 xxxx 0202 xxxx
Also bit 7 in Application Data "0x73"
"""
self.max_cmd = 256
self.max_rsp = 256
if self.pgpvermaj >= 3:
resp = self.get_data("7F66")
if len(resp) == 8: # Simple DO
self.max_cmd = int.from_bytes(resp[2:4], "big")
self.max_rsp = int.from_bytes(resp[6:8], "big")
elif len(resp) == 11 and resp[:3] == [0x7F, 0x66, 8]: # Constructed DO
self.max_cmd = int.from_bytes(resp[5:7], "big")
self.max_rsp = int.from_bytes(resp[9:11], "big")
else:
raise DataException("Extended length info incorrect format.")
def get_pwstatus(self):
return self.get_data("C4")
def get_features(self):
"""Features optional DO 7F74"""
self.display = False
self.bio = False
self.button = False
self.keypad = False
self.led = False
self.speaker = False
self.mic = False
self.touchscreen = False
try:
resp = self.get_data("7F74")
except PGPCardException as exc:
            if exc.sw_code in (0x6B00, 0x6A83, 0x6A88):
self.display_features()
return
raise
if resp[:3] == [0x7F, 0x74, 3]: # Turn constructed DO to simple DO
resp = resp[3:]
if resp[:2] != [0x81, 1]:
raise DataException("Features data shall start with 0x81 0x01.")
if len(resp) != 3:
raise DataException("Features data shall be 3 bytes long.")
feature_int = resp[2]
def check_bit(integ, bit_pos):
# Check bit 8..1
powertwo = 1 << (bit_pos - 1)
return (integ & powertwo) == powertwo
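        # e.g. check_bit(0b00100000, 6) is True, since bit 6 corresponds to 1 << 5 == 32.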
self.display = check_bit(feature_int, 8)
self.bio = check_bit(feature_int, 7)
self.button = check_bit(feature_int, 6)
self.keypad = check_bit(feature_int, 5)
self.led = check_bit(feature_int, 4)
self.speaker = check_bit(feature_int, 3)
self.mic = check_bit(feature_int, 2)
self.touchscreen = check_bit(feature_int, 1)
self.display_features()
def display_features(self):
"""Print features for debug"""
def capability_message(capability):
return "Yes" if capability else "No"
# logger.debug("Display ? %s", capability_message(self.display))
# logger.debug("Biometric sensor ? %s", capability_message(self.bio))
logger.debug("Button ? %s", capability_message(self.button))
# logger.debug("Keypad ? %s", capability_message(self.keypad))
# logger.debug("LED ? %s", capability_message(self.led))
# logger.debug("Speaker ? %s", capability_message(self.speaker))
# logger.debug("Microphone ? %s", capability_message(self.mic))
# logger.debug("TouchScreen ? %s", capability_message(self.touchscreen))
def get_historical_bytes(self):
"""Historical bytes DO 5F52"""
return self.get_data("5F52")
def get_application_data(self):
"""Application Related Data DO 6E"""
try:
resp = self.get_data("6E")
except PGPCardException as exc:
if exc.sw_code == 0x6D00:
# Retry after 3 seconds
time.sleep(3)
# Select again the applet
self.send_apdu([0x00, 0xA4, 0x04, 0x00], OpenPGPcard.AppID)
time.sleep(1)
                return self.get_application_data()
            # Any other status word is unexpected: propagate the error to the caller
            raise
app_rel_data = decode_do(resp)
if resp[0] == 0x6E:
app_rel_data = app_rel_data["6E"]
# Set the attribute about max PW length
pwstatus_data = bytes.fromhex(app_rel_data["73"]["C4"])
if pwstatus_data[1] < 128:
self.pw1_maxlen = pwstatus_data[1]
        if pwstatus_data[3] < 128:
self.pw3_maxlen = pwstatus_data[3]
return app_rel_data
def terminate_df(self):
self.send_apdu([0, 0xE6, 0, 0], [])
def activate_file(self):
self.send_apdu([0, 0x44, 0, 0], [])
def reset(self, pin3):
self.verify_pin(3, pin3)
self.terminate_df()
self.activate_file()
def get_random(self, len_data):
"""Get challenge INS=0x84
        Return len_data random bytes (a bytes object, not an integer)
ToDo : make it as optional, 6D00 error?
"""
return bytes(self.send_apdu([0, 0x84, 0, 0], [], len_data))
def get_pin_status(self, pin_bank):
"""Return remaining tries left for the given PIN bank address (1, 2 or 3)
        Returns 0 if the PIN is blocked, and 9000 if the PIN has already been verified.
"""
if self.pgpvermaj * 10000 + self.pgpvermin >= 30001: # >= v 3.1
try:
self.verify_pin(pin_bank, "")
return 9000
except PinException as exc:
return exc.retries_left
except PGPCardException as exc:
if exc.sw_code == 0x6983:
return 0
if exc.sw_code != 0x6A80:
raise exc
# Fallback to PW status "C4"
resp = self.get_pwstatus()
if len(resp) != 7:
raise PGPCardException("Bad PW status status data")
if pin_bank == 1:
return resp[4]
elif pin_bank == 3:
return resp[6]
raise PGPCardException("Only PW1 and PW3 are available for status")
def change_pin(self, old_pin, new_pin, pin_index):
"""Change PIN index number (index : 1 or 3)."""
if pin_index not in (3, 1):
raise DataException("Bad PIN index, must be 1 or 3.")
old_pin_bin = old_pin.encode("utf8")
new_pin_bin = new_pin.encode("utf8")
pin_min_len = 6
if pin_index == 3:
pin_min_len = 8
if len(old_pin_bin) < pin_min_len or len(new_pin_bin) < pin_min_len:
raise BadInputException(
f"Bad PIN #{pin_index} length, must be {pin_min_len} bytes."
)
data = old_pin_bin + new_pin_bin
self.send_apdu([0, 0x24, 0, 0x80 + pin_index], to_list(data))
def verify_pin(self, pin_bank, pin_string):
"""Verify PIN code : pin_bank is 1, 2 or 3 for SW1, SW2 or SW3
Call CHANGE REFERENCE DATA card command
"""
if pin_bank not in (1, 2, 3):
raise DataException("Bad PIN index, must be 1, 2 or 3.")
if pin_string:
self.send_apdu(
[0, 0x20, 0, 0x80 + pin_bank], to_list(pin_string.encode("utf8"))
)
else:
self.send_apdu([0, 0x20, 0, 0x80 + pin_bank], [])
@check_hex
def gen_key(self, keypos_hex):
"""Generate an asymmetric key pair in keypos slot address
Digital signature : 0xB600 : gen key according to algorithm data in C1
Confidentiality : 0xB800 : gen key according to algorithm data in C2
Authentication : 0xA400 : gen key according to algorithm data in C3
"""
return bytes(
self.send_apdu([0, 0x47, 0x80, 0], toBytes(keypos_hex), self.longer)
)
@check_hex
def get_public_key(self, keypos_hex):
"""Get the public part of the key pair in keypos slot address"""
return bytes(
self.send_apdu([0, 0x47, 0x81, 0], toBytes(keypos_hex), self.longer)
)
def sign(self, data):
"""Sign data, with Compute Digital Signature command"""
return bytes(self.send_apdu([0, 0x2A, 0x9E, 0x9A], to_list(data)))
def sign_ec_der(self, hashdata):
"""Sign with ECDSA hash data and output signature as ASN1 DER encoded
hashdata is the same size in bits of the EC key
"""
return encode_der(self.sign(hashdata))
def encipher(self):
"""Call ENC command
ToDo
"""
raise NotImplementedError()
def decipher(self, data):
return bytes(self.send_apdu([0, 0x2A, 0x80, 0x86], to_list(data)))
def decipher_25519(self, ext_pubkey):
"""For ECDH with Curve25519
ext_pubkey is a 32 bytes "x" public key
"""
data_field = b"\xA6\x12\x7F\x49\x22\x86\x20" + ext_pubkey
return self.decipher(data_field)
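# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes pyscard can reach a reader that holds an OpenPGP applet.
if __name__ == "__main__":
    device = OpenPGPcard()
    print("OpenPGP v%s - %s - serial %i" % (device.pgpverstr, device.manufacturer, device.serial))
    # GET CHALLENGE is optional on some cards (see the ToDo in get_random).
    print("16 random bytes :", device.get_random(16).hex())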
|
bitlogik/OpenPGPpy
|
OpenPGPpy/openpgp_card.py
|
openpgp_card.py
|
py
| 22,169 |
python
|
en
|
code
| 8 |
github-code
|
6
|
19523144511
|
from celery import shared_task
from time import sleep
from .models import Movie
@shared_task
def increase_ranking():
    """Increase the ranking of every upcoming movie by 10; intended to run every 5 minutes.

    Gets all movies with status 'upcoming' from the database, adds 10 to each movie's
    ranking field, and saves the changes back to the database.
    """
upcoming_movies = Movie.objects.filter(status='upcoming')
for movie in upcoming_movies:
movie.ranking += 10
movie.save()
@shared_task
def add(x, y):
    """Return the sum of x and y after simulating a long-running task (5 second sleep).

    Included as an example of how to define a simple task with Celery.
    """
sleep(5) # Simulate a long-running task
return x + y
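# Hypothetical scheduling sketch (not part of the original file): the docstring of
# increase_ranking says it runs every 5 minutes, which with Celery is normally wired
# through the beat schedule on the project's Celery app. The entry name and task path
# below are assumptions for illustration.
#
# app.conf.beat_schedule = {
#     "increase-upcoming-ranking": {
#         "task": "movies.tasks.increase_ranking",
#         "schedule": 300.0,  # seconds, i.e. every 5 minutes
#     },
# }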
|
Optimustprime/cinema_program
|
app/movies/tasks.py
|
tasks.py
|
py
| 853 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8424293393
|
from bs4 import BeautifulSoup
import spacy
import os
#nlp = spacy.load("nl_core_news_lg")
nlp = spacy.load("en_core_web_lg")
import regex as re
from nltk import ngrams
import pickle
import json
from augment.replace import BertSampler
from sacremoses import MosesDetokenizer
md = MosesDetokenizer(lang='en')
def position_of_ngram(words, hyp):
length = len(words)
for i, sublist in enumerate((hyp[i:i + length] for i in range(len(hyp)))):
if words == sublist:
return i, i+length
return None, None
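# For example, position_of_ngram(['a', 'b'], ['x', 'a', 'b', 'y']) returns (1, 3);
# when the sub-sequence does not occur in hyp it returns (None, None).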
def wordtokenizer(text):
nlptext = nlp(text)
#Tokenize the text using SpaCy
tokenlist = [token.text for token in nlptext if token.text != ' ']
for idx, token in enumerate(tokenlist):
        #SpaCy struggles with the E2E templates (e.g., __NAME__ ), as it tends to make all underscores separate tokens. Let's fix that.
#First, we find the start of the template
try:
if (tokenlist[idx] == '_') and (tokenlist[idx+1] == '_'):
wordgroup = tokenlist[idx]
dellist = []
nextidx = idx
                #Go to the next words after the start of the template until you reach the end (marked by two underscores and a non-underscore word).
#Then we will group all the separate tokens into the one template token, and use the collected index information to delete the part-of-template tokens.
while True:
nextidx += 1
try:
if (nextidx+2 == len(tokenlist)) or ((tokenlist[nextidx] == '_') and (tokenlist[nextidx+1] == '_') and (tokenlist[nextidx+2] != '_')):
dellist = dellist + [nextidx, nextidx+1]
wordgroup += tokenlist[nextidx] + tokenlist[nextidx+1]
break
else:
dellist.append(nextidx)
wordgroup += tokenlist[nextidx]
except IndexError:
return ['ERROR ERROR']
#We reverse the indexlist to make sure the deletion doesn't affect the next index
tokenlist[idx] = wordgroup
for delnum in dellist:
tokenlist[delnum] = ''
except IndexError:
return ['ERROR ERROR']
tokenlist = [x for x in tokenlist if x != '']
return tokenlist
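# Illustrative (untested) example: spaCy tends to split "__NAME__" into separate underscore
# tokens, and the loop above is meant to glue them back together, so
# wordtokenizer("__NAME__ is a pub") is intended to return ['__NAME__', 'is', 'a', 'pub'].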
def main():
#Gather all E2E files
filelist = []
for path, subdirs, files in os.walk('C:/Users/cvdrl/Desktop/EnrichedE2E-main'):
for name in files:
if name.endswith('.xml'):
filelist.append(os.path.join(path, name))
allentrytemplateinfo = {}
currentpath = os.getcwd()
for e2efile in filelist:
#Open the file, gather all entries, and all lexicalizations for that entry, then also find the template and text for that lexicalization
with open(e2efile, 'rb') as f:
soup = BeautifulSoup(f, 'lxml')
entrylist = soup.find('entries').find_all('entry')
fileentrytemplateinfo = []
for entry in entrylist:
targetlist = entry.find_all('target')
entrytemplatelist = []
for target in targetlist:
targettext = target.find('text').text
targettemplate = target.find('template').text
#Tokenize the targettext and template the same way as will be done in the Data_Augmentation file
tokentargettext = wordtokenizer(targettext)
targettext = ' '.join(tokentargettext)
tokentargettemplate = wordtokenizer(targettemplate)
if (tokentargettemplate == ['ERROR ERROR']) or (tokentargettext == ['ERROR ERROR']):
continue
targettemplatedict = {'eid': entry['eid'], 'lid': target['lid'], 'info': []}
templateissue = 'n'
#Iterate over the target text until the word index overlaps with a template indicator in the template text
for wordidx, word in enumerate(tokentargettext):
try:
if re.search(r'(__[A-Z]+_?[A-Z]+?__)', tokentargettemplate[wordidx]):
templatedict = {'tag': re.search(r'(__[A-Z]+_?[A-Z]+?__)', tokentargettemplate[wordidx]).group(1), 'wordmatches': [tokentargettext[wordidx]], 'indices': [wordidx], 'text': targettext, 'template': targettemplate, 'text_tokenized': tokentargettext, 'template_tokenized': tokentargettemplate}
nextlist = tokentargettext[wordidx+1:].copy()
for nextwordidx, nextword in enumerate(nextlist):
#If there is no next word in the template text anymore, add all remaining words to the dict.
if wordidx + 1 >= len(tokentargettemplate):
templatedict['wordmatches'].append(nextword)
templatedict['indices'].append(wordidx+1 + nextwordidx)
#Else stop if the next template word is found.
elif nextword == tokentargettemplate[wordidx+1]:
break
else:
templatedict['wordmatches'].append(nextword)
templatedict['indices'].append(wordidx+1 + nextwordidx)
targettemplatedict['info'].append(templatedict)
matchindices = templatedict['indices'].copy()
if len(matchindices) > 1:
matchindices = matchindices[1:]
for matchidx in matchindices:
tokentargettemplate.insert(matchidx, '_FILLER_')
except IndexError:
#print(tokentargettemplate)
#print(tokentargettext)
#print(targettext)
#print(e2efile)
#exit(2)
templateissue = 'y'
if templateissue == 'y':
continue
#ADD INFORMATION IF THE TEXT OVERLAPS WITH THE DATA AND WHERE IT OVERLAPS, SO THAT WE CAN CHANGE THIS WITH THE DATA AUGMENTATION
data_inputlist = entry.find('source').find_all('input')
for data_input in data_inputlist:
#TRY TO FIND N-GRAM MATCHES FOR MAX, THEN FOR MAX-1, MAX-2, etc.
#Iterate over the template info we collected
for idx, template_input in enumerate(targettemplatedict['info']):
#If the template_tag matches the data tag, let's see if there's overlapping text
if template_input['tag'] == data_input['tag']:
targettemplatedict['info'][idx].update({'data': {'attribute': data_input['attribute'], 'tag': data_input['tag'], 'value': data_input['value']}})
lexlist = template_input['indices'].copy()
ngramrange = list(range(len(lexlist), 0, -1))
ngramfound = 'n'
for ngramlen in ngramrange:
if ngramfound == 'n':
lexngramspositions = list(ngrams(lexlist, ngramlen))
lexngramspositions = [list(x) for x in lexngramspositions]
for lexngram in lexngramspositions:
wordmatchstart, wordmatchend = position_of_ngram(lexngram, lexlist)
wordmatchinput = template_input['wordmatches'][wordmatchstart:wordmatchend]
tokeninput = wordtokenizer(data_input['value'])
startposition, endposition = position_of_ngram(wordmatchinput, tokeninput)
if startposition != None:
ngramfound = 'y'
targettemplatedict['info'][idx].update({'overlap': lexngram})
break
if ngramfound == 'y':
break
#print(targettemplatedict)
entrytemplatelist.append(targettemplatedict)
fileentrytemplateinfo.append(entrytemplatelist)
allentrytemplateinfo.update({e2efile: fileentrytemplateinfo})
with open(currentpath + '/Data/AllEntryTemplateInfo.json', 'w') as outfile:
json.dump(allentrytemplateinfo, outfile, indent=4, separators=(',', ': '))
def convert_data(candidate, targettemplatedict, idxlist):
tokenizedcandidate = wordtokenizer(candidate)
l = [1, 2, 3]
datadict = {}
for output_element in targettemplatedict['info']:
replaceindices = []
for idx in idxlist:
if ('overlap' in output_element) and (idx in output_element['overlap']):
replaceindices.append([output_element['overlap'].index(idx), idx])
try:
datavalue = output_element['data']['value']
except KeyError:
print('ERROR ERROR', flush=True)
return 'ERROR ERROR'
datavaluelist = wordtokenizer(datavalue)
for replaceidx in replaceindices:
datavaluelist[replaceidx[0]] = tokenizedcandidate[replaceidx[1]]
datavaluestring = md.detokenize(datavaluelist)
datadict.update({output_element['data']['attribute']: datavaluestring})
datalist = []
for entry in datadict:
datalist.append(entry.upper() + '(' + entry + '="' + datadict[entry] + '")')
datastring = ' '.join(datalist)
return datastring
def data_augmentation(allentrytemplateinfo):
candidates125list = []
candidates250list = []
candidates500list = []
candidates1000list = []
rep = BertSampler(sim_threshold=0.001)
currentpath = os.getcwd()
if os.path.isfile(currentpath + '/DonePickle.pkl'):
previousdonelist = []
with open(currentpath + '/DonePickle.pkl', 'rb') as fr:
try:
while True:
previousdonelist.append(pickle.load(fr))
except EOFError:
pass
startsearch = 'y'
else:
startsearch = 'n'
for entrytemplateidx, entrytemplate in enumerate(allentrytemplateinfo):
for targettemplatedictidx, targettemplatedict in enumerate(entrytemplate):
if startsearch == 'y':
entryfound = 'n'
for prevdone in previousdonelist:
if (entrytemplateidx == prevdone['entrytemplateidx']) and (targettemplatedictidx == prevdone['targettemplatedictidx']):
entryfound = 'y'
break
if entryfound == 'y':
continue
else:
startsearch = 'n'
try:
doc = nlp(targettemplatedict['info'][0]['text'])
except IndexError:
continue
idxlist = []
for idx, token in enumerate(doc):
#print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_, token.shape_, token.is_alpha, token.is_stop)
if (token.tag_.startswith('NN')) or (token.pos_ == 'ADJ') or (token.pos_ == 'ADV') or (token.pos_ == 'NUM'): #or (token.pos_ == 'VERB'):
idxlist.append(idx)
#candidateslist = [o for o in rep(targettemplatedict['info'][0]['text'], idxlist, 20, dropout=0.2)]
candidateslist = [o for o in rep(targettemplatedict['info'][0]['text'], idxlist, 20, dropout=0.2)]
print(candidateslist, flush=True)
with open(currentpath + '/DonePickle.pkl', 'ab') as f:
pickle.dump({'entrytemplateidx': entrytemplateidx, 'targettemplatedictidx': targettemplatedictidx, 'candidateslist': candidateslist, 'targettemplatedict': targettemplatedict, 'idxlist': idxlist}, f)
for candidateidx, candidate in enumerate(candidateslist):
candidatedatastring = convert_data(candidate, targettemplatedict, idxlist)
if candidatedatastring == 'ERROR ERROR':
break
elif candidateidx < 1:
candidates125list.append([candidate, candidatedatastring])
candidates250list.append([candidate, candidatedatastring])
candidates500list.append([candidate, candidatedatastring])
candidates1000list.append([candidate, candidatedatastring])
elif candidateidx < 2:
candidates250list.append([candidate, candidatedatastring])
candidates500list.append([candidate, candidatedatastring])
candidates1000list.append([candidate, candidatedatastring])
elif candidateidx < 5:
candidates500list.append([candidate, candidatedatastring])
candidates1000list.append([candidate, candidatedatastring])
elif candidateidx < 10:
candidates1000list.append([candidate, candidatedatastring])
else:
break
rep = None # NOTE: clear out GPU memory
candidatesdict = {'125': candidates125list, '250': candidates250list, '500': candidates500list, '1000': candidates1000list}
for candlist in candidatesdict:
candidatestrg = [x[0] for x in candidatesdict[candlist]]
candidatessrc = [x[1] for x in candidatesdict[candlist]]
alltrgstring = '\n'.join(candidatestrg)
allsrcstring = '\n'.join(candidatessrc)
with open(currentpath + '/Predictions/Extended' + candlist + '_trg.txt', 'wb') as f:
f.write(bytes(alltrgstring, 'UTF-8'))
with open(currentpath + '/Predictions/Extended' + candlist + '_src.txt', 'wb') as f:
f.write(bytes(allsrcstring, 'UTF-8'))
def collect_dict():
currentpath = os.getcwd()
with open(currentpath + '/Data/AllEntryTemplateInfo.json', 'r') as infile:
allentrytemplateinfo = json.load(infile)
fulltrain = []
for e2efile in allentrytemplateinfo:
if '\\train\\' in e2efile:
fulltrain += allentrytemplateinfo[e2efile]
data_augmentation(fulltrain)
#allentrytemplateinfo = main()
collect_dict()
|
TallChris91/Neural-Data-to-Text-Small-Datasets
|
Data_Augmentation/Mark_Words_E2E.py
|
Mark_Words_E2E.py
|
py
| 14,690 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18399278652
|
# SPDX-License-Identifier: GPL-2.0-only
import threading
from pprint import pprint
import pytest
from flask import url_for
import libeagle
from tests.simulator import eagle200sim
import re
@pytest.fixture(scope="session", autouse=True)
def app():
app = eagle200sim.create_app()
return app
@pytest.mark.usefixtures("live_server")
class TestLiveServer:
def test_eagle200(self):
url = url_for("process_request", _external=True)
port = int(re.search(":([0-9]+)/", url)[1])
conn = libeagle.Connection("localhost", "0077dd", "6e61a3a94882eef9", port=port, debug=True)
devices = conn.device_list()
pprint(devices)
details = conn.device_details(devices[0]["HardwareAddress"])
pprint(details)
query = conn.device_query(
devices[0]["HardwareAddress"],
details[0]["Name"],
details[0]["Variables"][0],
)
pprint(query)
assert (
query[0]["Variables"]["zigbee:InstantaneousDemand"] == "21.499 kW"
)
query = conn.device_query(devices[0]["HardwareAddress"])
pprint(query)
assert (
query[0]["Variables"]["zigbee:Message"] == "Hello, World!"
)
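# Running note (assumption): the live_server fixture comes from pytest-flask, so with
# pytest-flask, libeagle and the bundled simulator installed this file runs with a plain
# `python -m pytest tests/test_eagle200.py`; the simulator listens on a random local port
# that the test extracts from url_for(..., _external=True).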
|
lrusak/py-eagle-200
|
tests/test_eagle200.py
|
test_eagle200.py
|
py
| 1,240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8978366140
|
import os
import pandas as pd
from darts import TimeSeries
from darts.models import LightGBMModel
from enfobench import AuthorInfo, ModelInfo, ForecasterType
from enfobench.evaluation.server import server_factory
from enfobench.evaluation.utils import periods_in_duration
class DartsLightGBMModel:
def __init__(self, seasonality: str):
self.seasonality = seasonality.upper()
def info(self) -> ModelInfo:
return ModelInfo(
name=f"Darts.LightGBM.Direct.{self.seasonality}",
authors=[
AuthorInfo(name="Mohamad Khalil", email="[email protected]")
],
type=ForecasterType.point,
params={
"seasonality": self.seasonality,
},
)
def forecast(
self,
horizon: int,
history: pd.DataFrame,
past_covariates: pd.DataFrame | None = None,
future_covariates: pd.DataFrame | None = None,
**kwargs,
) -> pd.DataFrame:
# Fill missing values
history = history.fillna(history.y.mean())
# Create model
periods = periods_in_duration(history.index, duration=self.seasonality)
model = LightGBMModel(
lags=list(range(-periods, 0)),
output_chunk_length=horizon,
multi_models=False,
)
# Fit model
series = TimeSeries.from_dataframe(history, value_cols=["y"])
model.fit(series)
# Make forecast
pred = model.predict(horizon)
# Postprocess forecast
forecast = (
pred.pd_dataframe().rename(columns={"y": "yhat"}).fillna(history.y.mean())
)
return forecast
# Load parameters
seasonality = os.getenv("ENFOBENCH_MODEL_SEASONALITY")
# Instantiate your model
model = DartsLightGBMModel(seasonality=seasonality)
# Create a forecast server by passing in your model
app = server_factory(model)
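# Serving note (assumptions, not from this file): enfobench's server_factory is expected
# to return an ASGI app, so a typical invocation would be `uvicorn main:app --port 3000`.
# ENFOBENCH_MODEL_SEASONALITY is expected to hold a duration string such as "1D" or "7D",
# which periods_in_duration() converts into the number of lags spanning that duration.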
|
attila-balint-kul/energy-forecast-benchmark-examples
|
models/dt-lightgbm-direct/src/main.py
|
main.py
|
py
| 1,933 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26038625786
|
from __future__ import annotations
import logging
import os
from dataclasses import dataclass
from typing import Iterable
from pants.backend.cc.subsystems.compiler import CCSubsystem, ExternalCCSubsystem
from pants.backend.cc.target_types import CCLanguage
from pants.core.util_rules.archive import ExtractedArchive
from pants.core.util_rules.archive import rules as archive_rules
from pants.core.util_rules.system_binaries import (
BinaryNotFoundError,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import DownloadFile
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest
from pants.engine.platform import Platform
from pants.engine.process import Process
from pants.engine.rules import Get, Rule, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class CCToolchainRequest:
"""A request for a C/C++ toolchain."""
language: CCLanguage
@dataclass(frozen=True)
class CCToolchain:
"""A C/C++ toolchain."""
compiler: str
# include_directories: tuple[str, ...] = () # TODO as part of the `check` goal to ensure source roots are handled
compiler_flags: tuple[str, ...] = ()
compiler_definitions: tuple[str, ...] = ()
linker_flags: tuple[str, ...] = ()
digest: Digest = EMPTY_DIGEST
def __post_init__(self):
# TODO: Should this error out to notify the user of a mistake? Or silently handle
# Or just ensure all defines have -D right now?
if self.compiler_definitions:
sanitized_definitions = [define.lstrip("-D") for define in self.compiler_definitions]
object.__setattr__(self, "compiler_definitions", tuple(sanitized_definitions))
@property
def compile_command(self) -> tuple[str, ...]:
"""The command to compile a C/C++ source file."""
command = [self.compiler, *self.compiler_definitions, *self.compiler_flags]
return tuple(filter(None, command))
@property
def link_command(self) -> tuple[str, ...]:
"""The command to link a C/C++ binary."""
command = [self.compiler, *self.linker_flags]
return tuple(filter(None, command))
async def _executable_path(binary_names: Iterable[str], search_paths: Iterable[str]) -> str:
"""Find the path to an executable by checking whether the executable supports a version
option."""
for name in binary_names:
binary_paths = await Get( # noqa: PNT30: requires triage
BinaryPaths,
BinaryPathRequest(
binary_name=name,
search_path=search_paths,
test=BinaryPathTest(args=["-v"]),
),
)
if not binary_paths or not binary_paths.first_path:
continue
return binary_paths.first_path.path
raise BinaryNotFoundError(f"Could not find any of '{binary_names}' in any of {search_paths}.")
async def _setup_downloadable_toolchain(
request: CCToolchainRequest,
subsystem: ExternalCCSubsystem,
platform: Platform,
) -> CCToolchain:
"""Set up a toolchain from a downloadable archive."""
download_file_request = subsystem.get_request(platform).download_file_request
maybe_archive_digest = await Get(Digest, DownloadFile, download_file_request)
extracted_archive = await Get(ExtractedArchive, Digest, maybe_archive_digest)
# Populate the toolchain for C or C++ accordingly
if request.language == CCLanguage.CXX:
return CCToolchain(
compiler=subsystem.cxx_executable,
compiler_flags=tuple(subsystem.cxx_compiler_flags),
compiler_definitions=tuple(subsystem.cxx_definitions),
digest=extracted_archive.digest,
)
return CCToolchain(
compiler=subsystem.c_executable,
compiler_flags=tuple(subsystem.c_compiler_flags),
compiler_definitions=tuple(subsystem.c_definitions),
digest=extracted_archive.digest,
)
async def _setup_system_toolchain(
request: CCToolchainRequest, subsystem: CCSubsystem
) -> CCToolchain:
"""Set up a toolchain from the user's host system."""
# Sanitize the search paths in case the "<PATH>" is specified
raw_search_paths = list(subsystem.search_paths)
if "<PATH>" in raw_search_paths:
i = raw_search_paths.index("<PATH>")
env = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))
system_path = env.get("PATH", "")
raw_search_paths[i : i + 1] = system_path.split(os.pathsep)
search_paths = tuple(OrderedSet(raw_search_paths))
# Populate the toolchain for C or C++ accordingly
if request.language == CCLanguage.CXX:
cxx_executable = await _executable_path(tuple(subsystem.cxx_executable), search_paths)
return CCToolchain(
cxx_executable,
compiler_flags=tuple(subsystem.cxx_compiler_flags),
compiler_definitions=tuple(subsystem.cxx_definitions),
)
c_executable = await _executable_path(tuple(subsystem.c_executable), search_paths)
return CCToolchain(
c_executable,
compiler_flags=tuple(subsystem.c_compiler_flags),
compiler_definitions=tuple(subsystem.c_definitions),
)
@rule(desc="Setup the CC Toolchain", level=LogLevel.DEBUG)
async def setup_cc_toolchain(
request: CCToolchainRequest,
subsystem: CCSubsystem,
external_subsystem: ExternalCCSubsystem,
platform: Platform,
) -> CCToolchain:
"""Set up the C/C++ toolchain."""
if external_subsystem.url_template:
return await _setup_downloadable_toolchain(request, external_subsystem, platform)
else:
return await _setup_system_toolchain(request, subsystem)
@dataclass(frozen=True)
class CCProcess:
args: tuple[str, ...]
language: CCLanguage
description: str
input_digest: Digest = EMPTY_DIGEST
output_files: tuple[str, ...] = ()
level: LogLevel = LogLevel.INFO
@rule(desc="Setup a CC Process loaded with the CCToolchain", level=LogLevel.DEBUG)
async def setup_cc_process(request: CCProcess) -> Process:
"""Set up a C/C++ process.
This rule will load the C/C++ toolchain based on the requested language. It will then return a
Process that can be run to compile or link a C/C++ source file.
"""
toolchain = await Get(CCToolchain, CCToolchainRequest(request.language))
# TODO: What if this is for linking instead of compiling?
# TODO: From tdyas: Should there then be a CCCompilerProcess and CCLinkerProcess?
# Investigate further during `check` PR
compiler_command = list(toolchain.compile_command)
# If downloaded, this will be the toolchain, otherwise empty digest
immutable_digests = {"__toolchain": toolchain.digest}
if toolchain.digest != EMPTY_DIGEST:
compiler_command[0] = f"__toolchain/{compiler_command[0]}"
argv = tuple(compiler_command) + request.args
return Process(
argv=argv,
input_digest=request.input_digest,
output_files=request.output_files,
description=request.description,
level=request.level,
immutable_input_digests=immutable_digests,
# env={"__PANTS_CC_COMPILER_FINGERPRINT": toolchain.compiler.fingerprint},
)
def rules() -> Iterable[Rule | UnionRule]:
return (
*collect_rules(),
*archive_rules(),
)
|
pantsbuild/pants
|
src/python/pants/backend/cc/util_rules/toolchain.py
|
toolchain.py
|
py
| 7,547 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
73829193786
|
# Usage:
import asyncio
from starknet_py.net.gateway_client import GatewayClient
from starknet_py.net.networks import TESTNET
from starknet_py.net import AccountClient, KeyPair
from starknet_py.contract import Contract
from starknet_py.net.models.chains import StarknetChainId
uuid = '2f530e87-a2c5-47c9-8ebf-e704dc06e9d8'
rpc_endpoint = 'http://[email protected]:5061'
private_key = 0x417ea85a3231ed89e745f9623ee2c32b
player_address = 0x6fb14af9a52544466d0b00b536930d57c49f9140c3ee989102a930a88cec521
contract_address = 0x22307a497c26e0766e6701e3ed78c21166ba691e9fad47d2f3e836cbbdaf52c
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
async def run():
gateway_client = GatewayClient(rpc_endpoint, TESTNET)
account_client = AccountClient(
client=gateway_client,
address=player_address,
key_pair=KeyPair.from_private_key(private_key),
chain=StarknetChainId.TESTNET,
supported_tx_version=1,
)
block = await gateway_client.get_block(block_number=1)
print(block)
ts = block.timestamp
print('timestamp1', ts)
contract = await Contract.from_address(contract_address, account_client)
print(contract.functions)
call = contract.functions['solve'].prepare()
tx_r = await account_client.execute(call, auto_estimate=True)
await account_client.wait_for_tx(tx_r.transaction_hash)
print(tx_r)
print(tx_r.transaction_hash)
if __name__ == "__main__":
asyncio.run(run())
|
feltroidprime/CTF-starknet-cc
|
challenges/solve-me/deploy.py
|
deploy.py
|
py
| 1,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39332700051
|
# -*- coding: utf-8 -*-
import os
import networkx as nx
import sorcery_read
import sorcery_make
import sorcery_save
#
# Main processing
#
def main(excelFileName):
# Create the output directory
out_dir = os.path.splitext(
os.path.basename( excelFileName ) )[0]
if not os.path.exists( out_dir ):
os.makedirs( out_dir )
# Read the Excel file
node, edge, node_all, edge_all = sorcery_read.read_xlsx(excelFileName)
# Draw the overall map
if True:
#if False:
map_all = '全体図'
node_all_list = node_all[map_all]['node']
node_all_oder = node_all[map_all]['oder']
figsize_all = node_all[map_all]['figsize']
# Create the graph
G = sorcery_make.make_network(node_all_list, edge_all)
# Output file name
out_file_all = '{0}/{1:0>2}_{2}.png'.format(
out_dir, node_all_oder, map_all)
print(os.path.basename(out_file_all))
# Output the graph
sorcery_save.save_network(G,
title=map_all,
out_file=out_file_all,
figsize=figsize_all,
)
# Create and draw a graph for each map
for map, map_dict in node.items():
node_list = map_dict['node']
node_oder = map_dict['oder']
figsize = map_dict['figsize']
# For debugging: process only a specific map
if True:
#if False:
if node_oder!=31:
continue
# Create the graph
G = sorcery_make.make_network(node_list, edge)
# Output file name
out_file = '{0}/{1:0>2}_{2}.png'.format(
out_dir, node_oder, map)
print(os.path.basename(out_file))
# Output the graph
sorcery_save.save_network(G,
title=map,
out_file=out_file,
figsize=figsize,
)
if __name__ == '__main__':
# main('Sorcery01.xlsx')
# main('Sorcery02.xlsx')
main('Sorcery03.xlsx')
# main('Sorcery04.xlsx')
|
NaotoNAKATA/my_sample
|
python/networkx/sorcery_graph.py
|
sorcery_graph.py
|
py
| 1,779 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
13239474097
|
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
from numpy import *
from matplotlib.pyplot import *
import sys
def solver(I, a, T, dt, theta):
"""Solve u'=-a*u, u(0)=I, for t in (0,T]; step: dt."""
dt = float(dt) # avoid integer division
N = int(round(old_div(T,dt))) # no of time intervals
T = N*dt # adjust T to fit time step dt
u = zeros(N+1) # array of u[n] values
t = linspace(0, T, N+1) # time mesh
u[0] = I # assign initial condition
for n in range(0, N): # n=0,1,...,N-1
u[n+1] = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)*u[n]
return u, t
def exact_solution(t, I, a):
return I*exp(-a*t)
def explore(I, a, T, dt, theta=0.5, makeplot=True):
"""
Run a case with the solver, compute error measure,
and plot the numerical and exact solutions (if makeplot=True).
"""
u, t = solver(I, a, T, dt, theta) # Numerical solution
u_e = exact_solution(t, I, a)
e = u_e - u
E = sqrt(dt*sum(e**2))
if makeplot:
figure() # create new plot
t_e = linspace(0, T, 1001) # very fine mesh for u_e
u_e = exact_solution(t_e, I, a)
plot(t, u, 'r--o') # red dashes w/circles
plot(t_e, u_e, 'b-') # blue line for u_e
legend(['numerical', 'exact'])
xlabel('t')
ylabel('u')
title('Method: theta-rule, theta=%g, dt=%g' % (theta, dt))
theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
savefig('%s_%g.png' % (theta2name[theta], dt))
show()
return E
def define_command_line_options():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--I', '--initial_condition', type=float,
default=1.0, help='initial condition, u(0)',
metavar='I')
parser.add_argument('--a', type=float,
default=1.0, help='coefficient in ODE',
metavar='a')
parser.add_argument('--T', '--stop_time', type=float,
default=1.0, help='end time of simulation',
metavar='T')
parser.add_argument('--makeplot', action='store_true',
help='display plot or not')
parser.add_argument('--dt', '--time_step_values', type=float,
default=[1.0], help='time step values',
metavar='dt', nargs='+', dest='dt_values')
return parser
def read_command_line(use_argparse=True):
if use_argparse:
parser = define_command_line_options()
args = parser.parse_args()
print('I={}, a={}, makeplot={}, dt_values={}'.format(
args.I, args.a, args.makeplot, args.dt_values))
return args.I, args.a, args.T, args.makeplot, args.dt_values
else:
if len(sys.argv) < 6:
print('Usage: %s I a on/off dt1 dt2 dt3 ...' % \
sys.argv[0]); sys.exit(1)
I = float(sys.argv[1])
a = float(sys.argv[2])
T = float(sys.argv[3])
makeplot = sys.argv[4] in ('on', 'True')
dt_values = [float(arg) for arg in sys.argv[5:]]
return I, a, T, makeplot, dt_values
def main():
I, a, T, makeplot, dt_values = read_command_line()
r = {}
for theta in 0, 0.5, 1:
E_values = []
for dt in dt_values:
E = explore(I, a, T, dt, theta, makeplot=False)
E_values.append(E)
# Compute convergence rates
m = len(dt_values)
r[theta] = [old_div(log(old_div(E_values[i-1],E_values[i])),
log(old_div(dt_values[i-1],dt_values[i])))
for i in range(1, m, 1)]
for theta in r:
print('\nPairwise convergence rates for theta=%g:' % theta)
print(' '.join(['%.2f' % r_ for r_ in r[theta]]))
return r
def verify_convergence_rate():
r = main()
tol = 0.1
expected_rates = {0: 1, 1: 1, 0.5: 2}
for theta in r:
r_final = r[theta][-1]
diff = abs(expected_rates[theta] - r_final)
if diff > tol:
return False
return True # all tests passed
if __name__ == '__main__':
if 'verify_rates' in sys.argv:
sys.argv.remove('verify_rates')
if not '--dt' in sys.argv:
print('Must assign several dt values through the --dt option')
sys.exit(1) # abort
if verify_convergence_rate():
pass
else:
print('Bug in the implementation!')
else:
# Perform simulations
main()
|
hplgit/doconce
|
doc/src/slides/src/solver.py
|
solver.py
|
py
| 4,682 |
python
|
en
|
code
| 305 |
github-code
|
6
|
20665108806
|
import os
from dataclasses import dataclass, field
from pathlib import Path
from .file_utils import CsvWriter, JsonWriter, PickleWriter
@dataclass
class File:
""" Класс, представляющий файл. """
name: str
size: int
parent: 'Directory'
path: str
def __str__(self):
return f"File: {self.parent}/{self.name}\n" \
f"Size: {self.size}\n"
def __dict__(self):
return {
"type": "file",
"name": self.name,
"size": self.size,
"path": f'{self.path}'
}
@dataclass
class Directory:
"""Класс, представляющий директорию. """
name: str
parent: 'Directory'
path: str
size: int = 0
files: list = field(default_factory=list)
subdirectories: list = field(default_factory=list)
def __str__(self):
return f"Directory: {self.parent}/{self.name}\n" \
f"Size: {self.size}\n" \
f"File count: {len(self.files)}\n" \
f"Subdirectory count: {len(self.subdirectories)}\n"
def to_dict(self):
return {
"type": "directory",
"name": self.name,
"size": self.size,
"files": [file.__dict__() for file in self.files],
"path": f'{self.path}'
}
def calculate(self):
for directory in self.subdirectories:
self.size += directory.size
@dataclass
class DirectoryManager:
""" Класс для управления директориями. """
directories = {}
path: Path
def traverse(self):
""" Рекурсивный обход директории. """
for dirpath, dirnames, filenames in os.walk(self.path):
if dirpath not in self.directories.keys():
directory = Directory(os.path.basename(dirpath), None, dirpath)
else:
directory = self.directories[dirpath]
for filename in filenames:
file_path = os.path.join(dirpath, filename)
file_size = os.path.getsize(file_path)
file = File(filename, file_size, directory, dirpath)
directory.files.append(file)
directory.size += file_size
for dirname in dirnames:
sub_directory = Directory(dirname, directory, dirpath)
directory.subdirectories.append(sub_directory)
self.directories[os.path.join(dirpath, dirname)] = sub_directory
self.directories[dirpath] = directory
for directory in self.directories.values():
directory.calculate()
def write_files(self, output_directory):
""" Запись результатов обхода директории в файл. """
self.traverse()
output_directory = Path(output_directory)
os.makedirs(output_directory, exist_ok=True)
directories = [d.to_dict() for d in self.directories.values()]
JsonWriter.write(output_directory / "directory.json", directories)
CsvWriter.write(output_directory / "directory.csv", directories)
PickleWriter.write(output_directory / "directory.pickle", directories)
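# --- Hypothetical usage sketch (output directory name is an assumption) ---
# Because of the relative import of file_utils above, run this as a module
# (python -m <package>.directory_traversal); it walks the current directory and
# writes directory.json/.csv/.pickle into ./traversal_report.
if __name__ == "__main__":
    manager = DirectoryManager(path=Path("."))
    manager.write_files("traversal_report")
    for directory in manager.directories.values():
        print(directory)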
|
nadia3373/GeekBrains-Python-Developer
|
Diving into Python/s10/directory_traversal/directory_traversal.py
|
directory_traversal.py
|
py
| 3,249 |
python
|
en
|
code
| 1 |
github-code
|
6
|
407829777
|
import time
from subprocess import call
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
t = time.time()
reg = [1.0,2.04054585,-20.38379773,-3.93128902]
params = [0.5,0.2,0.1,0.3,0.006]
time_ = [0,10,100]
reg_s = [str(k) for k in reg]
params_s = [str(k) for k in params]
time_s = [str(k) for k in time_]
call(["./bt1"]+reg_s+params_s+time_s)
t = time.time() - t
print("Time: ",t)
#f = open("test3.txt")
#li = list(f)
#print(' '.join(li))
#df = pd.read_csv("test3.txt",delimiter="\t")
t = time.time()
# 0.000222 s when you specify time, else 2 times worse
arr = np.fromfile("test3.txt",sep='\t',count=-1)
#print(arr)
t = time.time() - t
print("Time: ",t)
|
radluki/InvertedPendulum
|
test2/reader.py
|
reader.py
|
py
| 681 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27259261820
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""813. Largest Sum of Averages [Naive]
"""
class Solution:
def largestSumOfAverages(self, nums, k):
n = len(nums)
summ = [0]*n
summ[0] = nums[0]
for i in range(1, n):
summ[i] = summ[i-1] + nums[i]
return self.helper(nums, summ, n, 0, k)
def helper(self, nums, summ, n, s, k):
if k == 1:
return (summ[n-1] - summ[s] + nums[s]) / (n - s)
maxx = float('-inf')
for i in range(s, n-k+1):
avg_sum = (summ[i] - summ[s] + nums[s]) / (i-s+1) + self.helper(nums, summ, n, i+1, k-1)
maxx = max(maxx, avg_sum)
return maxx
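# Worked example (classic test case): for nums = [9, 1, 2, 3, 9] and k = 3 the best
# partition is [9], [1, 2, 3], [9], giving averages 9 + 2 + 9 = 20.0.
if __name__ == "__main__":
    print(Solution().largestSumOfAverages([9, 1, 2, 3, 9], 3))  # 20.0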
|
asperaa/back_to_grind
|
DP/813. Largest Sum of Averages_Naive.py
|
813. Largest Sum of Averages_Naive.py
|
py
| 740 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27980959232
|
import glob
import sqlite3
import csv
import time;
conn = sqlite3.connect('gdax_0.1.db')
cur = conn.cursor()
cur.execute("SELECT * FROM quotes_BTC_LTC") # WHERE start >?", (1420160461, ))
results1 = cur.fetchall()
conn2 = sqlite3.connect('gdaxLTC.db')
cur2 = conn2.cursor()
cur2.execute("SELECT * FROM quotes_BTC_LTC") # WHERE start >?", (1420160461, ))
results2 = cur2.fetchall()
for i in range(0, len(results1)):
if(results1[i] != results2[i]):
print("Different")
# tsListResults = []
#
# # print(results1.pop(0))
# # print(results1.pop())
#
# for row in results1:
# tup = (row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
# #print(tup)
# tsListResults.append(row[1])
# # if (row[1] == 1509949420 or row[1] == 1375731700 or row[1] == 1417674740 or row[1] == 1501560820 or row[1] == 1493172220):
# # print(tup)
#
# #tsListResults.sort()
#
# tsList = []
# for i in range(1471407360, 1504213860, 60):
# tsList.append(i)
#
# # diff = list(set(tsList) - (set(tsListResults)))
# diff = list(set(tsList).symmetric_difference(set(tsListResults)))
# diff.sort()
# for row in diff:
# print(row)
#
# print("Start", min(tsListResults))
# print("End", max(tsListResults))
|
HristoHr/backTestEngine
|
CheckDataCompletenessDB.py
|
CheckDataCompletenessDB.py
|
py
| 1,226 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25389647152
|
import nltk
import numpy as np
import pandas as pd
import re
"""This code aims to perform text preprocessing and save processed texts as a new file"""
def utils_preprocess_text(text, flg_stemm=False, flg_lemm=True, lst_stopwords=None):
"""Text processing: remove stopwords, stem or lemma"""
## clean (convert to lowercase and remove punctuations and characters and then strip)
text = re.sub(r"[^\w\s]", "", str(text).lower().strip())
## Tokenize (convert from string to list)
lst_text = text.split()
## remove Stopwords
if lst_stopwords is not None:
lst_text = [word for word in lst_text if word not in lst_stopwords]
## Stemming (remove -ing, -ly, ...)
if flg_stemm == True:
ps = nltk.stem.porter.PorterStemmer()
lst_text = [ps.stem(word) for word in lst_text]
## Lemmatisation (convert the word into root word)
if flg_lemm == True:
lem = nltk.stem.wordnet.WordNetLemmatizer()
lst_text = [lem.lemmatize(word) for word in lst_text]
## back to string from list
text = " ".join(lst_text)
return text
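def _demo_preprocess():
    """Illustrative helper (not called by main): shows the effect of
    utils_preprocess_text on one sentence, assuming the NLTK stopwords and
    wordnet corpora are already downloaded."""
    sample = "The cats are running quickly through the gardens!"
    cleaned = utils_preprocess_text(
        sample,
        flg_stemm=False,
        flg_lemm=True,
        lst_stopwords=nltk.corpus.stopwords.words("english"),
    )
    print(cleaned)  # roughly: "cat running quickly garden"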
def main(
file_path="Suicide_Detection.csv",
lst_stopwords=nltk.corpus.stopwords.words("english"),
):
df = pd.read_csv(file_path, index_col=False)
df = df.iloc[:, 1:]
# class transformation to 0 and 1
df["y"] = df["class"].map({"suicide": "1", "non-suicide": "0"})
df["text_clean"] = df["text"].apply(
lambda x: utils_preprocess_text(
x, flg_stemm=False, flg_lemm=True, lst_stopwords=lst_stopwords
)
)
df.to_csv("processed_trainData.csv", index=False)
if __name__ == "__main__":
main()
|
nogibjj/Suicide-Text-Classification
|
a_01_text_preprocessing.py
|
a_01_text_preprocessing.py
|
py
| 1,659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33255293229
|
from typing import Optional
from fastapi.routing import APIRouter
from pydantic.main import BaseModel
from mongo import user_col, list_col
from auth_repo import ar
from dependencies import verify_token_dependency
from bson.objectid import ObjectId
from fastapi import Depends
user_router = APIRouter(
prefix="/users", dependencies=[Depends(verify_token_dependency)]
)
common_find_options_user = {"password": 0, "lists": 0}
@user_router.get("/")
async def get_all_users():
search_res = user_col.find(
{"_id": {"$ne": ObjectId(ar.get_current_user_id())}}, {"password": 0}
)
result_list = []
for user in search_res:
user["_id"] = str(user.get("_id"))
result_list.append(user)
return result_list
@user_router.get("/me")
async def get_me():
me_res = user_col.find_one(
{"_id": ObjectId(ar.get_current_user_id())}, common_find_options_user
)
me_res["_id"] = str(me_res.get("_id"))
return me_res
class EditMeBody(BaseModel):
profile_emoji: str
@user_router.put("/me")
async def edit_my_attribs(body: EditMeBody):
edit_res = user_col.update_one(
{"_id": ObjectId(ar.get_current_user_id())},
{"$set": {"profile_emoji": body.profile_emoji}},
)
return {
"id": edit_res.upserted_id,
"raw": edit_res.raw_result,
"metas": {
"matched": edit_res.matched_count,
"modified": edit_res.modified_count,
},
}
@user_router.get("/{user_id}/lists")
async def get_lists_by_user(user_id: str):
res = user_col.find_one({"_id": ObjectId(user_id)}, {"password": 0})
for idx, list_id in enumerate(res.get("lists")):
list_res = list_col.find_one(filter={"_id": ObjectId(list_id)})
list_res["_id"] = str(list_res.get("_id"))
res["lists"][idx] = list_res
res["_id"] = str(res.get("_id"))
return res
@user_router.get("/search")
async def search_user_by_full_name(q: str):
search_res = user_col.find({"$text": {"$search": q}}, common_find_options_user)
result_list = []
for user in search_res:
user["_id"] = str(user.get("_id"))
result_list.append(user)
return result_list
@user_router.get("/{user_id}")
async def get_user_by_id(user_id: str):
res = user_col.find_one({"_id": ObjectId(user_id)}, common_find_options_user)
res["_id"] = str(res.get("_id"))
return res
|
snokpok/listlive
|
backend/src/routers/user.py
|
user.py
|
py
| 2,432 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31591116775
|
'''
Created on 2019-08-29
@author: MR.Tree
'''
def save_txt(wea_group):
i=len(wea_group)
print('总条数:',i)
for t in wea_group:
my_file=open('E:\\weather_date01.txt','a')
my_file.write('\n'+t)
print('---写入完成---')
my_file.close()
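# --- Hypothetical usage sketch ---
# Appends each record on its own line to the hard-coded E:\weather_date01.txt path
# used above; the sample rows are made up for illustration.
if __name__ == '__main__':
    save_txt(['2019-08-29 Beijing sunny 25C', '2019-08-30 Beijing cloudy 22C'])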
|
roxasqiao/get_weather
|
get_weather/save_txt.py
|
save_txt.py
|
py
| 312 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
30522562696
|
from django.urls import path
from django.views.generic import TemplateView
import mainapp.views as views
app_name = 'mainapp'
urlpatterns = [
path('',
views.WorkoutListView.as_view(),
name='index'),
path('about/',
TemplateView.as_view(template_name='mainapp/about.html'),
name='about'),
path('workout/<int:pk>/',
views.WorkoutDetailView.as_view(),
name='workout'),
path('workout/add/',
views.WorkoutCreateView.as_view(),
name='workout-add'),
path('workout/<int:pk>/update/',
views.WorkoutUpdateView.as_view(),
name='workout-update'),
path('workout/<int:pk>/delete/',
views.WorkoutDeleteView.as_view(),
name='workout-delete'),
path('schedule/',
views.ScheduleListView.as_view(),
name='schedule'),
path('test/',
TemplateView.as_view(template_name='mainapp/test.html'),
name='test'),
]
|
galla-okto/otus_training_site
|
mainapp/urls.py
|
urls.py
|
py
| 969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74977715387
|
import csv
import re
import logging
import gzip
import io
import sys
import os
import yaml
from dipper.sources.ZFIN import ZFIN
from dipper.sources.WormBase import WormBase
from dipper.sources.Source import Source
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper.utils.GraphUtils import GraphUtils
LOG = logging.getLogger(__name__)
# get gene annotation from current.geneontology.com,
# which is the last official release (but not the bleeding edge)
GOGA = 'http://current.geneontology.org/annotations'
FTPEBI = 'ftp://ftp.uniprot.org/pub/databases/' # best for North America
UPCRKB = 'uniprot/current_release/knowledgebase/'
# large entries in field 7 of ZFIN require this:
csv.field_size_limit(sys.maxsize)
class GeneOntology(Source):
"""
This is the parser for the
[Gene Ontology Annotations](http://www.geneontology.org),
from which we process gene-process/function/subcellular
location associations.
We generate the GO graph to include the following information:
* genes
* gene-process
* gene-function
* gene-location
We process only a subset of the organisms:
Status: IN PROGRESS / INCOMPLETE
"""
gaf_columns = [ # GAF2.1 files contain the following columns:
'DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence Code',
'With (or) From', # list possible w/pipe(or) w/comma(and) +both
'Aspect',
'DB_Object_Name',
'DB_Object_Synonym',
'DB_Object_Type',
'Taxon and Interacting taxon',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID'
]
files = {
'9615': { # Canis lupus familiaris
'file': 'goa_dog.gaf.gz',
'url': GOGA + '/goa_dog.gaf.gz',
'columnns': gaf_columns
},
'7227': { # Drosophila melanogaster
'file': 'fb.gaf.gz',
'url': GOGA + '/fb.gaf.gz',
'columnns': gaf_columns
},
'7955': { # Danio rerio
'file': 'zfin.gaf.gz',
'url': GOGA + '/zfin.gaf.gz',
'columnns': gaf_columns
},
'10090': { # Mus musculus
'file': 'mgi.gaf.gz',
'url': GOGA + '/mgi.gaf.gz',
'columnns': gaf_columns
},
'10116': { # Rattus norvegicus
'file': 'rgd.gaf.gz',
'url': GOGA + '/rgd.gaf.gz',
'columnns': gaf_columns
},
'6239': { # Caenorhabditis elegans
'file': 'wb.gaf.gz',
'url': GOGA + '/wb.gaf.gz',
'columnns': gaf_columns
},
'9823': { # Sus scrofa
'file': 'goa_pig.gaf.gz',
'url': GOGA + '/goa_pig.gaf.gz',
'columnns': gaf_columns
},
'9031': { # Gallus gallus
'file': 'goa_chicken.gaf.gz',
'url': GOGA + '/goa_chicken.gaf.gz',
'columnns': gaf_columns
},
'9606': { # Homo sapiens
'file': 'goa_human.gaf.gz',
'url': GOGA + '/goa_human.gaf.gz',
'columnns': gaf_columns
},
'9913': { # Bos taurus
'file': 'goa_cow.gaf.gz',
'url': GOGA + '/goa_cow.gaf.gz',
'columnns': gaf_columns
},
'559292': { # Saccharomyces cerevisiae 4932
'file': 'sgd.gaf.gz',
'url': GOGA + '/sgd.gaf.gz',
'columnns': gaf_columns
},
'4896': { # Schizosaccharomyces pombe (yeast)
'file': 'pombase.gaf.gz',
'url': GOGA + '/pombase.gaf.gz',
'columnns': gaf_columns
},
'5782': { # Dictyostelium (slime mold genus)
'file': 'dictybase.gaf.gz',
'url': GOGA + '/dictybase.gaf.gz',
'columnns': gaf_columns
},
'5052': { # Aspergillus (fungi) http://www.aspergillusgenome.org/
'file': 'aspgd.gaf.gz',
'url': GOGA + '/aspgd.gaf.gz',
'columnns': gaf_columns
},
# consider this after most others - should this be part of GO?
# 'multispecies': {
# 'file': 'gene_association.goa_uniprot.gz',
# 'url': FTPEBI + 'GO/goa/UNIPROT/gene_association.goa_uniprot.gz'},
# 'go-references': { # does not seem to be used
# 'file': 'GO.references',
# # Quoth the header of this file: "This file is DEPRECATED.
# # Please see go-refs.json relative to this location"
# # (http://current.geneontology.org/metadata/go-refs.json)
# 'url': 'http://www.geneontology.org/doc/GO.references'
# },
'idmapping_selected': {
# 9.7GB mapping file takes hours to DL ...
# maps UniProt to Ensembl & more (which we imostly gnore)
# replace w/ Ensembl rdf? --- no, current approach seems most canonical
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/
# idmapping/idmapping_selected.tab.gz
'file': 'idmapping_selected.tab.gz',
'url': FTPEBI + UPCRKB + 'idmapping/idmapping_selected.tab.gz',
# ftp://ftp.uniprot.org
# /pub/databases/uniprot/current_release/knowledgebase/idmapping/README
'columns': [
'UniProtKB-AC',
'UniProtKB-ID',
'GeneID (EntrezGene)',
'RefSeq',
'GI',
'PDB',
'GO',
'UniRef100',
'UniRef90',
'UniRef50',
'UniParc',
'PIR',
'NCBI-taxon',
'MIM',
'UniGene',
'PubMed',
'EMBL',
'EMBL-CDS',
'Ensembl',
'Ensembl_TRS',
'Ensembl_PRO',
'Additional PubMed'
]
},
'gaf-eco-mapping': {
'file': 'gaf-eco-mapping.yaml',
'url': '/'.join((Source.DIPPERCACHE, 'go', 'gaf-eco-mapping.yaml')),
}
}
# a set of synomym curie prefixes we choose not to propagate as uri
# this takes a quarter million warrnings out of the log files
wont_prefix = [
'zgc', 'wu', 'si', 'im', 'BcDNA', 'sb', 'anon-EST', 'EG', 'id', 'zmp',
'BEST', 'BG', 'hm', 'tRNA', 'NEST', 'xx']
def __init__(self,
graph_type,
are_bnodes_skolemized,
data_release_version=None,
tax_ids=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='go',
ingest_title='Gene Ontology',
ingest_url='http://www.geneontology.org',
ingest_logo='source-geneontology.png',
license_url=None,
data_rights='http://geneontology.org/page/use-and-license'
# file_handle=None
)
self.test_ids = []
# note: dipper-etl defaults tax_ids to '9606'
# note: sorting tax_ids for stable digest
if tax_ids is not None and [] != set(tax_ids).difference(['9606']):
LOG.info('Have %s given as taxon to ingest', str(tax_ids))
self.tax_ids = sorted([str(x) for x in tax_ids])
nottax = set(tax_ids) - set(self.files.keys())
if nottax:
LOG.error('Cant process taxon number(s):\t%s', str(nottax))
self.tax_ids = list(set(self.tax_ids) - nottax)
else:
self.tax_ids = sorted(['9606', '10090', '7955'])
LOG.info("Filtering to the following taxa: %s", self.tax_ids)
# moving this from process_gaf() to avoid repeating this for each
# file to be processed.
if '7955' in self.tax_ids:
self.zfin = ZFIN(self.graph_type, self.are_bnodes_skized)
if '6239' in self.tax_ids:
self.wbase = WormBase(self.graph_type, self.are_bnodes_skized)
if 'gene' not in self.all_test_ids:
LOG.warning("not configured with gene test ids.")
else:
self.test_ids = self.all_test_ids['gene']
# build the id map for mapping uniprot ids to genes ... ONCE
self.uniprot_entrez_id_map = self.get_uniprot_entrez_id_map()
# gaf evidence code mapping is built in parse(), after the file is fetched.
self.gaf_eco = {}
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
def parse(self, limit=None):
yamlfile = '/'.join((self.rawdir, self.files['gaf-eco-mapping']['file']))
with open(yamlfile, 'r') as yfh:
self.gaf_eco = yaml.safe_load(yfh)
if limit is not None:
LOG.info("Only parsing first %s rows of each file", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
for txid_num in list(set(self.files).intersection(self.tax_ids)):
gaffile = '/'.join((self.rawdir, self.files[txid_num]['file']))
self.process_gaf(gaffile, limit, self.uniprot_entrez_id_map)
LOG.info("Finished parsing.")
def process_gaf(self, gaffile, limit, id_map=None):
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
LOG.info("Processing Gene Associations from %s", gaffile)
uniprot_hit = 0
uniprot_miss = 0
col = self.gaf_columns
with gzip.open(gaffile, 'rb') as csvfile:
reader = csv.reader(
io.TextIOWrapper(csvfile, newline=""), delimiter='\t', quotechar='\"')
for row in reader:
# comments start with exclamation
if row[0][0] == '!':
continue
if len(row) != len(col):
LOG.error(
"Wrong number of columns %i, expected ... got:\n\t%s",
len(col), row)
exit(1)
dbase = row[col.index('DB')].strip()
gene_num = row[col.index('DB_Object_ID')].strip()
gene_symbol = row[col.index('DB_Object_Symbol')].strip()
qualifier = row[col.index('Qualifier')]
go_id = row[col.index('GO_ID')].strip()
ref = row[col.index('DB:Reference')].strip()
eco_symbol = row[col.index('Evidence Code')].strip()
with_or_from = row[col.index('With (or) From')]
aspect = row[col.index('Aspect')].strip()
gene_name = row[col.index('DB_Object_Name')]
gene_synonym = row[col.index('DB_Object_Synonym')]
# object_type = row[col.index('DB_Object_Type')].strip()
taxon = row[col.index('Taxon and Interacting taxon')].strip()
# date = row[col.index('Date')].strip()
# assigned_by = row[col.index('Assigned_By')].strip()
# annotation_extension = row[col.index('Annotation_Extension')]
# gene_product_form_id = row[col.index('Gene_Product_Form_ID')]
# test for required fields
if '' in row[:10] + [row[12]]:
LOG.error(
"Missing required part of annotation on row %i:\n%s",
reader.line_num, str(row[:-4]))
continue
# (Don't) deal with qualifier NOT, contributes_to, colocalizes_with
if re.search(r'NOT', qualifier):
continue
if dbase in self.localtt:
dbase = self.localtt[dbase]
uniprotid = None
gene_id = None
if dbase == 'UniProtKB':
if id_map is not None:
# try/except much faster than checking
# for dict key membership
try:
gene_id = id_map[gene_num]
uniprotid = ':'.join((dbase, gene_num))
(dbase, gene_num) = gene_id.split(':')
uniprot_hit += 1
except KeyError:
# LOG.warning(
# "UniProt id %s is without a 1:1 mapping to entrez/ensembl",
# gene_num)
uniprot_miss += 1
continue
else:
gene_num = gene_num.split(':')[-1] # last
gene_id = ':'.join((dbase, gene_num))
if self.test_mode and gene_id[:9] != 'NCBIGene:' and\
gene_num not in self.test_ids:
continue
model.addLabel(gene_id, gene_symbol)
model.addType(gene_id, self.globaltt['gene'])
if gene_name != '':
model.addDescription(gene_id, gene_name)
if gene_synonym != '':
for syn in re.split(r'\|', gene_synonym):
syn = syn.strip()
if syn[:10] == 'UniProtKB:':
model.addTriple(
gene_id, self.globaltt['has gene product'], syn)
elif re.fullmatch(graph.curie_regexp, syn) is not None and\
syn.split(':')[0] not in self.wont_prefix:
syn = syn.strip()
LOG.warning(
'possible curie "%s" as a literal synomym for %s',
syn, gene_id)
if syn != '':
model.addSynonym(gene_id, syn)
elif syn != '':
model.addSynonym(gene_id, syn)
# First taxon is for the gene, after the pipe are interacting taxa
tax_curie = taxon.split('|')[0].replace('taxon', 'NCBITaxon')
# this is a required field but good to safe
if tax_curie:
geno.addTaxon(tax_curie, gene_id)
assoc = Assoc(graph, self.name)
assoc.set_subject(gene_id)
assoc.set_object(go_id)
try:
eco_id = self.gaf_eco[eco_symbol]
assoc.add_evidence(eco_id)
except KeyError:
LOG.error("Evidence code (%s) not mapped", eco_symbol)
refs = re.split(r'\|', ref)
for ref in refs:
ref = ref.strip()
if ref != '':
prefix = ref.split(':')[-2] # sidestep 'MGI:MGI:'
if prefix in self.localtt:
prefix = self.localtt[prefix]
ref = ':'.join((prefix, ref.split(':')[-1]))
refg = Reference(graph, ref)
if prefix == 'PMID':
ref_type = self.globaltt['journal article']
refg.setType(ref_type)
refg.addRefToGraph()
assoc.add_source(ref)
# TODO add the source of the annotations from assigned by?
rel = self.resolve(aspect, mandatory=False)
if rel is not None and aspect == rel:
if aspect == 'F' and re.search(r'contributes_to', qualifier):
assoc.set_relationship(self.globaltt['contributes to'])
else:
LOG.error(
"Aspect: %s with qualifier: %s is not recognized",
aspect, qualifier)
elif rel is not None:
assoc.set_relationship(rel)
assoc.add_association_to_graph()
else:
LOG.warning("No predicate for association \n%s\n", str(assoc))
if uniprotid is not None:
assoc.set_description('Mapped from ' + uniprotid)
# object_type should be one of:
# protein_complex; protein; transcript; ncRNA; rRNA; tRNA;
# snRNA; snoRNA; any subtype of ncRNA in the Sequence Ontology.
# If the precise product type is unknown,
# gene_product should be used
########################################################################
# Derive G2P Associations from IMP annotations
# in version 2.1 Pipe will indicate 'OR'
# and Comma will indicate 'AND'.
# in version 2.0, multiple values are separated by pipes
# where the pipe has been used to mean 'AND'
if eco_symbol == 'IMP' and with_or_from != '':
withitems = re.split(r'[|,]', with_or_from) # OR + AND
phenotypeid = go_id + 'PHENOTYPE'
# create phenotype associations
for itm in withitems:
if itm == '' or re.match(
r'(UniProtKB|WBPhenotype|InterPro|HGNC)', itm):
LOG.warning(
"Skipping %s from or with %s", uniprotid, itm)
continue
# sanity check/conversion on go curie prefix
(pfx, lclid) = itm.split(':')[-2:] # last prefix wins
if pfx in self.localtt:
pfx = self.localtt[pfx]
itm = ':'.join((pfx, lclid))
# for worms and fish, they might give a RNAi or MORPH
# in these cases make a reagent-targeted gene
if re.search('MRPHLNO|CRISPR|TALEN', itm):
targeted_gene_id = self.zfin.make_targeted_gene_id(
gene_id, itm)
geno.addReagentTargetedGene(itm, gene_id, targeted_gene_id)
# TODO PYLINT why is this needed?
# Redefinition of assoc type from
# dipper.models.assoc.Association.Assoc to
# dipper.models.assoc.G2PAssoc.G2PAssoc
assoc = G2PAssoc(
graph, self.name, targeted_gene_id, phenotypeid)
elif re.search(r'WBRNAi', itm):
targeted_gene_id = self.wbase.make_reagent_targeted_gene_id(
gene_id, itm)
geno.addReagentTargetedGene(itm, gene_id, targeted_gene_id)
assoc = G2PAssoc(
graph, self.name, targeted_gene_id, phenotypeid)
else:
assoc = G2PAssoc(graph, self.name, itm, phenotypeid)
for ref in refs:
ref = ref.strip()
if ref != '':
prefix = ref.split(':')[-2]
if prefix in self.localtt:
prefix = self.localtt[prefix]
ref = ':'.join((prefix, ref.split(':')[-1]))
assoc.add_source(ref)
# experimental phenotypic evidence
assoc.add_evidence(
self.globaltt['experimental phenotypic evidence'])
assoc.add_association_to_graph()
# TODO should the G2PAssoc be the evidence for the GO assoc?
if not self.test_mode and limit is not None and \
reader.line_num > limit:
break
uniprot_tot = (uniprot_hit + uniprot_miss)
uniprot_per = 0.0
if uniprot_tot != 0:
uniprot_per = 100.0 * uniprot_hit / uniprot_tot
LOG.info(
"Uniprot: %.2f%% of %i benefited from the idmapping_selected download",
uniprot_per, uniprot_tot)
def get_uniprot_entrez_id_map(self):
src_key = 'idmapping_selected'
taxon_digest = GraphUtils.digest_id(str(self.tax_ids))
id_map = {}
smallfile = '/'.join((self.rawdir, 'id_map_' + taxon_digest + '.yaml'))
bigfile = '/'.join((self.rawdir, self.files[src_key]['file']))
# if processed smallfile exists and is newer than bigfile then use it instead
if os.path.isfile(smallfile) and \
os.path.getctime(smallfile) > os.path.getctime(bigfile):
LOG.info("Using the cheap mapping file %s", smallfile)
with open(smallfile, 'r') as yamlreader:
id_map = yaml.safe_load(yamlreader)
else:
LOG.info(
"Expensive Mapping from Uniprot IDs to Entrez/ENSEMBL gene ids for %s",
self.tax_ids)
self.fetch_from_url(self.files[src_key]['url'], bigfile)
col = self.files[src_key]['columns']
ummapped_uniprot = 0
with gzip.open(bigfile, 'rb') as csvfile:
csv.field_size_limit(sys.maxsize)
reader = csv.reader( # warning this file is over 10GB unzipped
io.TextIOWrapper(csvfile, newline=""),
delimiter='\t', quotechar='\"')
for row in reader:
uniprotkb_ac = row[col.index('UniProtKB-AC')].strip()
# uniprotkb_id = row[col.index('UniProtKB-ID')]
geneid = row[col.index('GeneID (EntrezGene)')].strip()
# refseq = row[col.index('RefSeq')]
# gi = row[col.index('GI')]
# pdb = row[col.index('PDB')]
# go = row[col.index('GO')]
# uniref100 = row[col.index('UniRef100')]
# unifref90 = row[col.index('UniRef90')]
# uniref50 = row[col.index('UniRef50')]
# uniparc = row[col.index('UniParc')]
# pir = row[col.index('PIR')]
ncbitaxon = row[col.index('NCBI-taxon')].strip()
# mim = row[col.index('MIM')]
# unigene = row[col.index('UniGene')]
# pubmed = row[col.index('PubMed')]
# embl = row[col.index('EMBL')]
# embl_cds = row[col.index('EMBL-CDS')]
ensembl = row[col.index('Ensembl')].strip()
# ensembl_trs = row[col.index('Ensembl_TRS')]
# ensembl_pro = row[col.index('Ensembl_PRO')]
# other_pubmed = row[col.index('Additional PubMed')]
if ncbitaxon not in self.tax_ids:
continue
# neither empty nor a list
if geneid != '' and ';' not in geneid:
id_map[uniprotkb_ac] = 'NCBIGene:' + geneid
elif ensembl != '' and ';' not in ensembl:
id_map[uniprotkb_ac] = 'ENSEMBL:' + ensembl
else:
ummapped_uniprot += 1
LOG.info("Writing id_map out as %s", smallfile)
with open(smallfile, 'w') as yamlwriter:
yaml.dump(id_map, yamlwriter)
LOG.warning('Did not find 1:1 gene IDs for %i uniprots', ummapped_uniprot)
LOG.info(
"Acquired %i 1:1 uniprot to [entrez|ensembl] mappings", len(id_map.keys()))
return id_map
def getTestSuite(self):
import unittest
from tests.test_geneontology import GeneOntologyTestCase
test_suite = unittest.TestLoader().loadTestsFromTestCase(GeneOntologyTestCase)
return test_suite
|
monarch-initiative/dipper
|
dipper/sources/GeneOntology.py
|
GeneOntology.py
|
py
| 24,532 |
python
|
en
|
code
| 53 |
github-code
|
6
|
14138130461
|
from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField
class Rate(FlaskForm):
rating = SelectField('Выберите оценку',
choices=[(None, 'Не завершено'), (10, 'Шедевр(10)'), (9, 'Великолепно(9)'),
(8, 'Очень хорошо(8)'), (7, 'Хорошо(7)'),
(6, 'Неплохо(6)'), (5, 'Нормально(5)'),
(4, 'Не очень(4)'), (3, 'Плохо(3)'),
(2, 'Ужасно(2)'), (1, 'Отвратительно(1)')])
submit = SubmitField('Подтвердить выбор')
|
DmitriyDog/WEB
|
Rate.py
|
Rate.py
|
py
| 714 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
27215234475
|
import os
import sys
import click
import pytest
from click.exceptions import ClickException
CONTEXT_SETTINGS = dict(
help_option_names=['-h', '--help']
)
class _CustomClickException(ClickException):
exit_code = 0x20
@pytest.fixture()
def cli1():
@click.command('cli1', help='CLI-1 example', context_settings=CONTEXT_SETTINGS)
@click.option('-c', type=int, help='optional C value', default=None, show_default=True)
@click.argument('a', type=int)
@click.argument('b', type=int)
def cli1(a, b, c):
if c is None:
print(f'{a} + {b} = {a + b}')
elif c < 0:
raise ValueError('Uncaught value error', c)
elif c > 1000:
print('Well, well, well...')
raise _CustomClickException(f'custom - {c!r}')
elif os.environ.get('FAIL'):
print('WTF?')
else:
print(f'{a} + {b} + {c} = {a + b + c}', file=sys.stderr)
return cli1
|
HansBug/hbutils
|
test/testing/simulate/conftest.py
|
conftest.py
|
py
| 955 |
python
|
en
|
code
| 7 |
github-code
|
6
|
39712277768
|
import json
import sqlite3
from sqlite3 import Error
import requests
from lxml import html
def get_popular_drinks():
url = 'https://www.esquire.com/food-drink/drinks/a30246954/best-alcohol-bottles-2019/'
page = requests.get(url)
tree = html.fromstring(page.content)
alcohol = tree.xpath('//h3[@class="body-h3"]/text()')
images = tree.xpath('//img[@class="lazyimage lazyload"]/@data-src')
alcohol_list = []
index = 0
j = 0
while index < len(alcohol) and j < len(images):
bottle_dict = {"name": alcohol[index].strip(),
"price": alcohol[index + 1].strip(),
"img": images[j].replace("?resize=480:*", "")
}
if index < 34:
bottle_dict['brand'] = 'scotch'
elif index > 33 and index < 40:
bottle_dict['brand'] = 'tequila'
elif index >= 40 and index < 45:
bottle_dict['brand'] = 'gin'
elif index >= 46 and index < 52:
bottle_dict['brand'] = 'rum'
else:
bottle_dict['brand'] = 'cognac'
alcohol_list.append(bottle_dict)
j += 1
index += 2
return alcohol_list
def get_cocktails(brand):
response = requests.get(f'https://www.thecocktaildb.com/api/json/v1/1/filter.php?i={brand}')
return response.json()['drinks']
def get_cocktail_ingredients(api_id):
response = requests.get(f'https://www.thecocktaildb.com/api/json/v1/1/lookup.php?i={api_id}')
return response.json()['drinks'][0]
### Old Sqlite3 functions ###
# def create_connection(db_file):
# """ create a database connection to a SQLite database """
# conn = None
# try:
# conn = sqlite3.connect(db_file)
# print(sqlite3.version)
# except Error as e:
# print(e)
# finally:
# if conn:
# conn.close()
# def run_sql_files(db_file, sql_file):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# sql_file = open(sql_file)
# sql_as_string = sql_file.read()
# cursor.executescript(sql_as_string)
# for row in cursor.execute("SELECT * FROM users"):
# print(row)
# sql_file.close()
# conn.close()
# def show_tables(db_file):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
# print(cursor.fetchall())
# conn.close()
# def add_user(db_file, username, password, name, email):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# try:
# cursor.execute(f"INSERT INTO users (username, password, name, email) VALUES ('{username}', '{password}', '{name}', '{email}');")
# except Error as e:
# print(e)
# else:
# conn.commit()
# print("Success")
# conn.close()
# def delete_user(db_file, username):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute(f"DELETE FROM users WHERE username = '{username}';")
# conn.commit()
# conn.close()
# def select_all(db_file, table):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute(f"SELECT * FROM {table};")
# rows = cursor.fetchall()
# for row in rows:
# print(row)
# conn.close()
# return rows
# def add_popular_drinks(db_file):
# alcohol_dict = get_popular_drinks()
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# drinks_tuple_list = []
# for k, v in alcohol_dict.items():
# drinks_tuple_list.append((k, v['price']))
# print(drinks_tuple_list)
# cursor.executemany(f"INSERT INTO popular_drinks (name, price) VALUES (?, ?);", drinks_tuple_list)
# conn.commit()
# conn.close()
# if __name__ == '__main__':
# print(get_popular_drinks())
# print(get_cocktails())
# print(get_cocktail_ingredients('11007'))
# print(get_cocktails('scotch'))
# create_connection("mydb.db")
# run_sql_files("mydb.db", "second_step.sql")
# show_tables("mydb.db")
# add_user('mydb.db', 'Admin23', '1234', 'Addie', '[email protected]')
# delete_user('mydb.db', 'Admin')
# add_popular_drinks('mydb.db')
# select_all('mydb.db', 'popular_drinks')
|
advaa123/cocktailcloset
|
models/basic.py
|
basic.py
|
py
| 4,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2908367696
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union
from httpx import AsyncClient
from supertokens_python.recipe.thirdparty.provider import Provider
from supertokens_python.recipe.thirdparty.types import (
AccessTokenAPI, AuthorisationRedirectAPI, UserInfo, UserInfoEmail)
if TYPE_CHECKING:
from supertokens_python.framework.request import BaseRequest
from supertokens_python.utils import get_filtered_list
class Github(Provider):
def __init__(self, client_id: str, client_secret: str, scope: Union[None, List[str]] = None,
authorisation_redirect: Union[None, Dict[str, Union[str, Callable[[
BaseRequest], str]]]] = None,
is_default: bool = False):
super().__init__('github', client_id, is_default)
default_scopes = ["read:user", "user:email"]
if scope is None:
scope = default_scopes
self.client_secret = client_secret
self.scopes = list(set(scope))
self.access_token_api_url = 'https://github.com/login/oauth/access_token'
self.authorisation_redirect_url = 'https://github.com/login/oauth/authorize'
self.authorisation_redirect_params = {}
if authorisation_redirect is not None:
self.authorisation_redirect_params = authorisation_redirect
async def get_profile_info(self, auth_code_response: Dict[str, Any], user_context: Dict[str, Any]) -> UserInfo:
access_token: str = auth_code_response['access_token']
params = {
'alt': 'json'
}
headers = {
'Authorization': 'Bearer ' + access_token,
'Accept': 'application/vnd.github.v3+json'
}
async with AsyncClient() as client:
response_user = await client.get(url='https://api.github.com/user', params=params, headers=headers)
response_email = await client.get(url='https://api.github.com/user/emails', params=params, headers=headers)
user_info = response_user.json()
emails_info = response_email.json()
user_id = str(user_info['id'])
email_info = get_filtered_list(
lambda x: 'primary' in x and x['primary'], emails_info)
if len(email_info) == 0:
return UserInfo(user_id)
is_email_verified = email_info[0]['verified'] if 'verified' in email_info[0] else False
email = email_info[0]['email'] if 'email' in email_info[0] else user_info['email']
return UserInfo(user_id, UserInfoEmail(email, is_email_verified))
def get_authorisation_redirect_api_info(self, user_context: Dict[str, Any]) -> AuthorisationRedirectAPI:
params = {
'scope': ' '.join(self.scopes),
'client_id': self.client_id,
**self.authorisation_redirect_params
}
return AuthorisationRedirectAPI(
self.authorisation_redirect_url, params)
def get_access_token_api_info(
self, redirect_uri: str, auth_code_from_request: str, user_context: Dict[str, Any]) -> AccessTokenAPI:
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': auth_code_from_request,
'redirect_uri': redirect_uri
}
return AccessTokenAPI(self.access_token_api_url, params)
def get_redirect_uri(self, user_context: Dict[str, Any]) -> Union[None, str]:
return None
|
starbillion/supertokens_python
|
supertokens_python/recipe/thirdparty/providers/github.py
|
github.py
|
py
| 3,505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26898344474
|
# Author: David Martínez Acha
# Date: 04/02/2023 14:30
# Description: Allows datasets to be loaded
# Version: 1.2
from os.path import isfile
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.api import types
from scipy.io import arff
from algoritmos.utilidades.filetype import FileType
from algoritmos.utilidades.labelencoder import OwnLabelEncoder
class DatasetLoader:
def __init__(self, file):
"""
Loader for (ARFF or CSV) files.
:param file: path to the file
"""
self.target = None
if not isfile(file):
raise FileNotFoundError("El archivo no existe en el conjunto de datasets")
if ".csv" in file:
self.type = FileType.CSV
elif ".arff" in file:
self.type = FileType.ARFF
else:
raise ValueError("El fichero no es CSV o ARFF")
self.file = file
def get_allfeatures(self):
"""
Gets the columns (attributes) of the data, including the target.
:return: list of the data features.
"""
return self._get_data().columns.values
def set_target(self, target):
"""
Sets the target of the data.
:param target: the target or class for later classification
"""
self.target = target
def get_only_features(self):
"""
Gets the features of the data. Does NOT include the target.
:return: list of the data features (without the target).
"""
if self.target is None:
raise ValueError("La clase o target no ha sido establecida, selecciona primero la característica que "
"actúa como target")
return np.setdiff1d(self._get_data().columns.values, self.target)
def _get_data(self):
"""
Gets the raw data (straight from the file) according
to the file type.
:return: data as a dataframe
"""
if self.type == FileType.CSV:
return self._csv_data()
elif self.type == FileType.ARFF:
return self._arff_data()
def _csv_data(self):
"""
Converts the .CSV file data into a dataframe.
:return: data as a dataframe
"""
return pd.read_csv(self.file)
def _arff_data(self):
"""
Converts the .ARFF file data into a dataframe.
:return: data as a dataframe
"""
data = arff.loadarff(self.file)
df = pd.DataFrame(data[0])
return df
def _detect_categorical_features(self, x: DataFrame):
"""
Detects whether categorical features exist.
:param x: instances
:return: True if any non-numeric (categorical) feature exists, False otherwise
"""
return not all(types.is_numeric_dtype(t) for t in list(x.dtypes))
def _detect_unlabelled_targets(self, y: DataFrame):
"""
Detects whether unlabelled data exist. The "-1" convention is used
for unlabelled data.
Cases considered: -1, -1.0, "-1", "-1.0"
:param y: labels
:return: True if there are unlabelled data, False otherwise
"""
values = y[self.target].astype(str).values
return "-1" in values or "-1.0" in values
def get_x_y(self):
"""
Gets the data (features) and the targets or classes separately.
:return: the instances (x), the classes or targets (y), the mapping from the encoded classes to the
original ones, and whether the dataset was already semi-supervised
"""
if self.target is None:
raise ValueError("La clase o target no ha sido establecida, selecciona primero la característica que "
"actúa como target")
data = self._get_data()
x = data.drop(columns=[self.target])
if self._detect_categorical_features(x):
raise ValueError("Se han detectado características categóricas o indefinidas, "
"recuerde que los algoritmos solo soportan características numéricas")
if self.type == FileType.CSV:
y = pd.DataFrame(data[self.target], columns=[self.target])
else:
y = pd.DataFrame(
np.array([v.decode("utf-8") if not types.is_numeric_dtype(type(v)) else v for v in
data[self.target].values]),
columns=[self.target])
y.replace("?", "-1", inplace=True)
is_unlabelled = self._detect_unlabelled_targets(y)
y, mapping = OwnLabelEncoder().transform(y)
return x, y, mapping, is_unlabelled
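# --- Hypothetical usage sketch (file path and target column are placeholders) ---
# Assumes the algoritmos package is importable and the CSV exists with a "class" column.
if __name__ == "__main__":
    loader = DatasetLoader("datasets/example.csv")
    loader.set_target("class")
    x, y, mapping, is_unlabelled = loader.get_x_y()
    print(x.shape, mapping, is_unlabelled)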
|
dma1004/TFG-SemiSupervisado
|
algoritmos/utilidades/datasetloader.py
|
datasetloader.py
|
py
| 4,948 |
python
|
es
|
code
| 5 |
github-code
|
6
|
43491211360
|
from rest_framework.serializers import ModelSerializer, SlugRelatedField
from products.models import (
Product,
ProductTag
)
class ProductSerializer(ModelSerializer):
'''
Make output appear as an array of strings:
"tags": ["first", "second", "third"]
Rather than an array of objects:
"tags": [{
"content": "first"
},
"content": "second"
}]
'''
tags = SlugRelatedField(source = 'producttag_set',
slug_field = 'content',
many = True,
read_only = True)
class Meta:
model = Product
fields = ['id', 'name', 'price', 'stall', 'description', 'quantity', 'stall']
read_only_fields = ['tags',]
|
skeithtan/iris
|
products/serializers.py
|
serializers.py
|
py
| 768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39672716054
|
import fileinput
_PAIRS = {
"(": ")",
"[": "]",
"{": "}",
"<": ">",
}
_POINTS = {
")": 3,
"]": 57,
"}": 1197,
">": 25137,
}
class Stack(list):
push = list.append
def solve(input_file):
points = 0
for line in input_file:
# incomplete lines will return 0
points += _check_syntax(list(line.strip()))
return points
def _check_syntax(symbol_list):
stack = Stack()
for i in range(len(symbol_list)):
char = symbol_list.pop(0)
if char in _PAIRS:
stack.push(_PAIRS[char])
else:
expected = stack.pop()
if char != expected:
# syntax error
return _POINTS[char]
return 0
if __name__ == "__main__":
print(solve(fileinput.FileInput()))
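def _demo_check_syntax():
    """Illustrative check using the example line from the 2021 day 10 puzzle
    statement: the first illegal character in "{([(<{}[<>[]}>{[]{[(<()>" is '}'
    (a ']' was expected), which scores 1197 points."""
    assert _check_syntax(list("{([(<{}[<>[]}>{[]{[(<()>")) == 1197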
|
cmatsuoka/aoc
|
2021 - submarine/10 - syntax checker/solution1.py
|
solution1.py
|
py
| 804 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3480622108
|
from __future__ import division
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
from .quant_func import fix_quant as quant
### ==============================================================================###
### quant for different data types ###
### ==============================================================================###
act_quant = lambda x : quant(x, 3, 4, "act")
weight_quant = lambda x : quant(x, 2, 5, "weight")
bias_quant = lambda x : quant(x, 7, 8, "weight")
### ===============================================================================###
### Quantization Modules ###
### ===============================================================================###
class QReLu(nn.ReLU):
__constants__ = ['inplace']
def __init__(self, inplace=False):
super(nn.ReLU, self).__init__()
self.inplace = inplace
def forward(self, input):
out = F.relu(input, inplace=self.inplace)
out = act_quant(out)
return out
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class QLinear(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(QLinear, self).__init__(in_features, out_features, bias=bias)
self.ia_quant = lambda x : quant(x, 5, 2, "act")
self.weight_quant = lambda x : quant(x, 3, 4, "weight")
self.bias_quant = lambda x : quant(x, 5, 2, "weight")
self.oa_quant = lambda x : quant(x, 5, 2, "act")
def forward(self, input):
input = self.ia_quant(input)
weight = self.weight_quant(self.weight)
if self.bias is not None :
bias = self.bias_quant(self.bias)
else :
bias = None
output = F.linear(input, weight, None)
output = self.oa_quant(output) # post bias
if self.bias is not None :
output = output + bias
output = self.oa_quant(output)
# output = F.linear(input, self.weight, self.bias)
return output
class QAveragePool2d(nn.AvgPool2d):
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
super(QAveragePool2d, self).__init__(kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
def forward(self, input):
input = act_quant(input)
out = F.avg_pool2d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
out = act_quant(out)
return out
class QConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
):
super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
def forward(self, input):
weight = weight_quant(self.weight)
if self.bias is not None:
bias = bias_quant(self.bias)
output = F.conv2d(input, weight, None, self.stride,
self.padding, self.dilation, self.groups)
output = act_quant(output)
if self.bias is not None :
output = output + bias.view(1, -1, 1, 1)
output = act_quant(output)
return output
class _QConvBnNd(nn.modules.conv._ConvNd):
_version = 2
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=True,
):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, transposed,
output_padding, groups, False, padding_mode)
self.freeze_bn = freeze_bn if self.training else True
# if self.training :
norm_layer = nn.BatchNorm2d
# else :
# norm_layer = IdentityBN
self.bn = norm_layer(out_channels, eps, momentum, True, True)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
# this needs to be called after reset_bn_parameters,
# as they modify the same state
if self.training:
if freeze_bn:
self.freeze_bn_stats()
else:
self.update_bn_stats()
else:
self.freeze_bn_stats()
def reset_running_stats(self):
self.bn.reset_running_stats()
def reset_bn_parameters(self):
self.bn.reset_running_stats()
nn.init.uniform_(self.bn.weight)
nn.init.zeros_(self.bn.bias)
        # note: below is actually for conv, not BN
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def get_params(self):
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape([-1, 1, 1, 1]))
# scaled bias :
if self.bias is not None :
scaled_bias = scale_factor * (self.bias - self.bn.running_mean) + self.bn.bias
else :
scaled_bias = - scale_factor * self.bn.running_mean + self.bn.bias
scaled_bias_q = self.bias_fake_quant(scaled_bias)
return scaled_weight, scaled_bias_q
def reset_parameters(self):
super(_QConvBnNd, self).reset_parameters()
def update_bn_stats(self):
self.freeze_bn = False
self.bn.training = True
return self
def freeze_bn_stats(self):
self.freeze_bn = True
self.bn.training = False
return self
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def _forward(self, input):
input = act_quant(input)
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
scaled_weight = weight_quant(self.weight * scale_factor.reshape([-1, 1, 1, 1]))
# scaled bias :
# with torch.no_grad():
if self.bias is not None :
scaled_bias = scale_factor *(self.bias - self.bn.running_mean) + self.bn.bias
else :
scaled_bias = - scale_factor * self.bn.running_mean + self.bn.bias
scaled_bias_q = bias_quant(scaled_bias)
# this does not include the conv bias
conv = self._conv_forward(input, scaled_weight)
conv = act_quant(conv)
conv_bias = conv + scaled_bias_q.reshape([1, -1, 1, 1])
conv_bias = act_quant(conv_bias)
        if self.training :
            # recover the un-folded conv output; the batch norm below re-applies
            # the bias and scale, so the scaled-bias terms are not needed here
            conv_orig = conv / scale_factor.reshape([1, -1, 1, 1])
if self.bias is not None:
conv_orig = conv_orig + self.bias.reshape([1, -1, 1, 1])
conv = self.bn(conv_orig)
return conv
else :
return conv_bias
def extra_repr(self):
# TODO(jerryzh): extend
return super(_QConvBnNd, self).extra_repr()
def forward(self, input):
return act_quant(self._forward(input))
def train(self, mode=True):
"""
Batchnorm's training behavior is using the self.training flag. Prevent
changing it if BN is frozen. This makes sure that calling `model.train()`
on a model with a frozen BN will behave properly.
"""
self.training = mode
if not self.freeze_bn:
for module in self.children():
module.train(mode)
return self
# ===== Serialization version history =====
#
# Version 1/None
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- gamma : Tensor
# |--- beta : Tensor
# |--- running_mean : Tensor
# |--- running_var : Tensor
# |--- num_batches_tracked : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- bn : Module
# |--- weight : Tensor (moved from v1.self.gamma)
# |--- bias : Tensor (moved from v1.self.beta)
# |--- running_mean : Tensor (moved from v1.self.running_mean)
# |--- running_var : Tensor (moved from v1.self.running_var)
# |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
if version is None or version == 1:
# BN related parameters and buffers were moved into the BN module for v2
v2_to_v1_names = {
'bn.weight': 'gamma',
'bn.bias': 'beta',
'bn.running_mean': 'running_mean',
'bn.running_var': 'running_var',
'bn.num_batches_tracked': 'num_batches_tracked',
}
for v2_name, v1_name in v2_to_v1_names.items():
if prefix + v1_name in state_dict:
state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
state_dict.pop(prefix + v1_name)
elif strict:
missing_keys.append(prefix + v2_name)
super(_QConvBnNd, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
class QConvBn2d(_QConvBnNd, nn.Conv2d):
r"""
A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf section 3.2.2
Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
to default.
Attributes:
freeze_bn:
activation_post_process: fake quant module for output activation
weight_fake_quant: fake quant module for weight
"""
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
_QConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _pair(0), groups, bias, padding_mode,
eps, momentum, freeze_bn)
class QConvBnReLU2d(QConvBn2d):
r"""
A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv2d` and
:class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
Implementation details: https://arxiv.org/pdf/1806.08342.pdf
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
observer: fake quant module for output activation, it's called observer
to align with post training flow
weight_fake_quant: fake quant module for weight
"""
def __init__(self,
# Conv2d args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False):
super(QConvBnReLU2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias,
padding_mode, eps, momentum,
freeze_bn)
self.relu = nn.ReLU()
def forward(self, input):
return act_quant(self.relu(QConvBn2d._forward(self, input)))
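# Hedged usage sketch (not part of the original file). Because of the relative
# import at the top, this module has to be run as part of its package, e.g.
# `python -m Quantization.modules.qlayers` (package path assumed, not verified);
# layer sizes below are illustrative only.
if __name__ == "__main__":
    x = torch.randn(2, 3, 32, 32)
    conv = QConv2d(3, 16, kernel_size=3, padding=1)
    fused = QConvBnReLU2d(16, 32, kernel_size=3, padding=1, freeze_bn=False)
    head = QLinear(32 * 32 * 32, 10)
    out = head(fused(conv(x)).flatten(1))  # quantized activations end to end
    print(out.shape)  # expected: torch.Size([2, 10])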
|
jmluu/ICAIS_ML.Pytorch
|
Quantization/modules/qlayers.py
|
qlayers.py
|
py
| 14,252 |
python
|
en
|
code
| 3 |
github-code
|
6
|
22853413046
|
class Solution(object):
#Method 1: Solve by removing closed pair one by one
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
pre = None
while s and pre != s:
pre = s
s = s.replace('()', '').replace('[]', '').replace('{}', '')
if s:
return False
return True
    #Method 2: Using a stack (this definition overrides Method 1 on the class)
def isValid(self, s):
map = {
'(': ')',
'{': '}',
'[': ']'
}
stack = []
for char in s:
if char in map:
stack.append(map[char])
else:
if not stack or stack.pop() != char:
return False
return not stack
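# Hedged usage sketch (not in the original file); note that the second isValid
# definition (stack-based) is the one that remains bound to the class.
if __name__ == "__main__":
    assert Solution().isValid("()[]{}")
    assert not Solution().isValid("(]")
    print("all checks passed")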
|
yuweishi/LeetCode
|
Algorithms/Valid Parentheses/solution.py
|
solution.py
|
py
| 765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3975243099
|
# -*- coding:utf-8 -*-
import torchvision
import torch.nn as nn
# def load_model(pretrained=True, num_classes=None):
# """加载model
# Parameters
# pretrained: bool
# True: 加载预训练模型; False: 加载未训练模型
# num_classes: int
# Alexnet最后一层输出
# Returns
# alexnet_model: model
# CNN模型
# """
# model = torchvision.models.alexnet(pretrained=pretrained)
# if pretrained:
# fc1 = nn.Linear(256 * 6 * 6, 4096)
# fc1.weight = model.classifier[1].weight
# fc1.bias = model.classifier[1].bias
# fc2 = nn.Linear(4096, 4096)
# fc2.weight = model.classifier[4].weight
# fc2.bias = model.classifier[4].bias
# classifier = nn.Sequential(
# nn.Dropout(),
# fc1,
# nn.ReLU(inplace=True),
# nn.Dropout(),
# fc2,
# nn.ReLU(inplace=True),
# nn.Linear(4096, num_classes),
# )
# model.classifier = classifier
# return model
def load_model(pretrained=True, num_classes=None):
"""加载model
Parameters
pretrained: bool
True: 加载预训练模型; False: 加载未训练模型
num_classes: int
Alexnet最后一层输出
Returns
alexnet_model: model
CNN模型
"""
model = torchvision.models.resnet34(pretrained=pretrained)
classifier = nn.Sequential(
nn.Linear(512, num_classes),
)
model.fc = classifier
return model
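# Hedged usage sketch (not in the original file): pretrained=False avoids the
# weight download; num_classes is illustrative only.
if __name__ == "__main__":
    import torch
    net = load_model(pretrained=False, num_classes=48)
    logits = net(torch.randn(2, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([2, 48])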
|
ray0809/pytorch
|
retrieval/DeepHash/DSDH_PyTorch/models/alexnet.py
|
alexnet.py
|
py
| 1,578 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9273208977
|
from flask import Flask, render_template, request
import joblib
import numpy as np
import pandas as pd
app = Flask(__name__)
# Load your model here
model = joblib.load('C:/Users/Dylan/exoplanets/models/exoplanet_classifier.joblib')
@app.route('/', methods=['GET', 'POST'])
def home():
prediction = None
predicted_label = None
if request.method == 'POST':
        exoplanetmass = float(request.form['exoplanetmass'])
        exoplanetradius = float(request.form['exoplanetradius'])
        # An empty density field would make float() raise, so treat it as 0.0
        # and derive the density from mass and radius below
        exoplanetdensity = float(request.form['exoplanetdensity'] or 0.0)
        unit = request.form['unit']
        # Convert units if needed
        if unit == 'Earths':
            # Convert Earth units based on your conversion logic
            exoplanetmass = exoplanetmass * 0.00314558
            exoplanetradius = exoplanetradius * 0.0892147
        if not exoplanetdensity:
            # Estimate the density when it is not supplied
            # (conversion factors kept as in the original source)
            radius_scaled = exoplanetradius * 7.149e+9
            mass_scaled = exoplanetmass * 6.99115e+9
            volume = (4 / 3) * np.pi * (radius_scaled ** 3)
            exoplanetdensity = mass_scaled / volume
        # Build the model input only after all values are final
        new_data = pd.DataFrame({
            'pl_radj': [exoplanetradius],
            'pl_bmassj': [exoplanetmass],
            'pl_dens': [exoplanetdensity]
        })
        # Make prediction using your loaded model
        if exoplanetmass and exoplanetradius and exoplanetdensity and unit:
            predicted_class = model.predict(new_data)
            predicted_label = predicted_class[0]
# Map predicted class to human-readable format
return render_template('index.html', prediction=predicted_label)
if __name__ == '__main__':
app.run(debug=True)
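# Hedged usage sketch (not in the original file), kept commented out because the
# module loads the joblib model at import time and needs templates/index.html.
# with app.test_client() as client:
#     resp = client.post('/', data={'exoplanetmass': '1.0', 'exoplanetradius': '1.0',
#                                   'exoplanetdensity': '5.5', 'unit': 'Earths'})
#     print(resp.status_code)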
|
DylanBerger/ExoplanetClassifier
|
app.py
|
app.py
|
py
| 1,739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35743742294
|
import sys
import pandas as pd
import numpy as np
import gzip
import read_write as rw
import LDA as lda
'''
finput_title = "Data/title"
finput_description = "Data/description"
finput_train_item_id = "Data/train_item_id"
finput_test_item_id = "Data/test_item_id"
foutput_title_similarity = "Data/title_similarity_matrix"
foutput_description_similarity = "Data/description_similarity_matrix"
'''
if (__name__ == '__main__'):
#### data path
finput_topic_num = int(sys.argv[1])
finput_title = sys.argv[2]
finput_description = sys.argv[3]
finput_train_item_id = sys.argv[4]
finput_test_item_id = sys.argv[5]
foutput_title_similarity = sys.argv[6]
foutput_description_similarity = sys.argv[7]
#### read into item title and description information (dict: {id : content})
dict_title = rw.readffile(finput_title)
dict_description = rw.readffile(finput_description)
train_item_id = rw.readffile(finput_train_item_id)
test_item_id = rw.readffile(finput_test_item_id)
#### preprocess before LDA
dict_title_preprocessed = lda.texts_preprocess(dict_title)
dict_description_preprocessed = lda.texts_preprocess(dict_description)
list_title_preprocessed = list(dict_title_preprocessed.values())
list_description_preprocessed = list(dict_description_preprocessed.values())
print("text preprocessed done!")
#### generate item title and description similarity for selected items
item_tt_id_lst = list(train_item_id.keys())+list(test_item_id.keys())
item_total_id_lst = list(dict_title.keys())
index_lst = []
for id in item_tt_id_lst:
index_lst.append(item_total_id_lst.index(id))
title_similarity = lda.LDA(texts=list_title_preprocessed, index_lst=index_lst, num_topics=finput_topic_num)
description_similarity = lda.LDA(texts=list_description_preprocessed, index_lst=index_lst, num_topics=finput_topic_num)
print("lda similarity calculated done!")
#### generate train/test item similarity matrix
df_title_similarity_matrix = pd.DataFrame(np.array(title_similarity),index=item_tt_id_lst,columns=item_tt_id_lst)
df_description_similarity_matrix = pd.DataFrame(np.array(description_similarity),index=item_tt_id_lst,columns=item_tt_id_lst)
# train_item_id = rw.readffile(finput_train_item_id)
# test_item_id = rw.readffile(finput_test_item_id)
# #### title/train
# df_title_similarity_matrix_train = df_title_similarity_matrix.loc[list(train_item_id.keys()), list(train_item_id.keys())]
# #### title/test
# df_title_similarity_matrix_test = df_title_similarity_matrix.loc[list(test_item_id.keys()), list(test_item_id.keys())]
# #### description/train
# df_description_similarity_matrix_train = df_description_similarity_matrix.loc[list(train_item_id.keys()), list(train_item_id.keys())]
# #### description/test
# df_description_similarity_matrix_test = df_description_similarity_matrix.loc[list(test_item_id.keys()), list(test_item_id.keys())]
print("similarity matrix generated done!")
#### write data into files
rw.write2file(df_title_similarity_matrix, foutput_title_similarity)
rw.write2file(df_description_similarity_matrix, foutput_description_similarity)
print("file saved done!")
|
clamli/Dissertation
|
Step1-Preprocessing/item_similarity.py
|
item_similarity.py
|
py
| 3,185 |
python
|
en
|
code
| 28 |
github-code
|
6
|
30296220599
|
# -*- encoding: utf-8 -*-
'''
@File : alien.py
@Time : 2021/10/25 23:48:17
@Author : James
@Version : 1.0
@Desc    :   Alien sprite class
'''
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    '''Represents a single alien.'''
def __init__(self, ai_game):
        '''Initialize the alien and set its starting position.'''
super().__init__()
self.screen = ai_game.screen
        # Load the alien image and set its rect attribute
self.image = pygame.image.load(r'G:\GitWorkSpace\python_practice\alien_invasion\images\alien.bmp')
self.rect = self.image.get_rect()
        # Each alien initially appears near the top-left corner of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        # Store the alien's exact horizontal position
self.x = float(self.rect.x)
|
heisenberg000/python_practice
|
alien_invasion/alien.py
|
alien.py
|
py
| 830 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
37626769194
|
import sys
from vmc import vmcCrawl
from clean import cleaner
from stow import stower
from stow import stow4me
from logWrite import logWriter
logName = ""
if sys.argv[1] == "clean":
print("Cleaning all Log diver txt files.")
cleaner(0)
if sys.argv[1] == "nuke":
print("Nuclear Option Activated...")
print("ALL NON/-LOGDIVER FILES WILL BE DELETED...")
cleaner(1)
if sys.argv[1] == "stow":
folderName = input("Name the folder to stow the files in: ")
stower(folderName)
if sys.argv[1] == "stow4me":
stow4me()
if sys.argv[1] == "vmc":
try:
with open("VMC.log", "r") as vmcLog:
vmcCrawl(vmcLog)
vmcLog.close()
except FileNotFoundError:
print(
"VMC.log Not found."
)
if sys.argv[1] == "crawl":
logWriter()
|
MThicklin/Veeam-LogDiver
|
logdiver.py
|
logdiver.py
|
py
| 807 |
python
|
en
|
code
| 5 |
github-code
|
6
|
4785215660
|
from collections import deque,defaultdict
n,m = map(int,input().split())
d = defaultdict(list)
for i in range(m):
u,v = map(int,input().split())
d[u].append(v)
d[v].append(u)
visited = [0]*n
ans = 0
for i in range(1,n+1):
if visited[i-1] == 1:
continue
q = deque()
q.append(i)
visited[i-1] = 1
while q:
now = q.popleft()
for next in d[now]:
if visited[next-1] == 1:
continue
visited[next-1] = 1
q.append(next)
ans += 1
print(ans)
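# Hedged example (not in the original file): for the stdin input
#   5 3
#   1 2
#   2 3
#   4 5
# the script prints 2, i.e. the two connected components {1, 2, 3} and {4, 5}.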
|
K5h1n0/compe_prog_new
|
VirtualContest/022/06.py
|
06.py
|
py
| 541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
53220412
|
from datetime import date
from zohocrmsdk.src.com.zoho.api.authenticator import OAuthToken
from zohocrmsdk.src.com.zoho.crm.api import Initializer
from zohocrmsdk.src.com.zoho.crm.api.dc import USDataCenter
from zohocrmsdk.src.com.zoho.crm.api.record import RecordOperations, ConvertBodyWrapper, LeadConverter, Record, Field, \
ActionWrapper, SuccessResponse, APIException
from zohocrmsdk.src.com.zoho.crm.api.tags import Tag
from zohocrmsdk.src.com.zoho.crm.api.util import Choice
class ConvertLead:
@staticmethod
def initialize():
environment = USDataCenter.PRODUCTION()
token = OAuthToken(client_id="clientID", client_secret="clientSecret", grant_token="grantToken")
Initializer.initialize(environment, token)
@staticmethod
def convert_lead(lead_id):
"""
This method is used to Convert a Lead record and print the response.
:param lead_id: The ID of the Lead to be converted.
"""
"""
example
lead_id = 3409643002034003
"""
record_operations = RecordOperations()
request = ConvertBodyWrapper()
# List to hold LeadConverter instances
data = []
record = LeadConverter()
record.set_overwrite(True)
record.set_notify_lead_owner(True)
record.set_notify_new_entity_owner(True)
record.set_accounts('34096430692007')
record.set_contacts('34096430836001')
record.set_assign_to('34096430302031')
deals = Record()
"""
Call add_field_value method that takes two arguments
Import the zcrmsdk.src.com.zoho.crm.api.record.field file
1 -> Call Field "." and choose the module from the displayed list and press "." and choose the field name from the displayed list.
2 -> Value
"""
deals.add_field_value(Field.Deals.deal_name(), 'deal_name')
deals.add_field_value(Field.Deals.description(), "deals description")
deals.add_field_value(Field.Deals.closing_date(), date(2020, 10, 2))
deals.add_field_value(Field.Deals.stage(), Choice("Closed Won"))
deals.add_field_value(Field.Deals.amount(), 500.78)
"""
Call add_key_value method that takes two arguments
1 -> A string that is the Field's API Name
2 -> Value
"""
deals.add_key_value('Custom_field', 'Value')
tag_list = []
tag = Tag()
tag.set_name('Converted')
tag_list.append(tag)
deals.set_tag(tag_list)
record.set_deals(deals)
data.append(record)
request.set_data(data)
# Call convertLead method that takes ConvertBodyWrapper instance and lead_id as parameter
response = record_operations.convert_lead(lead_id, request)
if response is not None:
print('Status Code: ' + str(response.get_status_code()))
response_object = response.get_object()
if response_object is not None:
if isinstance(response_object, ActionWrapper):
action_response_list = response_object.get_data()
for action_response in action_response_list:
if isinstance(action_response, SuccessResponse):
print("Status: " + action_response.get_status().get_value())
print("Code: " + action_response.get_code().get_value())
print("Details")
details = action_response.get_details()
for key, value in details.items():
print(key + ' : ' + str(value))
print("Message: " + action_response.get_message().get_value())
elif isinstance(action_response, APIException):
print("Status: " +
action_response.get_status().get_value())
print("Code: " + action_response.get_code().get_value())
print("Details")
details = action_response.get_details()
for key, value in details.items():
print(key + ' : ' + str(value))
print("Message: " +
action_response.get_message().get_value())
elif isinstance(response_object, APIException):
print("Status: " + response_object.get_status().get_value())
print("Code: " + response_object.get_code().get_value())
print("Details")
details = response_object.get_details()
for key, value in details.items():
print(key + ' : ' + str(value))
print("Message: " + response_object.get_message().get_value())
lead_id = 440248001507154
ConvertLead.initialize()
ConvertLead.convert_lead(lead_id)
|
zoho/zohocrm-python-sdk-5.0
|
samples/records/ConvertLead.py
|
ConvertLead.py
|
py
| 4,997 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27713115887
|
# Given an array of events where events[i] = [startDayi, endDayi]: every event i starts at startDayi and ends at endDayi.
# You can attend an event i on any day d where startDayi <= d <= endDayi, and you can attend only one event on any day d.
# Return the maximum number of events you can attend.
# 1. Sort the events by their starting day.
# 2. With the events sorted, check on every day which events can start today.
# 3. For every event that can start today, keep its ending time in a heap.
#     - Why do we only need the ending times?
#       i) From today onwards we already know the event has started; all we still need to know is when it will finish.
#       ii) The other key to this algorithm is being greedy: we always want to pick the event that ends the soonest.
#    - So how do we find the event that ends the soonest?
#      i) The brute-force way would be to scan every event's ending time and take the minimum. That is fine for a single day,
#         but since we can attend only one event per day the scan would repeat every day, which is why a min-heap is used instead.
# 4. One more housekeeping step: events whose ending time is already in the past can no longer be attended and are dropped.
# 5. Last but very important: attend an event whenever the heap still holds one that is available today.
import heapq
class Solution:
def maxEvents(self, events):
events = sorted(events, key=lambda event: event[0])
maxDay = max([end for start, end in events])
eventIndex = 0
num_events_attended = 0
minHeap = []
for day in range(1, maxDay + 1):
while eventIndex < len(events) and events[eventIndex][0] == day:
heapq.heappush(minHeap, events[eventIndex][1])
eventIndex += 1
while minHeap and minHeap[0] < day:
heapq.heappop(minHeap)
if minHeap:
                heapq.heappop(minHeap)
num_events_attended += 1
return num_events_attended
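# Hedged usage sketch (not in the original file); relies on the heapq import added above.
if __name__ == "__main__":
    print(Solution().maxEvents([[1, 2], [2, 3], [3, 4]]))          # expected: 3
    print(Solution().maxEvents([[1, 2], [2, 3], [3, 4], [1, 2]]))  # expected: 4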
|
jemis140/DSA_Practice
|
amazon-questions/maximum_number_of_events.py
|
maximum_number_of_events.py
|
py
| 2,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23907212429
|
#!/usr/bin/env python
# -*- coding:utf-8 -*
import pickle
import sys
import pandas as pd
from utils.change_column_names import changeToName
from utils.feature_engineering import *
from utils.labels import *
from utils.pipelines import transformation_pipeline
from utils.strategy import Strategy
input_path = sys.argv[1]
output_path = sys.argv[2]
model_file = 'model.pkl'
print('Loaded Libraries...')
tick_data = pd.read_csv(input_path)
with open(model_file, 'rb') as f:
model = pickle.load(f)
print('Loaded data and model...')
tick_data = changeToName(tick_data)
tick_data = transformation_pipeline.fit_transform(tick_data)
# drop() is not in-place, so capture the feature frame used for prediction
features = tick_data.drop(['Label', 'Index'], axis=1)
print('Transformed data...')
print('Building orders...')
tick_data_pred = model.predict(features)
tick_data = tick_data[['Index', 'StockCode', 'TickTime', 'LatestTransactionPriceToTick',
                       'RollingTransPriceMeanDiff5', 'RollingTransPriceMeanDiff100', 'Label']]
tick_data['Label'] = tick_data_pred
order_tick = Strategy().fit_transform(tick_data)
order_tick.to_csv(output_path, index=False)
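# Hedged usage note (not in the original file): the script expects the input and
# output CSV paths as positional arguments and a model.pkl in the working directory,
# e.g. python myModel_demo.py tick_data.csv orders.csv (file names illustrative only).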
|
TheLohia/Sultans
|
myModel_demo.py
|
myModel_demo.py
|
py
| 1,090 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6308148292
|
from halo import Halo
from codetiming import Timer
import pandas as pd
import time
import os
import requests
from mj_formatter import mailjet
from BadEmailsData import *
# Default directories where input and output data located
INPUT_DIR_NAME = 'input/'
OUTPUT_DIR_NAME = 'output/'
DV_API_KEY = '' # API KEY of Data validation
NEW_DATA = '' # Name of the data with file extension (csv, xlsx)
MJ_MainData = '' # Downloaded data from mainlist
MJ_Exclusion = '' # Downloaded data from exclusion
# Folder setting
mainlistData = INPUT_DIR_NAME + MJ_MainData
exclusionData = INPUT_DIR_NAME + MJ_Exclusion
# no need to change, this is formatted DB data
exportedData = OUTPUT_DIR_NAME + 'exported.csv'
def clean_data(current_users, NEW_DATA):
# global NEW_DATA
displayText = ''
file_name, file_extension = os.path.splitext(NEW_DATA)
file_extension = file_extension[1:]
if file_extension == 'csv':
# new_user_data = pd.read_csv(INPUT_DIR_NAME + '/' + NEW_DATA)
new_user_data = pd.read_csv(NEW_DATA)
elif file_extension == 'xls':
new_user_data = pd.read_excel(NEW_DATA, engine='xlrd')
elif file_extension == 'xlsx':
new_user_data = pd.read_excel(NEW_DATA, engine='openpyxl')
else:
return -1, "error: file_extention is not supported: " + file_extension
# check email header exist
is_no_email_header = True
email_header = None
for header in list(new_user_data.columns):
formatted_header = header.lower().strip()
if formatted_header.find("email") != -1 or formatted_header.find("e-mail"):
email_header = header
is_no_email_header = False
break
if is_no_email_header is True:
return -1, "error: no email header/column found in your file " + NEW_DATA
new_emails = new_user_data[email_header] # E-Mail or Email
new_emails = new_user_data.rename(columns={email_header: "Email"})['Email']
print("Number of users in the new file: ", len(new_emails))
displayText += "Number of users in the new file: " + \
str(len(new_emails)) + "\n"
new_emails = new_emails.str.lower()
new_emails = new_emails.str.strip()
new_emails.drop_duplicates(keep="last", inplace=True)
print("Number of users after dedup: ", len(new_emails))
displayText += "Number of users after dedup: " + \
str(len(new_emails)) + "\n"
new_emails.to_csv(file_name + "_removed_dup.csv", header=True, index=False)
"""get current existing users"""
current_users.rename(
columns={'email': 'Email', 'status': 'Status'}, inplace=True)
current_users['Email'] = current_users['Email'].str.lower()
current_users['Email'] = current_users['Email'].str.strip()
merged = current_users.merge(new_emails, how="right", indicator=True)
merged.to_csv(file_name + "compared_with_currentdb.csv",
index=False, columns=["Email", "Status"])
new_users = merged[merged['_merge'] == 'right_only']
existing_sub = merged[merged['Status'] == 'sub']
existing_sub.to_csv(file_name + "_existing.csv", index=False)
existing_unsub = merged[merged['Status'] == 'unsub']
suppressed = merged[merged['Status'] == 'excluded']
print("Number of new users: ", len(new_users), end=", ")
displayText += "Number of new users: " + str(len(new_users)) + ", "
print("along with %s existing sub, %s unsub, %s cleaned users" %
(len(existing_sub), len(existing_unsub), len(suppressed)))
displayText += "along with %s existing sub, %s unsub, %s cleaned users" % (
len(existing_sub), len(existing_unsub), len(suppressed)) + "\n"
new_users = pd.DataFrame(new_users['Email'])
new_users.to_csv(file_name + "_new_users.csv", index=False)
# pd.read_csv("bad_emails.csv")
sample_bad_emails = pd.DataFrame(data=badEmails, columns=['Domain'])
new_users['Domain'] = new_users['Email'].str.split('@').str[1]
merged = sample_bad_emails.merge(
new_users, how="right", indicator=True, on="Domain")
good_emails = merged[merged['_merge'] == 'right_only']
print("Number of user after remove blacklisted domain: ", len(good_emails))
displayText += "Number of user after remove blacklisted domain: " + \
str(len(good_emails))
good_emails = good_emails['Email']
good_emails.to_csv(file_name + "_to_hyatt.csv", index=False, header=True)
bad_emails = merged[merged['_merge'] == 'both']
bad_emails.to_csv(file_name + "_blacklisted.csv",
index=False, header=True, columns=["Email", "Domain"])
return displayText
# def getDvScore(NEW_DATA):
# result = '----------------------------------\n'
# result += 'Checking DV Score'
# file_name = os.path.splitext(NEW_DATA)[0]
# file = file_name + "_to_hyatt.csv"
# url = 'https://dv3.datavalidation.com/api/v2/user/me/list/create_upload_url/'
# params = '?name=' + file + '&email_column_index=0&has_header=0&start_validation=false'
# headers = {'Authorization': 'Bearer ' + DV_API_KEY}
# s = requests.Session()
# a = requests.adapters.HTTPAdapter(max_retries=3)
# s.mount("https://", a)
# res = s.get(url+params, headers=headers)
# upload_csv_url = res.json()
# files = {
# 'file': open(file, 'rb')
# }
# list_id = s.post(upload_csv_url, headers=headers, files=files)
# dv_result_url = 'https://dv3.datavalidation.com/api/v2/user/me/list/' + list_id.json()
# dv_result = s.get(dv_result_url, headers=headers).json()
# while dv_result['status_value'] == 'PRE_VALIDATING':
# dv_result = requests.get(dv_result_url, headers=headers).json()
# result += "Status percent complete: " + str(dv_result['status_percent_complete']) + "\n"
# time.sleep(5) # sleep 5 seconds
# try:
# def percent(count): return round((count / dv_result['subscriber_count']), 2) * 100
# result += "Done checking dv score"
# result += "The grade summary is: \n"
# for score_name, score_value in dv_result['grade_summary'].items():
# score = '%-3s : ' % (score_name) + str(percent(score_value))
# result += score + "\n"
# return result
# except:
# if (dv_result['subscriber_count'] == 0):
# result += '''
# Empty list of emails were sent for dv validation!
# Perhaps no new email to check dv?
# Program terminated
# '''
# return result
# if __name__ == "__main__":
# mj = mailjet(mainlistData, exclusionData, exportedData)
# mj.formatData()
# mj_db = pd.read_csv(exportedData)
# print("2) Crosscheck new data with Mailjet data")
# clean_data(mj_db)
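# Hedged usage sketch (not in the original file), mirroring the commented-out
# __main__ block above; DV_API_KEY and the input file names must be filled in first,
# and clean_data() takes both the current-user frame and the new data file path.
# mj = mailjet(mainlistData, exclusionData, exportedData)
# mj.formatData()
# mj_db = pd.read_csv(exportedData)
# print(clean_data(mj_db, INPUT_DIR_NAME + NEW_DATA))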
|
TaPhuocHai/simple-mailjet-data-checker
|
mj_automation.py
|
mj_automation.py
|
py
| 6,995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14542676756
|
import requests as rq
from bs4 import BeautifulSoup
import json
#-------------------------------------------
#Variables used below
#-------------------------------------------
iLetras = 0 # index used to walk the letters array
aLetras=[
'a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','ñ','o','p','q','r','s',
't','u','v','w','x','y','z'
]
#-----------------------------------------------
#-----------------------------------------------
def normalize(s):
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ü","u")
)
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def CargarWeb(url):
r = rq.get(url)
soup=BeautifulSoup(r.content,'html5lib')
return soup
#Function that scrapes dictionary entries from the page
#https://www.interpatagonia.com/mapuche/diccionario.html
def BuscarPalabras(iLetras,aLetras):
    #Load the page and keep only the <section> element
web = CargarWeb("https://www.interpatagonia.com/mapuche/diccionario.html").find('section')
    pal = [] #List pairing each word with its translation
for i in aLetras :
pal.append({'letra':i,'palabras':[]})
#-------------------------------------------------
    #Collecting the words from the first page
#-------------------------------------------------
    #find every <ul> tag on the page
for ul in web.findAll('ul'):
        #find every <li> tag on the page
for li in ul.findAll('li'):
try :
palabra = li.strong.text.split(":")[0].strip().lower() # Palabra en mapugundun
letra = palabra[:1].lower() # Obtiene la primera letra
#traduccion = ''
if ( len(li.text.split(":")) > 1 ):
traduccion = li.text.split(":")[1].strip()
if(traduccion != ""):
#Se llena con los datos
for pos in pal:
if(pos['letra'] == letra):
pos['palabras'].append({'palabra':palabra,'significado':traduccion})
#print(traduccion)
#pal[letra].append([text,traduccion])
except AttributeError:
pass
return pal
    #-----------------------------------------------------------------
    #End of word collection for the first page
    #-----------------------------------------------------------------
#Load the words from the txt file, which contains the dictionary extracted
# from the government PDF with an extensive Mapudungun vocabulary
def BuscarPalabras2(iLetras,aLetras):
f = open("Documentos/Palabras.txt")
iLetras = 0 # Inicia el indice en 0
aCad = []
actual_pto = False # Bandera para detectar si existe dob punto en palabra actual
sig_pto = False # Bandera para detectar si existe dob punto en palabra siguiente
for i in f.read().split('.'):
cad = i.split('\n') #Obtiene un arreglo donde separa las palabras por los saltos de linea
#Si al momento de separar las cadenas por salto de linea este presenta
# mas de 2 posiciones en el arreglo se agrega la palabra dependiendo si esta posee ":"
#sino se agrega a la definicion anterior .
if(len(cad)>2):
#print("------------------------")
for ind in range(len(cad)):
if(cad[ind] != "\n" ):
actual = ind #Indice del actual
#Si existe siguiente ve si tiene ":" , sino concadena lo del siguiente con el actual
if ( actual+1 < len(cad) and actual > 0):
siguiente = actual+1
for letras in cad[actual]:
if(letras == ":"):
actual_pto = True
for letras in cad[siguiente]:
if(letras == ":"):
sig_pto = True
#Si existe pto coma en el actual y el siguiente se guarda actual
if(actual_pto == True and sig_pto == True):
aCad.append(cad[actual])
actual_pto = False
sig_pto = False
#Si existe pto coma en el actual y el siguiente no
# se concatena con el actual
if(actual_pto == True and sig_pto == False):
pal = cad[actual] +" "+cad[siguiente]
#print("Concatenacion: " , pal)
aCad.append(pal)
actual_pto = False
sig_pto = False
#print("-----------------------")
else:
#Se guarda las palabras que no tengas mas de 1 posicion
if(len(cad) > 1):
aCad.append(cad[1])
#--------------------------------------------------------------------------
    #Section that organizes the dictionary into alphabetically ordered JSON
#-------------------------------------------------------------------------
pal=[]
#Crea las llaves para el diccionario
for i in aLetras:
pal.append({'letra':i,'palabras':[]})
for i in range(len(aCad)) :
separados = aCad[i].split(":") # Variable que separa la cadena por ":"
if(len(separados) > 1):
palabra = separados[0].lower()
significado = separados[1]
if(significado != " "):
#Se obtiene la primera palabra para ordenar alfabeticamente
letra = normalize(palabra[:1].lower())
for pos in pal:
if(pos['letra'] == letra):
pos['palabras'].append({'palabra':palabra,'significado':significado})
#---------------------------------------------------------------------
return pal
#Function that scrapes dictionary entries from the page
#https://www.mapuche.nl/espanol/idioma/index_idioma.htm
#For this site the dictionary letter must be passed as part of the URL
#https://www.mapuche.nl/espanol/idioma/"letra".htm <- it follows that structure
def BuscarPalabras3(iLetras,aLetras):
pal = [] #Diccionario para establecer palabra + traduccion
for i in aLetras:
pal.append({'letra':i,'palabras':[]})
for letra in aLetras:
try:
web = CargarWeb("https://www.mapuche.nl/espanol/idioma/"+letra+".htm")
contenido = web.find("td",attrs={'width':'749'}).text.split("\n") # Obtiene la parte que contiene las palabras + traduccion
for i in contenido:
if(len(i.strip().split("-")) > 1):
palabra = i.strip().split("-")[1].strip().lower() # separa la palabra por la "-" y quita los espacios vacios
letra = normalize(palabra[:1]).lower() # obtiene la primera letra de la palabra
traduccion = i.strip().split("-")[0].strip() # separa la traduccion por la "-" y quita los espacios vacios
if(len(letra)>0):
if(traduccion != ""):
for pos in pal:
if(pos['letra'] == letra):
pos['palabras'].append({'palabra':palabra,'significado':traduccion})
except Exception as e:
pass
return pal
def BuscarRepetidos(pal,pal2):
"""
    Function that finds the entries shared by both arrays and merges them so that no entry is duplicated
"""
palabras1 = [pos['palabras'] for pos in pal] #Obtiene el arreglo de palabras
palabras2 = [pos['palabras'] for pos in pal2] # Obtiene el arreglo de palabras
pal_final = [] #Arreglo donde se guardaran las palabras sin repetisione
for i in pal:
pal_final.append({'letra':i['letra'],'palabras':[]})
for i in range(len(palabras1)):
a_palabras1 = palabras1[i] #obtiene el arreglo para cada posicion
a_palabras2 = palabras2[i] #obtiene el arreglo para cada posicion
repetidos = False
i_pal1 = 0 #Indice de a_palabras1
i_pal2 = 0 #Indice de a_palabras2
#Si el largo es mayor a 0 continua la busqueda
if(len(a_palabras1) > 0 ):
for i in a_palabras1:
pal1 = i['palabra'] #Guarda palabra
sig1 = i['significado'] #Guarda significado
#print(sig1)
for y in a_palabras2:
pal2 = y['palabra'] #Guarda palabra
sig2 = y['significado'] #Guarda significado
#Consulta si la palabras son iguales
if(normalize(pal1.lower()) == normalize(pal2.lower())):
letra = pal1[:1].lower()
cad = ""
#Ve si tiene punto y si tiene lo elimina
if(sig1.find(".") > 0 ):
a = sig1.split(".")
cad += a[0]
else:
cad += sig1
#Ve si tiene punto y si tiene lo elimina
if(sig2.find(".") > 0):
a = sig2.split(".")
cad +=","+a[0]
else:
cad +=","+sig2
#Guarda el dato repetido
for z in pal_final:
if(z['letra'] == letra):
z['palabras'].append({'palabra':pal1,'significado':cad})
return pal_final
#Function that stores the remaining dictionary entries
def llenar(pal,dic):
existe = False
palabras1 = [pos['palabras'] for pos in dic]
palabras2 = [pos['palabras'] for pos in pal]
for i in range(len(palabras1)) :
#Si la posicion de palabras1 esta vacio se llena automaticamente
#con la de palabras2
if(len(palabras1[i]) == 0):
if(len(palabras2[i]) > 0):
palabras1[i] = palabras2[i]
else:
pos1 = palabras1[i]
pos2 = palabras2[i]
for y in pos2:
pal = y['palabra']
sig = y['significado']
for z in pos1:
pal2 = z['palabra']
if(normalize(pal.lower()) == normalize(pal2.lower())):
existe = True
break
if(existe):
#Si existe la palabra la salta
existe=False
else:
#Si no existe la guarda
palabras1[i].append({'palabra':pal,'significado':sig})
for i in range(len(dic)):
dic[i]['palabras'] = palabras1[i]
return dic
#----------------------------------------------------------------
# Saving the collected words to JSON
#-------------------------------------------------------------------
print("Obteniendo palabras .....")
#Collect the words from the first page
pal = BuscarPalabras(iLetras,aLetras)
#Collect the words from the txt file
pal2= BuscarPalabras2(iLetras,aLetras)
#Collect the words from the second page
pal3 = BuscarPalabras3(iLetras,aLetras)
#Merge the entries that appear in both sources
d = BuscarRepetidos(pal,pal2)
d = BuscarRepetidos(d,pal3)
#Fill in the remaining words
d = llenar(pal,d);d = llenar(pal2,d);d = llenar(pal3,d);
#Save the dictionary
with open('json/dic_inicial.json','w') as file:
json.dump(d,file,indent=4)
print("Palabras obtenidas !! ")
|
CamiloFerreira/Traductor-Esp-Mapuzungun
|
Obtener_palabras.py
|
Obtener_palabras.py
|
py
| 9,734 |
python
|
es
|
code
| 1 |
github-code
|
6
|
42510851613
|
import sys, pathlib
import pytest
sys.path.insert(0,str(pathlib.Path(__file__).parent.parent.joinpath("src").resolve()))
#import pytest
from certauth2.__main__ import main
from certauth2 import CertificateAuthority, Encoding
from certauth2.creds_store import ondiskPathStore, ondiskCredentialStore
from cryptography import x509
@pytest.fixture(params=[e for e in Encoding])
def encoding(request):
return request.param
@pytest.fixture(params=["pem", "pfx", "pkcs12"])
def root_ca_suffix(request):
return request.param
def get_ca(encoding:Encoding, root_ca_suffix:str ):
store = ondiskCredentialStore(f"./.private/{encoding.name.lower()}", encoding=encoding)
return CertificateAuthority(
f"./.private/my-ca.{root_ca_suffix}",
store=store,
)
def test_root_certificate(encoding:Encoding, root_ca_suffix:str):
ca = get_ca(encoding, root_ca_suffix)
root_creds = ca.credentials
assert root_creds.subject.rfc4514_string() == f"CN=my-ca"
assert root_creds.cert.issuer == root_creds.subject
usage:x509.Extension[x509.KeyUsage] = root_creds.cert.extensions.get_extension_for_oid(x509.OID_KEY_USAGE)
assert usage.critical is True
assert usage.value.crl_sign and usage.value.key_cert_sign
assert root_creds.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier).value.key_identifier == root_creds.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier).value.key_identifier
pass
def test_load_creds(encoding:Encoding):
ca = get_ca(encoding, "pem")
creds = ca.load_creds("example.com", overwrite=True)
cp = ca["example.com"]
assert creds.cert == cp.cert
cp = ca[{"host":"example.com"}]
assert creds.cert == cp.cert
assert "example.com" in creds.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value.get_values_for_type(x509.DNSName)
assert x509.OID_SERVER_AUTH in creds.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
assert x509.OID_CLIENT_AUTH in creds.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
if __name__ == "__main__":
test_root_certificate(Encoding.DER)
test_load_creds()
|
jose-pr/pypki
|
tests/test_certauth2.py
|
test_certauth2.py
|
py
| 2,188 |
python
|
en
|
code
| 2 |
github-code
|
6
|
35848910596
|
from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
import crud
import models
import schemas
from db_handler import SessionLocal, engine
models.Base.metadata.create_all(bind=engine)
app = FastAPI(
title="FDFC Server",
version="1.0.0"
)
origins = [
"http://localhost",
"http://localhost:3000",
"http://localhost:8000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.post("/login", response_model=schemas.UserInfoResponse)
def post_login(request: schemas.UserRequest, db: Session=Depends(get_db)):
user = crud.get_user(
db=db,
username=request.username
)
return user
@app.post("/register", response_model=schemas.UserInfoResponse)
def post_register(request: schemas.UserRequest, db: Session=Depends(get_db)):
response = crud.add_user(
db=db,
username=request.username,
password=request.password
)
return response
@app.put("/additional-info", response_model=schemas.UserInfoResponse)
def put_additional_info(request: schemas.AdditionalInfoRequest, db: Session=Depends(get_db)):
response = crud.set_additional_info(
db=db,
id=request.id,
civil_status=request.civil_status,
occupation=request.occupation
)
return response
@app.put("/contact-info", response_model=schemas.UserInfoResponse)
def put_contact_info(request: schemas.ContactInfoRequest, db: Session=Depends(get_db)):
response = crud.set_contact_info(
db=db,
id=request.id,
mobile=request.mobile,
landline=request.landline,
email_address=request.email_address
)
return response
@app.put("/location-info", response_model=schemas.UserInfoResponse)
def put_location_info(request: schemas.LocationInfoRequest, db: Session=Depends(get_db)):
response = crud.set_location_info(
db=db,
id=request.id,
address_permanent=request.address_permanent,
address_temporary=request.address_temporary
)
return response
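# Hedged usage sketch (not in the original file), kept commented out so the module
# stays import-safe; assumes the schemas/crud layers accept this payload shape.
# from fastapi.testclient import TestClient
# client = TestClient(app)
# resp = client.post("/register", json={"username": "alice", "password": "s3cret"})
# print(resp.status_code, resp.json())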
|
chris-vill/fdfc-server
|
main.py
|
main.py
|
py
| 2,151 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9624256552
|
import csv
import subprocess
from math import ceil
import os
from amazonscraper.client import Client
from aliexpress.client import AliexpressClient
from db import AmazonProduct, AliexpressProduct, EbayProduct, ProductDetail, create_session
def amazon_transformer(product: dict) -> dict:
arg_transform = {
"prices_per_unit": "price_per_unit",
"units": "units",
"description": "description",
}
    optional_args = {
        arg_transform[x]: product[x]
        for x in ["prices_per_unit", "units", "description"]
        # `is not float("nan")` is always True; NaN != NaN is the reliable check
        if x in product.keys() and product[x] == product[x]
    }
transformed = {
"AmazonProduct": {"name": product["title"], "ASIN": product["asin"]},
"ProductDetail": {
"rating": product["rating"],
"review_numbers": product["review_nb"],
"price": product["prices_main"],
"price_per_unit": product["prices_per_unit"],
"units": product["units"],
"image_url": product["img"],
"url": product["url"],
},
}
transformed["ProductDetail"].update(optional_args)
return transformed
def aliexpress_transformer(product: dict) -> dict:
transformed = {
"AliexpressProduct": {"name": product["title"], "ID": product["id"]},
"ProductDetail": {
"rating": '',
"review_numbers": '',
"price": product["price_current"],
"price_per_unit": '',
"units": '',
"image_url": product["image"],
"url": '',
"description": product['title'],
},
}
return transformed
def ebay_scraper(keywords, filename, limit=999999, proxy_limit=10):
env = os.environ.copy()
env["EBAY_PROXY_LIMIT"] = str(proxy_limit)
env["EBAY_LIMIT"] = str(limit)
with subprocess.Popen(
[
"scrapy",
"crawl",
"ebay",
"-o",
"../{}".format(filename),
"-a",
'search={}'.format(keywords),
],
cwd="scraping-ebay",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
env=env,
) as p, open('ebay_scraper_logs.txt', 'w+') as logfile:
while p.poll() is None:
output = p.stdout.readline().decode()
print(output.strip())
logfile.write(output)
return filename
def ebay_transformer(product: dict) -> dict:
transformed = {
"EbayProduct": {
"name": product["Name"],
"ebay_id": product["Link"].split("/")[-1].split("?")[0],
},
"ProductDetail": {
"price": product.get("Price", ""),
"location": product.get("Location", ""),
"description": product.get("Product Details", ""),
"url": product.get("Link", ""),
"image_url": product.get("Image", ""),
"seller": product.get("Seller", ""),
"seller_url": product.get("Seller Profile", ""),
}
}
transformed["ProductDetail"] = {
k: v
for k, v in transformed["ProductDetail"].items()
if v != ""
}
return transformed
def tp_scraper(keywords, filename, limit=999999, proxy_limit=10):
env = os.environ.copy()
env["TP_PROXY_LIMIT"] = str(proxy_limit)
env["TP_LIMIT"] = str(limit)
with subprocess.Popen(
[
"scrapy",
"crawl",
"tp",
"-o",
"../{}".format(filename),
"-a",
'search={}'.format(keywords),
],
cwd="tp-scraper",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
env=env,
) as p, open('tp_scraper_logs.txt', 'w+') as logfile:
while p.poll() is None:
output = p.stdout.readline().decode()
print(output.strip())
logfile.write(output)
return filename
def store_ebay_results(results, search, session=None):
product_ids = []
with open(results, 'r') as results_file:
csvreader = csv.reader(results_file)
headers = []
for row in csvreader:
if len(headers) == 0:
headers = row
else:
product = {
headers[i]: row[i]
for i in range(len(headers))
if row[i] != ""
}
transformed_product = ebay_transformer(product)
ebay_product = EbayProduct(**transformed_product["EbayProduct"])
session.add(ebay_product)
session.flush()
product_detail = ProductDetail(
product=ebay_product.id, search=search.id, **transformed_product["ProductDetail"]
)
session.add(product_detail)
session.flush()
product_ids.append(ebay_product.id)
session.commit()
return product_ids
def store_amazon_results(results, search, session=None):
if not session:
session = create_session()
product_ids = []
for result in results:
transformed_result = amazon_transformer(result.product)
product = AmazonProduct(**transformed_result["AmazonProduct"])
session.add(product)
session.flush()
product_ids.append(product.id)
product_detail = ProductDetail(
product=product.id, search=search.id, **transformed_result["ProductDetail"]
)
session.add(product_detail)
session.commit()
return product_ids
def store_aliexpress_results(results, search, session=None):
if not session:
session = create_session()
product_ids = []
for result in results:
transformed_result = aliexpress_transformer(result.product)
product = AliexpressProduct(**transformed_result["AliexpressProduct"])
session.add(product)
session.flush()
product_ids.append(product.id)
product_detail = ProductDetail(
product=product.id, search=search.id, **transformed_result["ProductDetail"]
)
session.add(product_detail)
session.commit()
return product_ids
def store_tp_results(results, search, session=None):
pass
def update_null_product_details(proxy_limit=10, session=None):
if not session:
session = create_session()
null_product_details = (
session.query(ProductDetail)
.filter(ProductDetail.description == None)
.all()
)
products = (
session.query(AmazonProduct)
.filter(AmazonProduct.id.in_([x.product for x in null_product_details]))
.all()
)
update_product_details(session, products, null_product_details, proxy_limit=proxy_limit)
def update_product_details(session, products, product_details, amazon_client=None, proxy_limit=10):
# where products is a SQLALchemy query result
if amazon_client is None:
amazon_client = Client(proxy_limit=proxy_limit)
block_size = 5
p_map = {
x.id: x
for x in products
}
pd_map = {
x.product: x
for x in product_details
}
combined_products = []
for x in pd_map.keys():
combined_products.append({
'name': p_map[x].name,
'ASIN': p_map[x].ASIN,
'url': pd_map[x].url
})
pd_asin_map = {
p_map[x.product].ASIN: x
for x in product_details
}
for i in range(int(ceil(len(combined_products) / block_size))):
end_i = (i + 1) * block_size
_products = combined_products[
i * block_size: end_i if end_i < len(products) else len(products)
]
product_details = amazon_client._get_product_details(_products)
for ASIN, details in product_details.items():
if "description" in details.keys():
pd_asin_map[ASIN].description = details['description']
session.commit()
def get_product_and_pds_from_ids(product_ids, session=None):
if not session:
session = create_session()
product_details = (
session.query(ProductDetail)
.filter(ProductDetail.product.in_(product_ids))
.all()
)
products = (
session.query(AmazonProduct)
.filter(AmazonProduct.id.in_([x.product for x in product_details]))
.all()
)
return {
'products': products,
'product_details': product_details,
}
|
Solda1219/aliexpress-proxy-change-scrape-request
|
s3scraper/utils.py
|
utils.py
|
py
| 8,491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22550276830
|
#!/usr/bin/env python3
import argparse
import logging
import rdflib
import rdflib_util as ru
import re
import sys
# Implementation of "list study group members" query directly in Python using
# rdflib API calls.
# ------------------------------------------------------
# main()
# ------------------------------------------------------
def main():
# input
parser = argparse.ArgumentParser(description='List subjects in a given DATS Dataset and StudyGroup.')
parser.add_argument('--dats_file', help ='Path to TOPMed or GTEx DATS JSON file.')
parser.add_argument('--dataset_id', required=False, help ='DATS identifier of the Dataset linked to the StudyGroup of interest.')
parser.add_argument('--study_group_name', required=False, help ='DATS identifier of the StudyGroup of interest.')
args = parser.parse_args()
# logging
logging.basicConfig(level=logging.INFO)
# parse JSON LD
g = ru.read_json_ld_graph(args.dats_file)
# obo:IAO_0000100 - "data set"
# obo:IAO_0000577 - "centrally registered identifier symbol"
# obo:RO_0003001 - "produced by"
# obo:OBI_0000066 - "investigation"
# obo:BFO_0000051 - "has part"
# obo:STATO_0000193 - "study group population"
# obo:RO_0002351 - "has member"
# obo:IAO_0000590 - "a textual entity that denotes a particular in reality"
# obo:BFO_0000040 - "material entity"
# SELECT ?dbgap_study_acc ?study_group_name ?subject_name
# WHERE {
# ----> ?dataset a obo:IAO_0000100.
# ----> ?dataset obo:IAO_0000577 ?dataset_id.
# ----> ?dataset_id sdo:identifier ?dbgap_study_acc.
# ?dataset obo:RO_0003001 ?study.
# ?study a obo:OBI_0000066.
# ?study obo:BFO_0000051 ?study_group.
# ?study_group a obo:STATO_0000193.
# ?study_group obo:IAO_0000590 ?study_group_name.
# ?study_group obo:RO_0002351 ?subject.
# ?subject a obo:BFO_0000040.
# ?subject obo:IAO_0000590 ?subject_name.
# }
# ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
# find ALL Datasets, retain those with a DATS identifier
all_datasets = [s for (s,p,o) in g.triples((None, None, ru.DATS_DATASET_TERM))]
dataset_ids = {}
datasets = []
for d in all_datasets:
for (s,p,o) in g.triples((d, ru.CENTRAL_ID_TERM, None)):
for (s2,p2,o2) in g.triples((o, ru.SDO_IDENT_TERM, None)):
dataset_ids[d] = o2
if d in dataset_ids:
datasets.append(d)
# filter datasets by id if one was specified
datasets = [d for d in datasets if (args.dataset_id is None) or (rdflib.term.Literal(args.dataset_id) == dataset_ids[d])]
# SELECT ?dbgap_study_acc ?study_group_name ?subject_name
# WHERE {
# ?dataset a obo:IAO_0000100.
# ?dataset obo:IAO_0000577 ?dataset_id.
# ?dataset_id sdo:identifier ?dbgap_study_acc.
# ----> ?dataset obo:RO_0003001 ?study.
# ----> ?study a obo:OBI_0000066.
# ?study obo:BFO_0000051 ?study_group.
# ?study_group a obo:STATO_0000193.
# ?study_group obo:IAO_0000590 ?study_group_name.
# ?study_group obo:RO_0002351 ?subject.
# ?subject a obo:BFO_0000040.
# ?subject obo:IAO_0000590 ?subject_name.
# }
# ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
# link each Dataset to Study (should be 1-1)
ds_to_study = {}
for d in datasets:
for (s,p,o) in g.triples((d, ru.PRODUCED_BY_TERM, None)):
for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_STUDY_TERM)):
ds_to_study[d] = o
# filter Datasets not linked to a study
datasets = [d for d in datasets if d in ds_to_study]
# SELECT ?dbgap_study_acc ?study_group_name ?subject_name
# WHERE {
# ?dataset a obo:IAO_0000100.
# ?dataset obo:IAO_0000577 ?dataset_id.
# ?dataset_id sdo:identifier ?dbgap_study_acc.
# ?dataset obo:RO_0003001 ?study.
# ?study a obo:OBI_0000066.
# ----> ?study obo:BFO_0000051 ?study_group.
# ----> ?study_group a obo:STATO_0000193.
# ----> ?study_group obo:IAO_0000590 ?study_group_name.
# ?study_group obo:RO_0002351 ?subject.
# ?subject a obo:BFO_0000040.
# ?subject obo:IAO_0000590 ?subject_name.
# }
# ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
# link each Study to StudyGroup (1-many) and get StudyGroup name
study_to_groups = {}
study_group_to_name = {}
for s in ds_to_study.values():
groups = []
for (s,p,o) in g.triples((s, ru.HAS_PART_TERM, None)):
for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_STUDY_GROUP_TERM)):
# get name
n_names = 0
for (s3,p3,o3) in g.triples((o, ru.NAME_TERM, None)):
study_group_to_name[o] = o3
n_names += 1
if n_names == 1:
groups.append(o)
# filter study groups by name if one was specified
groups = [g for g in groups if (args.study_group_name is None) or (rdflib.term.Literal(args.study_group_name) == study_group_to_name[g])]
study_to_groups[s] = groups
# SELECT ?dbgap_study_acc ?study_group_name ?subject_name
# WHERE {
# ?dataset a obo:IAO_0000100.
# ?dataset obo:IAO_0000577 ?dataset_id.
# ?dataset_id sdo:identifier ?dbgap_study_acc.
# ?dataset obo:RO_0003001 ?study.
# ?study a obo:OBI_0000066.
# ?study obo:BFO_0000051 ?study_group.
# ?study_group a obo:STATO_0000193.
# ?study_group obo:IAO_0000590 ?study_group_name.
# ----> ?study_group obo:RO_0002351 ?subject.
# ----> ?subject a obo:BFO_0000040.
# ----> ?subject obo:IAO_0000590 ?subject_name.
# }
# ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
# find subjects in each study group and retrieve their names
study_group_to_subjects = {}
subject_to_name = {}
for sg in study_group_to_name.keys():
subjects = []
for (s,p,o) in g.triples((sg, ru.HAS_MEMBER_TERM, None)):
for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_MATERIAL_TERM)):
for (s3,p3,o3) in g.triples((o, ru.NAME_TERM, None)):
subject_to_name[o] = o3
subjects.append(o)
study_group_to_subjects[sg] = subjects
# SELECT ?dbgap_study_acc ?study_group_name ?subject_name
# WHERE {
# ?dataset a obo:IAO_0000100.
# ?dataset obo:IAO_0000577 ?dataset_id.
# ?dataset_id sdo:identifier ?dbgap_study_acc.
# ?dataset obo:RO_0003001 ?study.
# ?study a obo:OBI_0000066.
# ?study obo:BFO_0000051 ?study_group.
# ?study_group a obo:STATO_0000193.
# ?study_group obo:IAO_0000590 ?study_group_name.
# ?study_group obo:RO_0002351 ?subject.
# ?subject a obo:BFO_0000040.
# ?subject obo:IAO_0000590 ?subject_name.
# }
# ----> ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
print()
print("StudyGroup members:")
print()
print("dbGaP Study\tStudy Group\tSubject ID")
# sort datasets
datasets.sort(key=lambda x: dataset_ids[x])
for d in datasets:
dataset_id = dataset_ids[d]
study = ds_to_study[d]
groups = study_to_groups[study]
# sort study groups
groups.sort(key=lambda x: study_group_to_name[x])
for g in groups:
group_name = study_group_to_name[g]
subjects = study_group_to_subjects[g]
# sort subjects
subjects.sort(key=lambda x: subject_to_name[x])
for s in subjects:
subject_name = subject_to_name[s]
print("%s\t%s\t%s" % (dataset_id, group_name, subject_name))
print()
if __name__ == '__main__':
main()
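# Hedged usage note (not in the original file): typical invocation is
#   python rdflib_list_study_group_members.py --dats_file <path-to-DATS-JSON> \
#       --dataset_id <dbGaP accession> --study_group_name <group name>
# where --dataset_id and --study_group_name are optional filters.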
|
dcppc/crosscut-metadata
|
sparql/v0.5/rdflib_list_study_group_members.py
|
rdflib_list_study_group_members.py
|
py
| 8,716 |
python
|
en
|
code
| 7 |
github-code
|
6
|
21342569046
|
import socket
import os
import subprocess
client_socket = socket.socket()
host = "10.228.164.122" # paste your server IP address
port = 9999
client_socket.connect((host, port))
while True:
    data = str(client_socket.recv(1024), "utf-8")
    if not data:
        break  # an empty read means the server closed the connection
    print(data, end=" ")
|
aniruddhamalkar/Simple-singledirection-Python3-Sockets
|
basicstringtransferclient.py
|
basicstringtransferclient.py
|
py
| 269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26039871256
|
from __future__ import annotations
import dataclasses
import hashlib
import os.path
from collections import deque
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable, Mapping
from pants.backend.go.util_rules import cgo, coverage
from pants.backend.go.util_rules.assembly import (
AssembleGoAssemblyFilesRequest,
FallibleAssembleGoAssemblyFilesResult,
FallibleGenerateAssemblySymabisResult,
GenerateAssemblySymabisRequest,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.cgo import CGoCompileRequest, CGoCompileResult, CGoCompilerFlags
from pants.backend.go.util_rules.coverage import (
ApplyCodeCoverageRequest,
ApplyCodeCoverageResult,
BuiltGoPackageCodeCoverageMetadata,
FileCodeCoverageMetadata,
)
from pants.backend.go.util_rules.embedcfg import EmbedConfig
from pants.backend.go.util_rules.goroot import GoRoot
from pants.backend.go.util_rules.import_config import ImportConfig, ImportConfigRequest
from pants.backend.go.util_rules.sdk import GoSdkProcess, GoSdkToolIDRequest, GoSdkToolIDResult
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestEntries,
DigestSubset,
FileContent,
FileEntry,
MergeDigests,
PathGlobs,
)
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.resources import read_resource
from pants.util.strutil import path_safe
class BuildGoPackageRequest(EngineAwareParameter):
def __init__(
self,
*,
import_path: str,
pkg_name: str,
digest: Digest,
dir_path: str,
build_opts: GoBuildOptions,
go_files: tuple[str, ...],
s_files: tuple[str, ...],
direct_dependencies: tuple[BuildGoPackageRequest, ...],
import_map: Mapping[str, str] | None = None,
minimum_go_version: str | None,
for_tests: bool = False,
embed_config: EmbedConfig | None = None,
with_coverage: bool = False,
cgo_files: tuple[str, ...] = (),
cgo_flags: CGoCompilerFlags | None = None,
c_files: tuple[str, ...] = (),
header_files: tuple[str, ...] = (),
cxx_files: tuple[str, ...] = (),
objc_files: tuple[str, ...] = (),
fortran_files: tuple[str, ...] = (),
prebuilt_object_files: tuple[str, ...] = (),
pkg_specific_compiler_flags: tuple[str, ...] = (),
pkg_specific_assembler_flags: tuple[str, ...] = (),
is_stdlib: bool = False,
) -> None:
"""Build a package and its dependencies as `__pkg__.a` files.
Instances of this class form a structure-shared DAG, and so a hashcode is pre-computed for
the recursive portion.
"""
if with_coverage and build_opts.coverage_config is None:
raise ValueError(
"BuildGoPackageRequest.with_coverage is set but BuildGoPackageRequest.build_opts.coverage_config is None!"
)
self.import_path = import_path
self.pkg_name = pkg_name
self.digest = digest
self.dir_path = dir_path
self.build_opts = build_opts
self.go_files = go_files
self.s_files = s_files
self.direct_dependencies = direct_dependencies
self.import_map = FrozenDict(import_map or {})
self.minimum_go_version = minimum_go_version
self.for_tests = for_tests
self.embed_config = embed_config
self.with_coverage = with_coverage
self.cgo_files = cgo_files
self.cgo_flags = cgo_flags
self.c_files = c_files
self.header_files = header_files
self.cxx_files = cxx_files
self.objc_files = objc_files
self.fortran_files = fortran_files
self.prebuilt_object_files = prebuilt_object_files
self.pkg_specific_compiler_flags = pkg_specific_compiler_flags
self.pkg_specific_assembler_flags = pkg_specific_assembler_flags
self.is_stdlib = is_stdlib
self._hashcode = hash(
(
self.import_path,
self.pkg_name,
self.digest,
self.dir_path,
self.build_opts,
self.go_files,
self.s_files,
self.direct_dependencies,
self.import_map,
self.minimum_go_version,
self.for_tests,
self.embed_config,
self.with_coverage,
self.cgo_files,
self.cgo_flags,
self.c_files,
self.header_files,
self.cxx_files,
self.objc_files,
self.fortran_files,
self.prebuilt_object_files,
self.pkg_specific_compiler_flags,
self.pkg_specific_assembler_flags,
self.is_stdlib,
)
)
def __repr__(self) -> str:
# NB: We must override the default `__repr__` so that `direct_dependencies` does not
# traverse into transitive dependencies, which was pathologically slow.
return (
f"{self.__class__}("
f"import_path={repr(self.import_path)}, "
f"pkg_name={self.pkg_name}, "
f"digest={self.digest}, "
f"dir_path={self.dir_path}, "
f"build_opts={self.build_opts}, "
f"go_files={self.go_files}, "
f"s_files={self.s_files}, "
f"direct_dependencies={[dep.import_path for dep in self.direct_dependencies]}, "
f"import_map={self.import_map}, "
f"minimum_go_version={self.minimum_go_version}, "
f"for_tests={self.for_tests}, "
f"embed_config={self.embed_config}, "
f"with_coverage={self.with_coverage}, "
f"cgo_files={self.cgo_files}, "
f"cgo_flags={self.cgo_flags}, "
f"c_files={self.c_files}, "
f"header_files={self.header_files}, "
f"cxx_files={self.cxx_files}, "
f"objc_files={self.objc_files}, "
f"fortran_files={self.fortran_files}, "
f"prebuilt_object_files={self.prebuilt_object_files}, "
f"pkg_specific_compiler_flags={self.pkg_specific_compiler_flags}, "
f"pkg_specific_assembler_flags={self.pkg_specific_assembler_flags}, "
f"is_stdlib={self.is_stdlib}"
")"
)
def __hash__(self) -> int:
return self._hashcode
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self._hashcode == other._hashcode
and self.import_path == other.import_path
and self.pkg_name == other.pkg_name
and self.digest == other.digest
and self.dir_path == other.dir_path
and self.build_opts == other.build_opts
and self.import_map == other.import_map
and self.go_files == other.go_files
and self.s_files == other.s_files
and self.minimum_go_version == other.minimum_go_version
and self.for_tests == other.for_tests
and self.embed_config == other.embed_config
and self.with_coverage == other.with_coverage
and self.cgo_files == other.cgo_files
and self.cgo_flags == other.cgo_flags
and self.c_files == other.c_files
and self.header_files == other.header_files
and self.cxx_files == other.cxx_files
and self.objc_files == other.objc_files
and self.fortran_files == other.fortran_files
and self.prebuilt_object_files == other.prebuilt_object_files
and self.pkg_specific_compiler_flags == other.pkg_specific_compiler_flags
and self.pkg_specific_assembler_flags == other.pkg_specific_assembler_flags
and self.is_stdlib == other.is_stdlib
# TODO: Use a recursive memoized __eq__ if this ever shows up in profiles.
and self.direct_dependencies == other.direct_dependencies
)
def debug_hint(self) -> str | None:
return self.import_path
@dataclass(frozen=True)
class FallibleBuildGoPackageRequest(EngineAwareParameter, EngineAwareReturnType):
"""Request to build a package, but fallible if determining the request metadata failed.
When creating "synthetic" packages, use `GoPackageRequest` directly. This type is only intended
for determining the package metadata of user code, which may fail to be analyzed.
"""
request: BuildGoPackageRequest | None
import_path: str
exit_code: int = 0
stderr: str | None = None
dependency_failed: bool = False
def level(self) -> LogLevel:
return (
LogLevel.ERROR if self.exit_code != 0 and not self.dependency_failed else LogLevel.DEBUG
)
def message(self) -> str:
message = self.import_path
message += (
" succeeded." if self.exit_code == 0 else f" failed (exit code {self.exit_code})."
)
if self.stderr:
message += f"\n{self.stderr}"
return message
def cacheable(self) -> bool:
# Failed compile outputs should be re-rendered in every run.
return self.exit_code == 0
@dataclass(frozen=True)
class FallibleBuiltGoPackage(EngineAwareReturnType):
"""Fallible version of `BuiltGoPackage` with error details."""
output: BuiltGoPackage | None
import_path: str
exit_code: int = 0
stdout: str | None = None
stderr: str | None = None
dependency_failed: bool = False
def level(self) -> LogLevel:
return (
LogLevel.ERROR if self.exit_code != 0 and not self.dependency_failed else LogLevel.DEBUG
)
def message(self) -> str:
message = self.import_path
message += (
" succeeded." if self.exit_code == 0 else f" failed (exit code {self.exit_code})."
)
if self.stdout:
message += f"\n{self.stdout}"
if self.stderr:
message += f"\n{self.stderr}"
return message
def cacheable(self) -> bool:
# Failed compile outputs should be re-rendered in every run.
return self.exit_code == 0
@dataclass(frozen=True)
class BuiltGoPackage:
"""A package and its dependencies compiled as `__pkg__.a` files.
The packages are arranged into `__pkgs__/{path_safe(import_path)}/__pkg__.a`.
"""
digest: Digest
import_paths_to_pkg_a_files: FrozenDict[str, str]
coverage_metadata: BuiltGoPackageCodeCoverageMetadata | None = None
@dataclass(frozen=True)
class RenderEmbedConfigRequest:
embed_config: EmbedConfig | None
@dataclass(frozen=True)
class RenderedEmbedConfig:
digest: Digest
PATH = "./embedcfg"
@dataclass(frozen=True)
class GoCompileActionIdRequest:
build_request: BuildGoPackageRequest
@dataclass(frozen=True)
class GoCompileActionIdResult:
action_id: str
# TODO(#16831): Merge this rule helper and the AssemblyPostCompilationRequest.
async def _add_objects_to_archive(
input_digest: Digest,
pkg_archive_path: str,
obj_file_paths: Iterable[str],
) -> ProcessResult:
# Use `go tool asm` tool ID since `go tool pack` does not have a version argument.
asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
pack_result = await Get(
ProcessResult,
GoSdkProcess(
input_digest=input_digest,
command=(
"tool",
"pack",
"r",
pkg_archive_path,
*obj_file_paths,
),
env={
"__PANTS_GO_ASM_TOOL_ID": asm_tool_id.tool_id,
},
description="Link objects to Go package archive",
output_files=(pkg_archive_path,),
),
)
return pack_result
@dataclass(frozen=True)
class SetupAsmCheckBinary:
digest: Digest
path: str
# Due to the bootstrap problem, the asm check binary cannot use the `LoadedGoBinaryRequest` rules since
# those rules call back into this `build_pkg` package. Instead, just invoke `go build` directly which is fine
# since the asm check binary only uses the standard library.
@rule
async def setup_golang_asm_check_binary() -> SetupAsmCheckBinary:
src_file = "asm_check.go"
content = read_resource("pants.backend.go.go_sources.asm_check", src_file)
if not content:
raise AssertionError(f"Unable to find resource for `{src_file}`.")
sources_digest = await Get(Digest, CreateDigest([FileContent(src_file, content)]))
binary_name = "__go_asm_check__"
compile_result = await Get(
ProcessResult,
GoSdkProcess(
command=("build", "-o", binary_name, src_file),
input_digest=sources_digest,
output_files=(binary_name,),
env={"CGO_ENABLED": "0"},
description="Build Go assembly check binary",
),
)
return SetupAsmCheckBinary(compile_result.output_digest, f"./{binary_name}")
# Check whether the given files looks like they could be Golang-format assembly language files.
@dataclass(frozen=True)
class CheckForGolangAssemblyRequest:
digest: Digest
dir_path: str
s_files: tuple[str, ...]
@dataclass(frozen=True)
class CheckForGolangAssemblyResult:
maybe_golang_assembly: bool
@rule
async def check_for_golang_assembly(
request: CheckForGolangAssemblyRequest,
asm_check_setup: SetupAsmCheckBinary,
) -> CheckForGolangAssemblyResult:
"""Return true if any of the given `s_files` look like it could be a Golang-format assembly
language file.
This is used by the cgo rules as a heuristic to determine if the user is passing Golang assembly
format instead of gcc assembly format.
"""
input_digest = await Get(Digest, MergeDigests([request.digest, asm_check_setup.digest]))
result = await Get(
ProcessResult,
Process(
argv=(
asm_check_setup.path,
*(os.path.join(request.dir_path, s_file) for s_file in request.s_files),
),
input_digest=input_digest,
level=LogLevel.DEBUG,
description="Check whether assembly language sources are in Go format",
),
)
return CheckForGolangAssemblyResult(len(result.stdout) > 0)
# Copy header files to names which use platform independent names. For example, defs_linux_amd64.h
# becomes defs_GOOS_GOARCH.h.
#
# See https://github.com/golang/go/blob/1c05968c9a5d6432fc6f30196528f8f37287dd3d/src/cmd/go/internal/work/exec.go#L867-L892
# for particulars.
async def _maybe_copy_headers_to_platform_independent_names(
input_digest: Digest,
dir_path: str,
header_files: tuple[str, ...],
goroot: GoRoot,
) -> Digest | None:
goos_goarch = f"_{goroot.goos}_{goroot.goarch}"
goos = f"_{goroot.goos}"
goarch = f"_{goroot.goarch}"
digest_entries = await Get(DigestEntries, Digest, input_digest)
digest_entries_by_path: dict[str, FileEntry] = {
entry.path: entry for entry in digest_entries if isinstance(entry, FileEntry)
}
new_digest_entries: list[FileEntry] = []
for header_file in header_files:
header_file_path = PurePath(dir_path, header_file)
entry = digest_entries_by_path.get(str(header_file_path))
if not entry:
continue
stem = header_file_path.stem
new_stem: str | None = None
if stem.endswith(goos_goarch):
new_stem = stem[0 : -len(goos_goarch)] + "_GOOS_GOARCH"
elif stem.endswith(goos):
new_stem = stem[0 : -len(goos)] + "_GOOS"
elif stem.endswith(goarch):
new_stem = stem[0 : -len(goarch)] + "_GOARCH"
if new_stem:
new_header_file_path = PurePath(dir_path, f"{new_stem}{header_file_path.suffix}")
new_digest_entries.append(dataclasses.replace(entry, path=str(new_header_file_path)))
if new_digest_entries:
digest = await Get(Digest, CreateDigest(new_digest_entries))
return digest
else:
return None
# Gather transitive prebuilt object files for Cgo. Traverse the provided dependencies and lifts `.syso`
# object files into a single `Digest`.
async def _gather_transitive_prebuilt_object_files(
build_request: BuildGoPackageRequest,
) -> tuple[Digest, frozenset[str]]:
prebuilt_objects: list[tuple[Digest, list[str]]] = []
queue: deque[BuildGoPackageRequest] = deque([build_request])
while queue:
pkg = queue.popleft()
queue.extend(pkg.direct_dependencies)
if pkg.prebuilt_object_files:
prebuilt_objects.append(
(
pkg.digest,
[
os.path.join(pkg.dir_path, obj_file)
for obj_file in pkg.prebuilt_object_files
],
)
)
object_digest = await Get(Digest, MergeDigests([digest for digest, _ in prebuilt_objects]))
object_files = set()
for _, files in prebuilt_objects:
object_files.update(files)
return object_digest, frozenset(object_files)
# NB: We must have a description for the streaming of this rule to work properly
# (triggered by `FallibleBuiltGoPackage` subclassing `EngineAwareReturnType`).
@rule(desc="Compile with Go", level=LogLevel.DEBUG)
async def build_go_package(
request: BuildGoPackageRequest, go_root: GoRoot
) -> FallibleBuiltGoPackage:
maybe_built_deps = await MultiGet(
Get(FallibleBuiltGoPackage, BuildGoPackageRequest, build_request)
for build_request in request.direct_dependencies
)
import_paths_to_pkg_a_files: dict[str, str] = {}
dep_digests = []
for maybe_dep in maybe_built_deps:
if maybe_dep.output is None:
return dataclasses.replace(
maybe_dep, import_path=request.import_path, dependency_failed=True
)
dep = maybe_dep.output
for dep_import_path, pkg_archive_path in dep.import_paths_to_pkg_a_files.items():
if dep_import_path not in import_paths_to_pkg_a_files:
import_paths_to_pkg_a_files[dep_import_path] = pkg_archive_path
dep_digests.append(dep.digest)
merged_deps_digest, import_config, embedcfg, action_id_result = await MultiGet(
Get(Digest, MergeDigests(dep_digests)),
Get(
ImportConfig,
ImportConfigRequest(
FrozenDict(import_paths_to_pkg_a_files),
build_opts=request.build_opts,
import_map=request.import_map,
),
),
Get(RenderedEmbedConfig, RenderEmbedConfigRequest(request.embed_config)),
Get(GoCompileActionIdResult, GoCompileActionIdRequest(request)),
)
unmerged_input_digests = [
merged_deps_digest,
import_config.digest,
embedcfg.digest,
request.digest,
]
# If coverage is enabled for this package, then replace the Go source files with versions modified to
# contain coverage code.
go_files = request.go_files
cgo_files = request.cgo_files
s_files = list(request.s_files)
go_files_digest = request.digest
cover_file_metadatas: tuple[FileCodeCoverageMetadata, ...] | None = None
if request.with_coverage:
coverage_config = request.build_opts.coverage_config
assert coverage_config is not None, "with_coverage=True but coverage_config is None!"
coverage_result = await Get(
ApplyCodeCoverageResult,
ApplyCodeCoverageRequest(
digest=request.digest,
dir_path=request.dir_path,
go_files=go_files,
cgo_files=cgo_files,
cover_mode=coverage_config.cover_mode,
import_path=request.import_path,
),
)
go_files_digest = coverage_result.digest
unmerged_input_digests.append(go_files_digest)
go_files = coverage_result.go_files
cgo_files = coverage_result.cgo_files
cover_file_metadatas = coverage_result.cover_file_metadatas
# Track loose object files to link into final package archive. These can come from Cgo outputs, regular
# assembly files, or regular C files.
objects: list[tuple[str, Digest]] = []
# Add any prebuilt object files (".syso" extension) to the list of objects to link into the package.
if request.prebuilt_object_files:
objects.extend(
(os.path.join(request.dir_path, prebuilt_object_file), request.digest)
for prebuilt_object_file in request.prebuilt_object_files
)
# Process any Cgo files.
cgo_compile_result: CGoCompileResult | None = None
if cgo_files:
# Check if any assembly files contain gcc assembly, and not Go assembly. Raise an exception if any are
# likely in Go format since in cgo packages, assembly files are passed to gcc and must be in gcc format.
#
# Exception: When building runtime/cgo itself, only send `gcc_*.s` assembly files to GCC as
# runtime/cgo has both types of files.
if request.is_stdlib and request.import_path == "runtime/cgo":
gcc_s_files = []
new_s_files = []
for s_file in s_files:
if s_file.startswith("gcc_"):
gcc_s_files.append(s_file)
else:
new_s_files.append(s_file)
s_files = new_s_files
else:
asm_check_result = await Get(
CheckForGolangAssemblyResult,
CheckForGolangAssemblyRequest(
digest=request.digest,
dir_path=request.dir_path,
s_files=tuple(s_files),
),
)
if asm_check_result.maybe_golang_assembly:
raise ValueError(
f"Package {request.import_path} is a cgo package but contains Go assembly files."
)
gcc_s_files = s_files
s_files = [] # Clear s_files since assembly has already been handled in cgo rules.
# Gather all prebuilt object files transitively and pass them to the Cgo rule for linking into the
# Cgo object output. This is necessary to avoid linking errors.
# See https://github.com/golang/go/blob/6ad27161f8d1b9c5e03fb3415977e1d3c3b11323/src/cmd/go/internal/work/exec.go#L3291-L3311.
transitive_prebuilt_object_files = await _gather_transitive_prebuilt_object_files(request)
assert request.cgo_flags is not None
cgo_compile_result = await Get(
CGoCompileResult,
CGoCompileRequest(
import_path=request.import_path,
pkg_name=request.pkg_name,
digest=go_files_digest,
build_opts=request.build_opts,
dir_path=request.dir_path,
cgo_files=cgo_files,
cgo_flags=request.cgo_flags,
c_files=request.c_files,
s_files=tuple(gcc_s_files),
cxx_files=request.cxx_files,
objc_files=request.objc_files,
fortran_files=request.fortran_files,
is_stdlib=request.is_stdlib,
transitive_prebuilt_object_files=transitive_prebuilt_object_files,
),
)
assert cgo_compile_result is not None
unmerged_input_digests.append(cgo_compile_result.digest)
objects.extend(
[
(obj_file, cgo_compile_result.digest)
for obj_file in cgo_compile_result.output_obj_files
]
)
# Copy header files with platform-specific values in their name to platform independent names.
# For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
copied_headers_digest = await _maybe_copy_headers_to_platform_independent_names(
input_digest=request.digest,
dir_path=request.dir_path,
header_files=request.header_files,
goroot=go_root,
)
if copied_headers_digest:
unmerged_input_digests.append(copied_headers_digest)
# Merge all of the input digests together.
input_digest = await Get(
Digest,
MergeDigests(unmerged_input_digests),
)
# If any assembly files are present, generate a "symabis" file containing API metadata about those files.
# The "symabis" file is passed to the Go compiler when building Go code so that the compiler is aware of
# any API exported by the assembly.
#
# Note: The assembly files cannot be assembled at this point because a similar process happens from Go to
# assembly: The Go compiler generates a `go_asm.h` header file with metadata about the Go code in the package.
symabis_path: str | None = None
    extra_assembler_flags = (
        *request.build_opts.assembler_flags,
        *request.pkg_specific_assembler_flags,
    )
if s_files:
symabis_fallible_result = await Get(
FallibleGenerateAssemblySymabisResult,
GenerateAssemblySymabisRequest(
compilation_input=input_digest,
s_files=tuple(s_files),
import_path=request.import_path,
dir_path=request.dir_path,
extra_assembler_flags=extra_assembler_flags,
),
)
symabis_result = symabis_fallible_result.result
if symabis_result is None:
return FallibleBuiltGoPackage(
None,
request.import_path,
symabis_fallible_result.exit_code,
stdout=symabis_fallible_result.stdout,
stderr=symabis_fallible_result.stderr,
)
input_digest = await Get(
Digest, MergeDigests([input_digest, symabis_result.symabis_digest])
)
symabis_path = symabis_result.symabis_path
    # Build the arguments for compiling the Go code in this package.
compile_args = [
"tool",
"compile",
"-buildid",
action_id_result.action_id,
"-o",
"__pkg__.a",
"-pack",
"-p",
request.import_path,
"-importcfg",
import_config.CONFIG_PATH,
]
# See https://github.com/golang/go/blob/f229e7031a6efb2f23241b5da000c3b3203081d6/src/cmd/go/internal/work/gc.go#L79-L100
# for where this logic comes from.
go_version = request.minimum_go_version or "1.16"
if go_root.is_compatible_version(go_version):
compile_args.extend(["-lang", f"go{go_version}"])
if request.is_stdlib:
compile_args.append("-std")
compiling_runtime = request.is_stdlib and request.import_path in (
"internal/abi",
"internal/bytealg",
"internal/coverage/rtcov",
"internal/cpu",
"internal/goarch",
"internal/goos",
"runtime",
"runtime/internal/atomic",
"runtime/internal/math",
"runtime/internal/sys",
"runtime/internal/syscall",
)
# From Go sources:
# runtime compiles with a special gc flag to check for
# memory allocations that are invalid in the runtime package,
# and to implement some special compiler pragmas.
#
# See https://github.com/golang/go/blob/245e95dfabd77f337373bf2d6bb47cd353ad8d74/src/cmd/go/internal/work/gc.go#L107-L112
if compiling_runtime:
compile_args.append("-+")
if symabis_path:
compile_args.extend(["-symabis", symabis_path])
# If any assembly files are present, request the compiler write an "assembly header" with API metadata
# about the Go code that can be used by assembly files.
asm_header_path: str | None = None
if s_files:
if os.path.isabs(request.dir_path):
asm_header_path = "go_asm.h"
else:
asm_header_path = os.path.join(request.dir_path, "go_asm.h")
compile_args.extend(["-asmhdr", asm_header_path])
if embedcfg.digest != EMPTY_DIGEST:
compile_args.extend(["-embedcfg", RenderedEmbedConfig.PATH])
if request.build_opts.with_race_detector:
compile_args.append("-race")
if request.build_opts.with_msan:
compile_args.append("-msan")
if request.build_opts.with_asan:
compile_args.append("-asan")
# If there are no loose object files to add to the package archive later or assembly files to assemble,
# then pass -complete flag which tells the compiler that the provided Go files constitute the entire package.
if not objects and not s_files:
# Exceptions: a few standard packages have forward declarations for
# pieces supplied behind-the-scenes by package runtime.
if request.import_path not in (
"bytes",
"internal/poll",
"net",
"os",
"runtime/metrics",
"runtime/pprof",
"runtime/trace",
"sync",
"syscall",
"time",
):
compile_args.append("-complete")
# Add any extra compiler flags after the ones added automatically by this rule.
if request.build_opts.compiler_flags:
compile_args.extend(request.build_opts.compiler_flags)
if request.pkg_specific_compiler_flags:
compile_args.extend(request.pkg_specific_compiler_flags)
# Remove -N if compiling runtime:
# It is not possible to build the runtime with no optimizations,
# because the compiler cannot eliminate enough write barriers.
if compiling_runtime:
compile_args = [arg for arg in compile_args if arg != "-N"]
go_file_paths = (
str(PurePath(request.dir_path, go_file)) if request.dir_path else f"./{go_file}"
for go_file in go_files
)
generated_cgo_file_paths = cgo_compile_result.output_go_files if cgo_compile_result else ()
# Put the source file paths into a file and pass that to `go tool compile` via a config file using the
# `@CONFIG_FILE` syntax. This is necessary to avoid command-line argument limits on macOS. The arguments
# may end up to exceed those limits when compiling standard library packages where we append a very long GOROOT
# path to each file name or in packages with large numbers of files.
go_source_file_paths_config = "\n".join([*go_file_paths, *generated_cgo_file_paths])
go_sources_file_paths_digest = await Get(
Digest, CreateDigest([FileContent("__sources__.txt", go_source_file_paths_config.encode())])
)
input_digest = await Get(Digest, MergeDigests([input_digest, go_sources_file_paths_digest]))
compile_args.append("@__sources__.txt")
compile_result = await Get(
FallibleProcessResult,
GoSdkProcess(
input_digest=input_digest,
command=tuple(compile_args),
description=f"Compile Go package: {request.import_path}",
output_files=("__pkg__.a", *([asm_header_path] if asm_header_path else [])),
env={"__PANTS_GO_COMPILE_ACTION_ID": action_id_result.action_id},
),
)
if compile_result.exit_code != 0:
return FallibleBuiltGoPackage(
None,
request.import_path,
compile_result.exit_code,
stdout=compile_result.stdout.decode("utf-8"),
stderr=compile_result.stderr.decode("utf-8"),
)
compilation_digest = compile_result.output_digest
# TODO: Compile any C files if this package does not use Cgo.
# If any assembly files are present, then assemble them. The `compilation_digest` will contain the
# assembly header `go_asm.h` in the object directory.
if s_files:
# Extract the `go_asm.h` header from the compilation output and merge into the original compilation input.
assert asm_header_path is not None
asm_header_digest = await Get(
Digest,
DigestSubset(
compilation_digest,
PathGlobs(
[asm_header_path],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin="the `build_go_package` rule",
),
),
)
assembly_input_digest = await Get(Digest, MergeDigests([input_digest, asm_header_digest]))
assembly_fallible_result = await Get(
FallibleAssembleGoAssemblyFilesResult,
AssembleGoAssemblyFilesRequest(
input_digest=assembly_input_digest,
s_files=tuple(sorted(s_files)),
dir_path=request.dir_path,
import_path=request.import_path,
extra_assembler_flags=extra_assembler_flags,
),
)
assembly_result = assembly_fallible_result.result
if assembly_result is None:
return FallibleBuiltGoPackage(
None,
request.import_path,
assembly_fallible_result.exit_code,
stdout=assembly_fallible_result.stdout,
stderr=assembly_fallible_result.stderr,
)
objects.extend(assembly_result.assembly_outputs)
# If there are any loose object files, link them into the package archive.
if objects:
assembly_link_input_digest = await Get(
Digest,
MergeDigests(
[
compilation_digest,
*(digest for obj_file, digest in objects),
]
),
)
assembly_link_result = await _add_objects_to_archive(
input_digest=assembly_link_input_digest,
pkg_archive_path="__pkg__.a",
obj_file_paths=sorted(obj_file for obj_file, digest in objects),
)
compilation_digest = assembly_link_result.output_digest
path_prefix = os.path.join("__pkgs__", path_safe(request.import_path))
import_paths_to_pkg_a_files[request.import_path] = os.path.join(path_prefix, "__pkg__.a")
output_digest = await Get(Digest, AddPrefix(compilation_digest, path_prefix))
merged_result_digest = await Get(Digest, MergeDigests([*dep_digests, output_digest]))
# Include the modules sources in the output `Digest` alongside the package archive if the Cgo rules
# detected a potential attempt to link against a static archive (or other reference to `${SRCDIR}` in
# options) which necessitates the linker needing access to module sources.
if cgo_compile_result and cgo_compile_result.include_module_sources_with_output:
merged_result_digest = await Get(
Digest, MergeDigests([merged_result_digest, request.digest])
)
coverage_metadata = (
BuiltGoPackageCodeCoverageMetadata(
import_path=request.import_path,
cover_file_metadatas=cover_file_metadatas,
sources_digest=request.digest,
sources_dir_path=request.dir_path,
)
if cover_file_metadatas
else None
)
output = BuiltGoPackage(
digest=merged_result_digest,
import_paths_to_pkg_a_files=FrozenDict(import_paths_to_pkg_a_files),
coverage_metadata=coverage_metadata,
)
return FallibleBuiltGoPackage(output, request.import_path)
@rule
def required_built_go_package(fallible_result: FallibleBuiltGoPackage) -> BuiltGoPackage:
if fallible_result.output is not None:
return fallible_result.output
raise Exception(
f"Failed to compile {fallible_result.import_path}:\n"
f"{fallible_result.stdout}\n{fallible_result.stderr}"
)
@rule
async def render_embed_config(request: RenderEmbedConfigRequest) -> RenderedEmbedConfig:
digest = EMPTY_DIGEST
if request.embed_config:
digest = await Get(
Digest,
CreateDigest(
[FileContent(RenderedEmbedConfig.PATH, request.embed_config.to_embedcfg())]
),
)
return RenderedEmbedConfig(digest)
# Compute a cache key for the compile action. This computation is intended to capture similar values to the
# action ID computed by the `go` tool for its own cache.
# For details, see https://github.com/golang/go/blob/21998413ad82655fef1f31316db31e23e0684b21/src/cmd/go/internal/work/exec.go#L216-L403
@rule
async def compute_compile_action_id(
request: GoCompileActionIdRequest, goroot: GoRoot
) -> GoCompileActionIdResult:
bq = request.build_request
h = hashlib.sha256()
    # All Go action IDs have the full version (as returned by `runtime.Version()`) in the key.
# See https://github.com/golang/go/blob/master/src/cmd/go/internal/cache/hash.go#L32-L46
h.update(goroot.full_version.encode())
h.update("compile\n".encode())
if bq.minimum_go_version:
h.update(f"go {bq.minimum_go_version}\n".encode())
h.update(f"goos {goroot.goos} goarch {goroot.goarch}\n".encode())
h.update(f"import {bq.import_path}\n".encode())
# TODO: Consider what to do with this information from Go tool:
# fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
# TODO: Inject cgo-related values here.
# TODO: Inject cover mode values here.
# TODO: Inject fuzz instrumentation values here.
compile_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("compile"))
h.update(f"compile {compile_tool_id.tool_id}\n".encode())
# TODO: Add compiler flags as per `go`'s algorithm. Need to figure out
if bq.s_files:
asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
h.update(f"asm {asm_tool_id.tool_id}\n".encode())
# TODO: Add asm flags as per `go`'s algorithm.
# TODO: Add micro-architecture into cache key (e.g., GOAMD64 setting).
if "GOEXPERIMENT" in goroot._raw_metadata:
h.update(f"GOEXPERIMENT={goroot._raw_metadata['GOEXPERIMENT']}".encode())
# TODO: Maybe handle go "magic" env vars: "GOCLOBBERDEADHASH", "GOSSAFUNC", "GOSSADIR", "GOSSAHASH" ?
# TODO: Handle GSHS_LOGFILE compiler debug option by breaking cache?
# Note: Input files are already part of cache key. Thus, this algorithm omits incorporating their
# content hashes into the action ID.
return GoCompileActionIdResult(h.hexdigest())
def rules():
return (
*collect_rules(),
*cgo.rules(),
*coverage.rules(),
)
|
pantsbuild/pants
|
src/python/pants/backend/go/util_rules/build_pkg.py
|
build_pkg.py
|
py
| 38,872 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
27280445528
|
from aiogram import types
from aiogram.dispatcher.filters import BoundFilter
import config
import dispatcher
class IsOwnerFilter(BoundFilter):
"""
Custom filter "is_owner".
"""
key = "is_owner"
def __init__(self, is_owner):
self.is_owner = is_owner
async def check(self, message: types.Message):
return message.from_user.id in config.BOT_OWNERS
class IsAuthFilter(BoundFilter):
"""
Custom filter "is_owner".
"""
key = "is_auth"
def __init__(self, is_auth):
self.is_owner = is_auth
async def check(self, message: types.Message):
query = 'SELECT user_id FROM auth_users WHERE user_id = %s'
args = (message.from_user.id,)
res = dispatcher.db.execute_query(query, args)
res = bool(len(res))
return res
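# Illustrative sketch (not part of the original module): in aiogram 2.x these bound
# filters are registered on the Dispatcher and then used as keyword arguments in
# handler decorators. The `dp` name below is assumed to be the project's Dispatcher:
#
#   dp.filters_factory.bind(IsOwnerFilter)
#   dp.filters_factory.bind(IsAuthFilter)
#
#   @dp.message_handler(is_owner=True, commands=["admin"])
#   async def admin_panel(message: types.Message):
#       await message.reply("Owner verified.")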
|
YarikATM/Metall
|
tg_bot/filters.py
|
filters.py
|
py
| 819 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3372040192
|
import jieba
from os import path
import os
from wordcloud import WordCloud
def jieba_processing_txt(text, user_dict=[]):
for word in user_dict:
jieba.add_word(word)
mywordlist = []
seg_list = jieba.cut(text, cut_all=False)
liststr = "/ ".join(seg_list)
for myword in liststr.split('/'):
if len(myword.strip()) > 1:
mywordlist.append(myword)
return ' '.join(mywordlist)
def word_cloud(text, savePath, user_dict=[]):
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
font_path = d + '/SourceHanSerifK-Light.otf'
wc = WordCloud(font_path=font_path, background_color="white", max_words=8000,
max_font_size=100, random_state=42, width=1200, height=900, margin=2,)
wc.generate(jieba_processing_txt(text, user_dict))
wc.to_file(savePath)
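# Illustrative usage (the input/output file names are examples; the
# SourceHanSerifK-Light.otf font must sit next to this module, as assumed above):
#
#   with open('input.txt', encoding='utf-8') as f:
#       word_cloud(f.read(), 'cloud.png', user_dict=['自定义词'])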
|
gewas/VChaCha
|
wc.py
|
wc.py
|
py
| 877 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43417946445
|
import cv2
import matplotlib.pyplot as plt
import pandas as pd
img1_path = 'U14.png'
csv_path = 'colours.csv'
img2 = cv2.imread(img1_path)
img2 = cv2.resize(img2, (800, 600))
plt.figure(figsize=(20, 8))
plt.imshow(img2)
grid_RGB = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(20, 8))
plt.imshow(grid_RGB)
index = ['colour', 'colour_name', 'hex', 'R', 'G', 'B']
df = pd.read_csv(csv_path, names=index, header=None)
clicked = False
r = g = b = xpos = ypos = 0
def get_color_name(R, G, B):
minimum = 1000
for i in range(len(df)):
d = abs(R - int(df.loc[i, 'R'])) + abs(G - int(df.loc[i, 'G'])) + abs(
B - int(df.loc[i, 'B']))
if d <= minimum:
minimum = d
cname = df.loc[i, 'colour_name']
return cname
def draw_function(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDBLCLK:
global b, g, r, xpos, ypos, clicked
clicked = True
xpos = x
ypos = y
b, g, r = img2[y, x]
b = int(b)
g = int(g)
r = int(r)
cv2.namedWindow('Detection')
cv2.setMouseCallback('Detection', draw_function)
while True:
cv2.imshow('Detection', img2)
if clicked:
cv2.rectangle(img2, (20, 20), (600, 60), (b, g, r), -1)
text = get_color_name(r, g, b) + ' R =' + str(r) + ' G = ' + str(
g) + ' B = ' + str(b)
cv2.putText(img2, text, (50, 50), 2, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows()
|
AnupCloud/Color_Detection
|
color_detection.py
|
color_detection.py
|
py
| 1,604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39932475902
|
import argparse
import requests
from tabulate import tabulate
def make_api_request(query, filters, page, pagesize):
url = 'http://localhost:3000/log/search'
data = {
'query': query,
'filters': filters,
'page': page,
'pageSize': pagesize
}
response = requests.post(url, json=data)
if response.status_code == 200:
result = response.json()
if result['success']:
pagination = result['pagination']
page = pagination['page']
total_hits = pagination['totalHits']
total_pages = pagination['totalPages']
print(
f"Search Results -> Current Page: {page} ")
if result['data']:
table_data = [{k: v for k, v in item.items()}
for item in result['data']]
print(tabulate(table_data, headers="keys", tablefmt="pretty"))
else:
print("No results found.")
print(f"total {total_hits} hits across {total_pages} pages")
else:
print(f"Error: {result.get('error', 'Unknown error')}")
else:
print(f"Error: {response.status_code}")
print(response.text)
def main():
parser = argparse.ArgumentParser(
description='Make API request to http://localhost:3000/log/search')
parser.add_argument('--query', type=str, default="", help='Search query')
parser.add_argument('--filters', nargs='+', default=[],
help='Additional filters')
parser.add_argument('--page', type=int, default=1, help='Current Page')
parser.add_argument('--pagesize', type=int, default=10, help='Page Size')
args = parser.parse_args()
query = args.query
page = args.page
pagesize = args.pagesize
filters_dict = {}
for filter_arg in args.filters:
key, value = filter_arg.split('=')
filters_dict[key] = value
make_api_request(query, filters_dict, page, pagesize)
if __name__ == "__main__":
main()
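# Example invocation (illustrative; assumes the log service used by make_api_request
# is listening on http://localhost:3000):
#
#   python search.py --query "timeout" --filters level=error service=api --page 1 --pagesize 20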
|
harikrishnanum/LogQube
|
cli/search.py
|
search.py
|
py
| 2,041 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22330842884
|
import numpy as np
import matplotlib, gc
import matplotlib.pyplot as plt
import tensorflow as tf  # needed for tf.zeros_like in hessian_vector_product
from tensorflow import gradients
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops, math_ops
def hessian_vector_product(ys, xs, v):
""" Multiply the Hessian of `ys` wrt `xs` by `v` """
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
grads_with_none = gradients(elemwise_products, xs)
return_grads = [
grad_elem if grad_elem is not None \
else tf.zeros_like(x) \
for x, grad_elem in zip(xs, grads_with_none)]
return return_grads
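# Illustrative usage sketch (TF1-style graph mode, matching the imports above).
# `loss`, `sess`, and `feed` are assumed to exist in the caller's code:
#
#   params = tf.trainable_variables()
#   vec = [tf.ones_like(p) for p in params]               # vector to multiply by
#   hvp_ops = hessian_vector_product(loss, params, vec)   # one tensor per parameter
#   hvp_vals = sess.run(hvp_ops, feed_dict=feed)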
def avg_l2_dist(orig, adv):
"""Get the mean l2 distortion between two orig and adv images"""
l2_dist = 0.0
num_ = orig.shape[0]
if num_ > 0:
for i in range(orig.shape[0]):
l2_dist+= np.linalg.norm(orig[i] - adv[i])
return l2_dist/orig.shape[0]
else:
return np.nan
def visualize(image_list, num_images, savefig=''):
"""Visualize images in a grid"""
assert(len(image_list) == num_images)
fig=plt.figure(figsize=(15,15))
columns = num_images
for i in range(1, columns+1):
img = image_list[i-1]
fig.add_subplot(1, columns, i)
if img.shape[-1] == 1:
img = np.squeeze(img)
plt.imshow(img,cmap='Greys')
else:
plt.imshow(img)
plt.axis('off')
plt.show()
fig.savefig(savefig,bbox_inches='tight')
#Normalize rows of a given matrix
def normalize(matrix):
"""Normalize each row vector in a matrix"""
matrix_nm = np.zeros_like(matrix)
for i in range(matrix.shape[0]):
norm = np.linalg.norm(matrix[i])
if norm > 0:
matrix_nm[i] = matrix[i]/np.linalg.norm(matrix[i])
return matrix_nm
def preds_to_labels(preds):
labels = np.zeros(preds.shape)
labels[np.arange(preds.shape[0]),np.argmax(preds, axis=1)] = 1
return labels
def norms_and_cos(model, data, labels, grads_train):
grads = model.get_gradients_wrt_params(data, labels)
grads_nm = normalize(grads)
norms = np.sqrt(np.dot(grads, grads.T)).diagonal()
cos_sim = np.dot(grads_nm, grads_train.T)
del grads_nm, grads
gc.collect()
return norms, cos_sim
def greater_cos(cos_sim, eta):
count = 0.0
num_ = cos_sim.shape[0]
if num_ > 0:
for i in range(num_):
if np.max(cos_sim[i]) > eta:
count+=1.0
return (count/num_)
else:
return 0.0
def smaller_norm(norms, gamma):
count=0.0
num_ = norms.shape[0]
if num_ > 0:
for i in range(num_):
if norms[i] < gamma:
count+=1.0
return (count/num_)
else:
return 0.0
def cos_and_norm_sep(cos_sim, norms, eta, gamma):
count=0.0
num_ = norms.shape[0]
if num_ > 0:
for i in range(num_):
if np.max(cos_sim[i]) > eta and norms[i] < gamma:
count+=1.0
return (count/num_)
else:
return 0.0
def comp_cos(cos_a, cos_b):
count = 0.0
num_ = cos_a.shape[0]
if num_ > 0:
for i in range(num_):
if np.max(cos_a[i]) > np.max(cos_b[i]):
count+=1.0
return (count/num_)
else:
return 0.0
def comp_norm(norm_a, norm_b):
count = 0.0
num_ = norm_a.shape[0]
if num_ > 0:
for i in range(num_):
if norm_a[i] > norm_b[i]:
count+=1.0
return (count/num_)
else:
return 0.0
def get_test_from_train_idx(a, b):
mask = np.ones_like(a,dtype=bool)
mask[b] = False
return a[mask]
def get_guide_idx(model, idx_filter=None, cos_sim=None, data_indices=None, idx=0):
idx_ = np.where(idx_filter == idx)[0][0]
max_sim_idx = np.argmax(cos_sim[idx_])
guide_img_idx = data_indices[max_sim_idx]
return guide_img_idx
|
jasjeetIM/AdversarialDetector
|
models/util.py
|
util.py
|
py
| 4,254 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31165098736
|
from torch.utils.data import Dataset, DataLoader
from albumentations.pytorch import ToTensorV2
from augmix import RandomAugMix
from utils import in_colab
import albumentations as A
import torchvision.io as io
import pytorch_lightning as pl
import torch
import cv2
def get_default_transforms(img_size):
transform = {
'train': A.Compose([
A.HorizontalFlip(p=0.5),
A.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, p=0.5),
A.SmallestMaxSize(max_size=img_size[0], p=1),
A.RandomCrop(height=img_size[0], width=img_size[1], p=1),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value=255.0,
p=1.0,
),
ToTensorV2()
]),
'inference': A.Compose([
A.SmallestMaxSize(max_size=img_size[0], p=1.0),
A.CenterCrop(height=img_size[0], width=img_size[1], p=1.0),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value=255.0,
p=1.0,
),
ToTensorV2()
]),
}
return transform
class Dataset(Dataset):
def __init__(self, img_ids, targets=None, img_size=(224, 224), inference=False, tta=False):
self.img_ids = img_ids
self.targets = targets
self.tta = tta
        if tta:
            # NB: get_default_transforms() only defines 'train' and 'inference'
            # pipelines, so tta=True raises a KeyError unless a 'tta' entry is added.
            self.augs = get_default_transforms(img_size)['tta']
elif inference:
self.augs = get_default_transforms(img_size)['inference']
else:
self.augs = get_default_transforms(img_size)['train']
def __len__(self):
return self.img_ids.shape[0]
def __getitem__(self, i):
image = cv2.imread(self.img_ids[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if not self.tta:
image = self.augs(image=image)['image']
if self.targets is not None:
target = torch.as_tensor(self.targets[i]).float()
return {
'images': image,
'targets': target
}
else:
if self.tta:
return {f'images_{i}': self.augs[i](image=image)['image'] for i in range(len(self.augs))}
else:
return {'images': image}
class DataModule(pl.LightningDataModule):
def __init__(
self, data, img_size=(224, 224),
train_filter=None, val_filter=None,
batch_size=64, inference=False, tta=False
):
super().__init__()
self.data = data
self.img_size = img_size
self.train_filter = train_filter
self.val_filter = val_filter
self.batch_size = batch_size
self.inference = inference
if tta:
self.augs = get_default_transforms(img_size)['tta']
def setup(self, stage=None):
if not self.inference:
self.train_df = self.data.loc[self.train_filter, :]
self.val_df = self.data.loc[self.val_filter, :]
def train_dataloader(self):
img_ids = self.train_df['file_path'].values
targets = self.train_df['Pawpularity'].values
train_dset = Dataset(img_ids, targets, img_size=self.img_size)
return DataLoader(
train_dset, shuffle=True, num_workers=2 if in_colab() else 4,
pin_memory=True, batch_size=self.batch_size, drop_last=True
)
def val_dataloader(self):
img_ids = self.val_df['file_path'].values
targets = self.val_df['Pawpularity'].values
val_dset = Dataset(img_ids, targets, img_size=self.img_size, inference=True)
return DataLoader(
val_dset, shuffle=False, num_workers=2 if in_colab() else 4,
pin_memory=True, batch_size=self.batch_size,
)
def test_dataloader(self):
pass
def predict_dataloader(self):
img_ids = self.data['file_path'].values
pred_dset = Dataset(img_ids, img_size=self.img_size, inference=True, tta=False)
return DataLoader(
pred_dset, shuffle=False, num_workers=2 if in_colab() else 4,
pin_memory=True, batch_size=self.batch_size,
)
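# Illustrative wiring sketch (not part of the original file). `df` is assumed to be a
# pandas DataFrame with 'file_path' and 'Pawpularity' columns, and `train_idx` /
# `val_idx` are assumed index filters usable with .loc, as in setup():
#
#   dm = DataModule(df, img_size=(224, 224), train_filter=train_idx,
#                   val_filter=val_idx, batch_size=64)
#   dm.setup()
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(model, datamodule=dm)   # `model` is the user's LightningModule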
|
mtenenholtz/petfinder-pawpularity-score
|
dataset.py
|
dataset.py
|
py
| 4,274 |
python
|
en
|
code
| 3 |
github-code
|
6
|
7967036430
|
"""aiohttp-based client to retrieve web pages.
"""
import asyncio
from contextlib import closing
import time
import aiohttp
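# NOTE: this module targets older aiohttp releases (pre-3.0); `aiohttp.Timeout` and
# using `ClientSession` as a plain (non-async) context manager were removed in
# aiohttp 3.x, which uses `aiohttp.ClientTimeout` and `async with` instead.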
async def fetch_page(session, host, port=8000, wait=0):
"""Get one page.
"""
url = '{}:{}/{}'.format(host, port, wait)
with aiohttp.Timeout(10):
async with session.get(url) as response:
assert response.status == 200
return await response.text()
def get_multiple_pages(host, waits, port=8000, show_time=True):
"""Get multiple pages.
"""
tasks = []
pages = []
start = time.perf_counter()
with closing(asyncio.get_event_loop()) as loop:
with aiohttp.ClientSession(loop=loop) as session:
for wait in waits:
tasks.append(fetch_page(session, host, port, wait))
pages = loop.run_until_complete(asyncio.gather(*tasks))
duration = time.perf_counter() - start
sum_waits = sum(waits)
if show_time:
msg = 'It took {:4.2f} seconds for a total waiting time of {:4.2f}.'
print(msg.format(duration, sum_waits))
return pages
if __name__ == '__main__':
def main():
"""Test it.
"""
pages = get_multiple_pages(host='http://localhost', port='8000',
waits=[1, 5, 3, 2])
for page in pages:
print(page)
main()
|
asyncio-docs/asyncio-doc
|
examples/aiohttp_client.py
|
aiohttp_client.py
|
py
| 1,359 |
python
|
en
|
code
| 196 |
github-code
|
6
|
16578681384
|
"""
"""
import csv
class Input:
def __init__(self):
self.users = {}
class User:
def __init__(self):
self.months = {}
class Month:
def __init__(self):
self.dates = {}
self.minBalance = float("inf") # to be converted to int
self.maxBalance = float("-inf") # to be converted to int
self.endBalance = 0
class Date:
def __init__(self):
self.cred = 0
self.debt = 0
class File_Handler:
def __init__(self):
self.file = Input()
def handle_csv(self, file_name):
raw_data = self.read_csv(file_name)
processed_data = self.process_csv(raw_data)
self.save_csv(processed_data)
def read_csv(self, file_name):
form = self.file
with open(file_name, 'r') as file:
csvreader = csv.reader(file)
for row in csvreader:
# Display raw data
print(row)
if row[0] == "" or row[1] == "" or row[2] == "":
continue
userID = row[0]
if userID not in form.users:
form.users[userID] = User()
currentUser = form.users[userID]
month = row[1][:2] + row[1][5:]
if month not in currentUser.months:
currentUser.months[month] = Month()
currentMonth = currentUser.months[month]
date = row[1][3:5]
if date not in currentMonth.dates:
currentMonth.dates[date] = Date()
currentDate = currentMonth.dates[date]
amount = int(row[2])
if amount >= 0:
currentDate.cred += amount
else:
currentDate.debt += amount
return form
def process_csv(self, file):
data = []
for user, month in file.users.items():
# Sort by key value (month)
sorted_months = sorted(month.months.items(), key=lambda x: x[0])
for m, date in sorted_months:
# Sort by key value (date)
sorted_dates = sorted(date.dates.items(), key=lambda x: x[0])
currentBalance = 0
for d, amount in sorted_dates:
# print(user, month, d, amount.cred, amount.debt)
# Update minBalance, maxBalance, endBalance
currentBalance += amount.cred
if amount.cred != 0:
date.maxBalance = max(date.maxBalance, currentBalance)
date.minBalance = min(date.minBalance, currentBalance)
currentBalance += amount.debt
if amount.debt != 0:
date.maxBalance = max(date.maxBalance, currentBalance)
date.minBalance = min(date.minBalance, currentBalance)
date.endBalance = currentBalance
# print(user, m, date.minBalance, date.maxBalance, date.endBalance)
data.append({'CustomerID': user, 'MM/YYYY': m, \
'MinBalance': date.minBalance, 'MaxBalance': date.maxBalance, \
'EndingBalance': date.endBalance})
# print(data)
return data
def save_csv(self, data):
# field names
fields = ['CustomerID', 'MM/YYYY', \
'MinBalance', 'MaxBalance', 'EndingBalance']
# Open the CSV file
with open('data.csv', 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fields)
writer.writeheader()
for d in data:
writer.writerow(d)
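# Illustrative usage (the file name is an example). Each input row is expected to be
# CustomerID, MM/DD/YYYY, signed amount (for example "C42,03/15/2023,-250"):
#
#   handler = File_Handler()
#   handler.handle_csv('transactions.csv')   # writes the per-month summary to data.csv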
|
KaiserZZK/CSV-Assistant
|
ver1/util.py
|
util.py
|
py
| 3,725 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36849230183
|
from google.cloud import bigquery
import pandas as pd
import os
def ReadAlreadyProcessedData():
vAR_client = bigquery.Client()
vAR_table_name = "DMV_ELP_GPT4_RECOMMENDATION"
vAR_sql =(
"select REQUEST_ID,REQUEST_DATE,ORDER_CONFIGURATION,ORDER_PAYMENT_DATE from `"+ os.environ["GCP_PROJECT_ID"]+"."+os.environ["GCP_BQ_SCHEMA_NAME"]+"."+vAR_table_name+"`"
)
vAR_df = vAR_client.query(vAR_sql).to_dataframe()
return vAR_df
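# Illustrative note: this helper relies on Application Default Credentials plus the
# GCP_PROJECT_ID and GCP_BQ_SCHEMA_NAME environment variables, e.g. (example values):
#
#   os.environ["GCP_PROJECT_ID"] = "my-project"
#   os.environ["GCP_BQ_SCHEMA_NAME"] = "my_dataset"
#   vAR_df = ReadAlreadyProcessedData()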
|
Deepsphere-AI/https-github.com-Deepsphere-AI-DMV_ELP_GPT4_Recommendation
|
DMV_Bigquery_Utility.py
|
DMV_Bigquery_Utility.py
|
py
| 454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11890054314
|
import math
L,R = map(int,input().split())
count = 0
for i in range(L,R+1):
if math.sqrt(i) == int(math.sqrt(i)):
count+=1
if count > 0:
print(count)
else:
print(-1)
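# Note: for L >= 1 the same count is available in O(1) (Python 3.8+) as
#   math.isqrt(R) - math.isqrt(L - 1)
# i.e. the number of perfect squares in [L, R]; the loop above is the brute-force version.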
|
syedjaveed18/codekata-problems
|
Arrays/Q121.py
|
Q121.py
|
py
| 187 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43095853918
|
from tython.main import run
from colorama import init
init(autoreset=True)
while True:
text = input("> ")
if text.strip() == "":
continue
result, error = run("<stdin>", text)
if error:
print(f"\033[31merror \033[0m" + f"{error}")
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result))
|
traceover/tython
|
shell.py
|
shell.py
|
py
| 411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31192442591
|
"""
Created on Fri Mar 4 19:28:46 2022
@author: Miguel
"""
from _legacy.exe_isotopeChain_taurus import DataTaurus
class Template:
com = 'com'
z = 'z'
a = 'a'
seed = 'seed'
b20 = 'b20'
varN2 = 'varN2'
iterartions = 'iters'
hamil = 'interaction'
TEMPLATE = """NUCLEUS {a:03} XX Z= {z:03} >>> HFB OPT <<< {com} COULECH 2
EPSG 0.000001 MAXITER {iters:05} >>> OUTPUT <<< 0 **** TSTG 0
ETMAX 0.7501 ETMIN 0.0351 DMAX 0.90 DMIN 0.70 TSHH 399.0
GOGNY FORCE {interaction} *** 0=D1S 1=D1 2=D1' 3(t3=0)
INPUT W.F. {seed} *** 0,1=WF FROM UNIT 10 (1 kicks), 2=NEW Function
OSCILLATOR LENGHT 0 *** 0 BP 1.7510000 BZ 1.7510000
>>>>>>>>>> C O N S T R A I N T S <<<<<<<<<<<
C.O.M. 1 1 0.00000000D+00
{b20}{varN2} >>>>>>>>>> E N D <<<<<<<<<<<<<<<<<<<<<<<<<<< """
# BP 1.7719772 BZ 1.7719772
# BP 1.7185258 BZ 1.7185258 (A=25)
temp_noGrad = """NUCLEUS {a:03} He Z= {z:03} >>> HFB OPT <<< {com} COULECH 2
EPSG 0.000001 MAXITER {iters:05} >>> OUTPUT <<< 0 **** TSTG 0
ETMAX 0.0001 ETMIN 0.0001 DMAX 0.01 DMIN 0.01 TSHH 000.1
GOGNY FORCE {interaction} *** 0=D1S 1=D1 2=D1' 3(t3=0)
INPUT W.F. {seed} *** 0,1=WF FROM UNIT 10 (1 kicks), 2=NEW Function
OSCILLATOR LENGHT 0 *** 0 BP 2.0402454 BZ 2.0402454
>>>>>>>>>> C O N S T R A I N T S <<<<<<<<<<<
C.O.M. 1 1 0.00000000D+00
{b20}{varN2} >>>>>>>>>> E N D <<<<<<<<<<<<<<<<<<<<<<<<<<< """
q10_constr_template = "QL 1 {:1} {:1} {:10.8f}D+00\n"
q20_constr_template = "QL 2 {:1} {:1} {:10.8f}D+00\n"
b20_constr_template = "BL 2 {:1} {:1} {:10.8f}D+00\n"
b30_constr_template = "BL 3 {:1} {:1} {:10.8f}D+00\n"
DN2_constr_template = "DN**2 {:1} {:1} {:10.8f}D+00\n"
DJX2_constr_template= "DJX**2 {:1} {:1} {:10.8f}D+00\n"
MSR2_constr_template= "<R**2> {:1} {:1} {:10.8f}D+00\n"
com_template = "CM1 {} CM2 {}"
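# Example: b20_constr_template.format(1, 1, 0.15) renders the line
#   "BL 2 1 1 0.15000000D+00\n"
# which _executeProgram() below splices into the {b20} slot of TEMPLATE.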
from collections import OrderedDict
import os
import shutil
import subprocess
from _legacy.exe_isotopeChain_axial import DataAxial
import math as mth
import matplotlib.pyplot as plt
import numpy as np
HAMIL_AXIAL_PROGRAM = 'HFBaxialMZ3'
nucleus = [
# Z N
# (2, 2),
# (2, 4),
# (4, 4),
# (4, 6),
# # (6, 6),
# (6, 8),
# (8, 4),
# (8, 6),
# (8, 8),
# (8, 10),
# (8, 12),
#
(10, 6),
(10, 8),
(10, 10),
(10, 12),
(10, 14),
(10, 16)
#
# (12, 8),
# (12, 10),
# (12, 12),
# (12, 14),
# (12, 16),
# (14, 8),
# (14, 10),
# (14, 12),
# (14, 14),
# (14, 16),
#
# (16, 12),
# (16, 14),
# (16, 16),
# (16, 18),
# (16, 20),
# (36, 34),
# (34, 36),
# (38, 40),
# (40, 38),
]
#nucleus = [(8, n) for n in range(6, 15, 2)]
## put here value in axial (divide by 3/5 to fix with taurus q20)
repeat = {
# # (2, 2),
# (2, 4) : 0.1 / 0.6,
# (4, 4) : 0.4 / 0.6,
# # (4, 6) : -0.4 / 0.6,
# # (6, 6) : -0.4 / 0.6,
# # (6, 8) : -0.1 / 0.6,
# # (8, 8),
# (10, 8) : 0.0,
# # (10, 10): +0.2 / 0.6,
# (12, 10): +0.3 / 0.6,
# (14, 12) : 0.23 / 0.6,
# #(6, 6) : -0.4 / 0.6,
}
def _executeProgram(params, output_filename, q20_const,
print_result=True, save_final_wf=True, force_converg=False,
noGradient=False):
"""
In NOT save_final_wf, the initial wf previous the calculation is restored
"""
res = None
if params[Template.seed] == 1: print("[WARNING] seed 1 in Axial kicks wf!")
try:
status_fin = ''
text = TEMPLATE.format(**params)
if noGradient:
text = temp_noGrad.format(**params)
#print("\n no Grad\n{}".format(text),'\n')
with open(DataAxial.INPUT_FILENAME, 'w+') as f:
f.write(text)
#_e = subprocess.call('cp fort.10 initial_fort.10', shell=True)
_e = subprocess.call('./{} < {} > {}' # noReaHFBMZ2
.format(HAMIL_AXIAL_PROGRAM,
DataAxial.INPUT_FILENAME,
output_filename),
shell=True)
res = DataAxial(z, n, output_filename)
# move shit to the folder
str_q20 = str(int(1000*q20_const)).replace('-','_')
folder_dest = os.getcwd()+'/'+DataAxial.BU_folder+'/'
_e = subprocess.call('mv {} {}'.format(output_filename,
folder_dest+output_filename
+'_Z{}N{}'.format(z,n)
+'_{}'.format(str_q20)),
shell=True)
_e = subprocess.call('cp fort.11 '+folder_dest+
'seed_q{}_'.format(str_q20)+
'_Z{}N{}'.format(z,n)+'.11',
shell=True)
#_e = subprocess.call('cp fort.11 final_fort.11', shell=True)
_e = subprocess.call('rm fort.38 fort.4* fort.5* fort.6*', shell=True)
# refresh the initial function to the new deformation
if save_final_wf and (res.properly_finished or (not force_converg)):
_e = subprocess.call('rm fort.10', shell=True)
_e = subprocess.call('cp fort.11 fort.10', shell=True)
print(" *** exec. [OK] copied the final wf to the initial wf!")
# else:
# _e = subprocess.call('cp initial_fort.10 fort.10', shell=True)
status_fin = 'X' if not res.properly_finished else '.'
if print_result:
print(" {:2} {:2} ( {}) {:9.4f} {:9.4f} {:7.4f} {:5.4f}={:6.2f}"
.format(z, n, status_fin, res.E_HFB, res.kin, res.pair,
res.beta_isoscalar, res.q20_isoscalar))
except Exception as e:
print(" >> EXCEP >>>>>>>>>> ")
print(" >> current b20 =", q20_const)
print(" > [",e.__class__.__name__,"]:", e, "<")
if res and res.E_HFB == None and not res.properly_finished:
print(" > the result is NULL (final_wf wasn't copied to initial_wf)")
print("> RESULT <DataAxial>:\n",res,"\n END RESULT <")
print(" << EXCEP <<<<<<<<<< ")
return None
return res
def _energyDiffRejectionCriteria(curr_energ, old_energ, old_e_diff,
tol_factor=2.0):
new_e_diff = curr_energ - old_energ
# change in direction of the derivative, reject if difference is > 25%
if new_e_diff * old_e_diff < 0:
return abs(new_e_diff) > 1.5 * abs(old_e_diff)
# reject if new difference is tol_factor greater than the last one.
return abs(new_e_diff) > tol_factor * abs(old_e_diff)
def _set_deform_for_PES(res_0, b_min=-0.3, b_max=0.3, N = 20):
"""
    Set an evenly spaced grid, split into "oblate" points to the left of a
    b_20 minimum and "prolate" points to the right.
    In case the seed minimum is outside the range, the old range is shifted
    and centered on the new b20.
"""
N = 2 * (N // 2) # only even number N/2
b_range = b_max - b_min
assert b_min < b_max, \
"b_max[{}] needs to be extricly greater than b_min[{}]!".format(b_max, b_min)
dq = b_range / N
    dq_decimals = int(mth.ceil(abs(np.log10(dq)))) + 1 # 2 significant decimals
dq = round(dq, dq_decimals)
b = getattr(res_0, 'b20_isoscalar', 0.0) # default 0.0
if b > b_max or b < b_min:
b_max = b + (b_range / 2) # * abs(b_max) / abs(b_max))
b_min = b - (b_range / 2) #* abs(b_min) /abs(b_min))
print("Min/Max :: ", b_min, b_max, b_max - b_min)
# b = round(b_min + (dq * ((b - b_min) // dq)), dq_decimals)
# print("b1=", b1," to ",b)
total_def = np.linspace(b_min, b_max, num=N, endpoint=True)
deform_prolate = list(filter(lambda x: x > b, total_def))
deform_oblate = list(filter(lambda x: x <= b, total_def))
deform_oblate.append(b)
deform_oblate.reverse()
Npro = len(deform_prolate)
Nobl = N - Npro
return deform_oblate, deform_prolate
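# Illustrative behaviour (approximate values): with b_min=-0.3, b_max=0.3, N=20 and a
# seed converged near b20 ~ 0.05, the "oblate" list holds the grid points at or below
# ~0.05 in descending order (walking away from the seed) and the "prolate" list holds
# the points above ~0.05 in ascending order.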
def mainLinuxEvenlyDeform(z, n, b_min=-0.1, b_max=0.1, N=30, voidDD_path=None):
"""
    Old process that evaluates each deformation point once on an evenly spaced grid over the b range.
voidDD_path is the equivalent of the DataTaurus.export_list_results for the
output of the final calculation
"""
#
#%% Executing the process, run the list of isotopes
#
output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
A = n + z
HEAD = " z n (st) E_HFB Kin Pair b2"
# Overwrite/create the buck up folder
DataAxial.setUpFolderBackUp()
if os.path.exists(DataAxial.export_list_results):
os.remove(DataAxial.export_list_results)
if voidDD_path and os.path.exists(voidDD_path):
os.remove(voidDD_path)
results = []
results_voidStep = []
print(HEAD)
constr_N2, constr_DJ2, constr_MSR = '', '', ''
constr = ''
# create a spherical seed to proceed
## NOTE: spherical constraint fits better with the following constrained
    ## process; avoid (if possible) letting the first seed be a deformed minimum
# constr = q20_constr_template.format(1,1, 0.0000)
# constr = b20_constr_template.format(1,0, 0.0000)
constr += b20_constr_template.format(1,1, b_max-0.01)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 2,
Template.iterartions : 2000,
Template.b20 : constr, #"",
Template.hamil : 0,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
print(" * first convergence (seed2)")
_ = _executeProgram(kwargs, output_filename, 0.0)
res_0 = _executeProgram(kwargs, output_filename, 0.0)
_e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
print(" ... done.")
# ###
deform_oblate, deform_prolate = _set_deform_for_PES(res_0, b_min,b_max, N)
for i_deform, deform in enumerate((deform_oblate, deform_prolate)):
# copy it.
_e = subprocess.call('cp initial_Spheric.11 fort.10', shell=True)
## ----- execution ----
for b20_const in deform:
            # constrain the calculation to the current deformation
#q20_const *= 2 * np.pi / (np.sqrt(5 * np.pi))
constr = b20_constr_template.format(1,1, b20_const)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 2000,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
}
res = _executeProgram(kwargs, output_filename, b20_const,
print_result=False)
if res == None:
continue # dont save empty result
if i_deform == 0:
results.insert(0, res)
else:
results.append(res)
## SECOND PROCESS --------------------------------
if voidDD_path == None:
continue
# do a void step to activate DD with no rearrangement
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 0,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
}
res2 = _executeProgram(kwargs, output_filename+'_VS_', b20_const,
save_final_wf=False, noGradient=True)
if res2 == None:
continue # dont save empty result
if i_deform == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
results_voidStep.insert(0, res2)
else:
results_voidStep.append(res2)
# intermediate print
_exportResult(results, DataAxial.export_list_results)
if voidDD_path != None:
_exportResult(results_voidStep, voidDD_path)
# ## ------ end exec. -----
_exportResult(results, DataAxial.export_list_results)
print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
if results_voidStep:
_exportResult(results_voidStep, voidDD_path)
print(" ** generate File VoidStep in:", voidDD_path)
def mainLinuxSweepingPES(z, n, b_min=-0.1, b_max=0.1, N_max=30,
invert=False, voidDD_path=None):
"""
    Process that starts from one limit of the PES, advances to the other end,
    and then sweeps back from that limit. If the surface drops along the way,
    the backward pass overwrites a point only when the slot is empty or the new
    energy is lower (E' < E), so the lower-energy solution is kept.
"""
#
#%% Executing the process, run the list of isotopes
#
output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
A = n + z
HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the backup folder
DataAxial.setUpFolderBackUp()
if os.path.exists(DataAxial.export_list_results):
os.remove(DataAxial.export_list_results)
if voidDD_path and os.path.exists(voidDD_path):
os.remove(voidDD_path)
N_max += 1
b20_base = b_min if not invert else b_max
b20_lim = b_max if not invert else b_min
results = [None] * N_max
results_voidStep = [None] * N_max
print(HEAD)
constr_N2, constr_DJ2, constr_MSR = '', '', ''
# create a spherical seed to proceed
## NOTE: spherical constraint fits better with the following constrained
    ## process; avoid (if possible) letting the first seed be a deformed minimum
# constr = q10_constr_template.format(1,0, 0.0)
# constr += q10_constr_template.format(0,1, 0.0)
constr = b20_constr_template.format(1,1, b20_base)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 2,
Template.iterartions : 2000,
Template.b20 : constr, #"",
Template.hamil : 8,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
print(" * first convergence (seed2)")
_ = _executeProgram(kwargs, output_filename, 0.0)
res_0 = _executeProgram(kwargs, output_filename, 0.0)
_e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
print(" ... done.")
# ###
deform_array = list(np.linspace(b20_base, b20_lim, num=N_max, endpoint=True))
for reverse in (0, 1):
print('\n==== REVERSE READING [', bool(reverse), '] ==================\n')
for i in range(N_max):
i2 = i
if reverse:
i2 = - i - 1
b20_const = deform_array[i2]
constr = b20_constr_template.format(1,1, b20_const)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 2000,
Template.b20 : constr,
Template.hamil : 8,
Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
}
res = _executeProgram(kwargs, output_filename, b20_const,
print_result=True)
if res == None:
continue # dont save empty result
if reverse:
if results[i2] != None:
if results[i2].E_HFB < res.E_HFB:
continue # don't save, new energy is bigger
results[i2] = res
            # keeps the forward result, a reverse result filling an empty slot,
            # or a reverse result with lower energy (E' < E)
# intermediate print
_exportResult(results, DataAxial.export_list_results)
## SECOND PROCESS --------------------------------
if voidDD_path != None:
## do a void step to activate DD with no rearrangement
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 0,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
res2 = _executeProgram(kwargs, output_filename+'_VS_', b20_const,
save_final_wf=False, noGradient=True)
if res2 == None:
continue # dont save empty result
if reverse:
if results_voidStep[i2] != None:
if results_voidStep[i2].E_HFB < res2.E_HFB:
continue # don't save, new energy is bigger
results_voidStep[i2] = res2
# intermediate print
if voidDD_path != None:
_exportResult(results_voidStep, voidDD_path)
# ## ------ end exec. -----
_exportResult(results, DataAxial.export_list_results)
print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
if voidDD_path != None:
_exportResult(results_voidStep, voidDD_path)
print(" ** generate File VoidStep in:", voidDD_path)
def mainLinuxSecurePES(z, n, b_min=-0.1, b_max=0.1, N_base=50, b20_base=None,
voidDD_path=None):
"""
    Process that walks the deformation range with an adaptive q20 step to avoid
    phase breaking; the step is reduced (down to dq_base / 2**dqDivisionMax of
    the evenly spaced value) whenever an energy jump is detected.
    The criterion to accept a new step is the HFB/Kin energy jump with respect
    to the previous one (pairing is omitted since pair=0.0 is common).
    !! Note: increasing N_base only increases the precision, since dq_base
    becomes progressively smaller (if the sweep gets stuck at a point, you will
    need to increase the factor in the N_MAX limit).
"""
#
#%% Executing the process, run the list of isotopes
#
output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
A = n + z
HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the backup folder
DataAxial.setUpFolderBackUp()
if os.path.exists(DataAxial.export_list_results):
os.remove(DataAxial.export_list_results)
if voidDD_path and os.path.exists(voidDD_path):
os.remove(voidDD_path)
results = []
results_voidStep = []
## definitions for the iteration
dq_base = (b_max - b_min) / N_base
b20_base = 0.0000 if not b20_base else b20_base
ener_base = None
N_MAX = 70 * N_base # 7 * N_base
dqDivisionMax = 6
print(HEAD)
constr_N2, constr_DJ2, constr_MSR = '', '', ''
# create a spherical seed to proceed
## NOTE: spherical constraint fits better with the following constrained
    ## process; avoid (if possible) letting the first seed be a deformed minimum
# constr = b20_constr_template.format(1,1, 0.0000)
constr = b20_constr_template.format(1,1, b20_base)
# constr_N2 = DN2_constr_template.format(1,0,2.6925926)
# constr_N2+= DN2_constr_template.format(0,1,2.7390982)
kwargs = {
Template.com : com_template.format(1,0),
Template.z : z, Template.a : A,
Template.seed : 2,
Template.iterartions : 2000,
Template.b20 : constr, #"", #
Template.hamil : 8,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
print(" * first convergence (seed2)")
    for iter_ in range(1, 4):
        res_0 = _executeProgram(kwargs, output_filename, 0.0)
        if res_0 and res_0.properly_finished:
            break
        else:
            if iter_ == 3:
                print("[ERROR], after 3 tries the calculation STOP for", z, n)
                return
            # reduce the gradient step and allow more iterations before retrying;
            # Template.eta_grad is only adjusted if it was actually set in kwargs
            if Template.eta_grad in kwargs:
                kwargs[Template.eta_grad] = max(
                    kwargs[Template.eta_grad] - 0.007 * iter_, 0.001)
            kwargs[Template.iterartions] += 150 * iter_
            print(" [WARNING] 1st step non converged, retry:", iter_,
                  kwargs.get(Template.eta_grad))
# First convergence done
ener_base = float(res_0.E_HFB)
print("[Ener Base] =", ener_base)
_e = subprocess.call('cp fort.11 initial_Spheric.11', shell=True)
print(" ... done.")
results.append(res_0)
## WARNING! compromising constraint
b20_base = float(res_0.beta_isoscalar)
print(" WARNING! compromising start point b20=", b20_base)
# ###
for prolate, b_lim in enumerate((b_min, b_max)): #prolate = 1
# copy the first function.
_e = subprocess.call('cp initial_Spheric.11 fort.10', shell=True)
b20_i = b20_base
energ = ener_base
curr_energ = ener_base
e_diff = 10.0 #
i = 0
div = 0
print("runing deform[",prolate,"] up to:", b_lim, N_MAX)
while (abs(b20_i) < abs(b_lim)) and i < N_MAX:
b20 = b20_i - (((-1)**(prolate))*(dq_base / (2**div)))
# execute but do not save the final function
constr = b20_constr_template.format(1,1, b20)
kwargs = {
Template.com : com_template.format(1,0),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 2000,
Template.b20 : constr,
Template.hamil : 8, # 0,#
Template.varN2: ""
}
res = _executeProgram(kwargs, output_filename, b20,
print_result=True, save_final_wf=False,
force_converg=True)
## Case 1: the program broke and the result is NULL
if res == None:
i += 1
if div < dqDivisionMax:
# reject(increase division)
div += 1
print(" * reducing b20 increment(1): [{}] Ei{:9.2f} - Eim1{:9.2f} ={:8.5f} > {:8.5f}"
.format(div, curr_energ, energ, curr_energ - energ, e_diff))
continue
else:
# accept and continue (DONT copy final function)
# increase the step for valid or deformation precision overflow
div = max(0, div - 1) ## smoothly recover the dq
e_diff = curr_energ - energ
energ = curr_energ
b20_i = b20
print(" * Failed but continue: DIV{} DIFF{:10.4f} ENER{:10.4f} B{:5.3f}"
.format(div, e_diff, energ, b20_i))
continue # cannot evaluate next Step or save results
            ## Case 2: the program didn't break and the result has values
            # take the E_HFB energy and compare it with the previous one (acceptance criterion)
curr_energ = float(res.E_HFB)
i += 1
if ((div < dqDivisionMax)
and (_energyDiffRejectionCriteria(curr_energ, energ, e_diff,
tol_factor= 2.0)
or (not res.properly_finished))):
# reject(increase division)
div += 1
print(" * reducing b20 increment(2) [i{}]: [{}] Ei{:9.2f} - Eim1{:9.2f} ={:8.5f} > ({:8.5f}, {:8.5f})"
.format(i, div, curr_energ, energ, curr_energ - energ,
3.0*e_diff, 1.5*e_diff))
continue
else:
print(" * [OK] step accepted DIV:{} CE{:10.4} C.DIFF:{:10.4}"
.format(div, curr_energ, curr_energ - energ))
# accept and continue (copy final function)
_e = subprocess.call('cp fort.11 fort.10', shell=True)
# increase the step for valid or deformation precision overflow
div = max(0, div - 2) ## smoothly recover the dq
e_diff = curr_energ - energ
energ = curr_energ
b20_i = b20
print(" * [OK] WF directly copied [i{}]: DIV:{} DIFF{:10.4f} ENER{:10.4f} B{:5.3f}"
.format(i,div, e_diff, energ, b20_i))
if prolate == 0:
results.insert(0, res)
else:
results.append(res)
## SECOND PROCESS --------------------------------
if voidDD_path != None:
# do a void step to activate DD with no rearrangement
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 0,
Template.iterartions : 0,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2: ""
}
res2 = _executeProgram(kwargs, output_filename+'_VS', b20,
save_final_wf=False, noGradient=True)
if res2 == None:
continue # dont save empty result
if prolate == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
results_voidStep.insert(0, res2)
else:
results_voidStep.append(res2)
print("-------------------------------------------------------------------------------")
print()
# intermediate print
_exportResult(results, DataAxial.export_list_results)
if voidDD_path != None:
_exportResult(results_voidStep, voidDD_path)
# ## ------ end exec. -----
_exportResult(results, DataAxial.export_list_results)
print(" ** generate File 1st convergence in:", DataAxial.export_list_results)
if results_voidStep:
_exportResult(results_voidStep, voidDD_path)
print(" ** generate File VoidStep in:", voidDD_path)
def _exportResult(results, path_):
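    """Write every non-empty result as one getAttributesDictLike line to path_."""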
data = []
for res in results:
if res:
line = res.getAttributesDictLike
data.append(line+'\n')
with open(path_, 'w+') as f:
f.writelines(data)
def mainLinux(z, n):
#
#%% Executing the process, run the list of isotopes
#
output_filename = 'aux_output' ### DataTaurus.output_filename_DEFAULT #
A = n + z
HEAD = " z n (st) E_HFB Kin Pair b2"
    # Overwrite/create the backup folder
DataAxial.setUpFolderBackUp()
if os.path.exists(DataAxial.export_list_results):
os.remove(DataAxial.export_list_results)
results = []
print(HEAD)
constr_N2, constr_DJ2, constr_MSR = '', '', ''
deform_prolate = np.linspace(0.0, 40.0, num=45, endpoint=True)
deform_oblate = np.linspace(0.0,-40.0, num=45, endpoint=True) #18,
for i_deform, deform in enumerate((deform_oblate, deform_prolate)):
# create a spherical seed to proceed
constr = q20_constr_template.format(1,1, 0.000)
# constr += b20_constr_template.format(0,1, b20_const/2)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 2,
Template.iterartions : 2000,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2: constr_N2 + constr_DJ2 + constr_MSR
}
print(" * first convergence (seed2)")
_ = _executeProgram(kwargs, output_filename, 0.0)
print(" ... done.")
## ----- execution ----
for q20_const in deform:
            # constrain the calculation to the current deformation
#q20_const *= 2 * np.pi / (np.sqrt(5 * np.pi))
constr = q20_constr_template.format(1,1, q20_const)
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 1,
Template.iterartions : 2000,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
res = _executeProgram(kwargs, output_filename, q20_const,
print_result=False)
if res == None:
continue # dont save empty result
# do a void step to activate DD with no rearrangement
kwargs = {
Template.com : com_template.format(1,1),
Template.z : z,
Template.a : A,
Template.seed : 1,
Template.iterartions : 500,
Template.b20 : constr,
Template.hamil : 0,
Template.varN2 : constr_N2 + constr_DJ2 + constr_MSR
}
res = _executeProgram(kwargs, output_filename, q20_const)
if i_deform == 0: #grow in order [-.5, -.4, ..., .0,..., +.4, +.5]
results.insert(0, res)
else:
results.append(res)
# ## ------ end exec. -----
data = []
for res in results:
# line = ', '.join([k+' : '+str(v) for k,v in res.__dict__.items()])
line = res.getAttributesDictLike
data.append(line+'\n')
# for i, r in enumerate(results):
# print("{} : {},".format(i, r.r_isoscalar))
with open(DataAxial.export_list_results, 'a+') as f:
f.writelines(data)
#%% main
z = 12
n = 12
output_filename = 'aux_output'
tail = ''
# tail = 'B1'
# tail = '1GCOM0'
# tail = 'B1COM0'
# tail = 'D1SnoR'
# tail = 'D1S_voidDD'
tail = 'D1S'
nucleus = []
for z in range(10,15, 2):
for n in range(max(6, z-2), 17, 2):
nucleus.append((z, n))
DataAxial.export_list_results = "export_PESz{}n{}Axial{}.txt".format(z,n,tail)
if __name__ == '__main__':
output_filename = DataAxial.output_filename_DEFAULT
if not os.getcwd().startswith('C:'):
print()
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print(' Running PES with HFBAxial:', HAMIL_AXIAL_PROGRAM)
print(' !!! CHECK, CHECK MZ:', HAMIL_AXIAL_PROGRAM.upper())
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print()
for z, n in nucleus:
print("PES for Z={} N={}".format(z,n))
output_filename = 'aux_output'
DataAxial.export_list_results = "export_PESz{}n{}Axial{}.txt".format(z,n, tail)
voidDD_path = "export_PESz{}n{}Axial{}_voidDD.txt".format(z,n, tail)
# mainLinux(z, n)
# mainLinuxEvenlyDeform(z, n, -0.28, 0.28, 100, voidDD_path)
# mainLinuxSecurePES(z, n, -0.30, 0.32, 100,
# voidDD_path=voidDD_path, b20_base= 0.3093)
# mainLinuxSecurePES(z, n, -0.30, 0.30, 100, b20_base=-0.29)
mainLinuxSweepingPES(z, n, -0.6, 0.6, 300, False, None)
else:
#%% process in windows
results_axial = []
import_file_Axi = 'BU_results_old/export_PESz{}n{}Axial{}.txt'.format(z, n, tail)
with open(import_file_Axi, 'r') as f:
data = f.readlines()
for line in data:
res = DataAxial(None, None, None, True)
res.setDataFromCSVLine(line)
results_axial.append(res)
for attr_ in (
'E_HFB',
'kin',
'var_n', 'pair',#, 'properly_finished'
# 'Jx_var',
# 'Jz',
# 'r_isoscalar',
):
## plot energies
x_tau, y_tau = [], []
x_ax, y_ax = [], []
for r in results_axial:
x_ax.append(r.q20_isoscalar)
                y_ax.append(getattr(r, attr_, 0.0))  # /(r.n + r.z)
            if attr_ == 'properly_finished':
y_ax = [1 if p == 'True' else 0 for p in y_ax]
plt.figure()
plt.xlabel(r"$Q_{20} [fm^2]$")
plt.plot(x_ax, y_ax, 'o-b', label="HFB axial")
plt.title(attr_+" [Z:{} N:{}] ".format(z, n)+" B1 no LS")
plt.legend()
plt.tight_layout()
plt.show()
|
migueldelafuente1/taurus_tools
|
_legacy/exe_q20pes_axial.py
|
exe_q20pes_axial.py
|
py
| 33,646 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7438419042
|
"""Module containing class `Settings`."""
from vesper.util.bunch import Bunch
import vesper.util.os_utils as os_utils
import vesper.util.yaml_utils as yaml_utils
class Settings(Bunch):
"""
Collection of software configuration settings.
A *setting* has a *name* and a *value*. The name must be a Python
identifier. The value must be `None` or a boolean, integer, float,
string, list, or `Settings` object. A setting contained in a
`Settings` object is accessed as an attribute of the object.
For example, a setting `x` of a settings object `s` is accessed
as `s.x`.
"""
@staticmethod
def create_from_dict(d):
"""Creates a settings object from a dictionary."""
if not isinstance(d, dict):
raise TypeError(
'Settings data must be a dictionary, not a {}.'.format(
d.__class__.__name__))
d = dict(
(k, Settings._create_from_dict_aux(v))
for k, v in d.items())
return Settings(**d)
@staticmethod
def _create_from_dict_aux(v):
if isinstance(v, dict):
return Settings.create_from_dict(v)
elif isinstance(v, list):
return [Settings._create_from_dict_aux(i) for i in v]
else:
return v
@staticmethod
def create_from_yaml(s):
"""Creates a settings object from a YAML string."""
try:
d = yaml_utils.load(s)
except Exception as e:
raise ValueError(
'YAML parse failed. Error message was:\n{}'.format(str(e)))
if d is None:
d = dict()
elif not isinstance(d, dict):
raise ValueError('Settings must be a YAML mapping.')
return Settings.create_from_dict(d)
@staticmethod
def create_from_yaml_file(file_path):
"""Creates a settings object from a YAML file."""
s = os_utils.read_file(file_path)
return Settings.create_from_yaml(s)
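# Usage sketch (hypothetical setting names, not part of Vesper itself):
#     s = Settings.create_from_yaml('num_channels: 2\nsample_rate: 24000\n')
#     s.num_channels   # -> 2, accessed as an attribute per the class docstring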
|
HaroldMills/Vesper
|
vesper/util/settings.py
|
settings.py
|
py
| 2,132 |
python
|
en
|
code
| 47 |
github-code
|
6
|
4787539776
|
# -*- coding: utf-8 -*
import sys
import re
import unicodedata
from Table import Table
import settings
reload(sys)
sys.setdefaultencoding("utf-8")
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
class Database:
def __init__(self):
self.tables = []
def get_number_of_tables(self):
return len(self.tables)
def get_tables(self):
return self.tables
def get_tables_into_dictionnary(self):
data = {}
for table in self.tables:
data[table.name] = []
for column in table.columns:
data[table.name].append(column.name)
return data
def get_primary_keys_by_table(self):
data = {}
for table in self.tables:
data[table.name] = table.primary_keys
return data
def get_primary_keys_of_table(self, table):
for _table in self.tables:
if _table.name == table:
return _table.primary_keys
def get_foreign_keys_of_table(self, table):
for _table in self.tables:
if _table.name == table:
return _table.get_foreign_keys()
def add_table(self, table):
self.tables.append(table)
def load(self, path):
with open(path) as f:
content = f.read()
tables_string = [p.split(';')[0]
for p in content.split('CREATE') if ';' in p]
for table_string in tables_string:
if 'TABLE' in table_string:
table = self.create_table(table_string)
self.add_table(table)
alter_table_string = [p.split(';')[0]
for p in content.split('ALTER') if ';' in p]
for s in alter_table_string:
if 'TABLE' in s:
self.alter_table(s)
def predict_type(self, string):
if 'int' in string.lower():
return 'int'
elif 'char' in string.lower() or 'text' in string.lower():
return 'string'
elif 'date' in string.lower():
return 'date'
else:
return 'unknow'
def create_table(self, table_string):
lines = table_string.split("\n")
table = Table()
for line in lines:
if 'TABLE' in line:
table_name = re.search("`(\w+)`", line)
table.set_name(table_name.group(1))
elif 'PRIMARY KEY' in line:
primary_key_columns = re.findall("`(\w+)`", line)
for primary_key_column in primary_key_columns:
table.add_primary_key(primary_key_column)
else:
column_name = re.search("`(\w+)`", line)
if column_name is not None:
column_type = self.predict_type(line)
table.add_column(column_name.group(1), column_type)
return table
def alter_table(self, alter_string):
lines = alter_string.replace('\n', ' ').split(';')
for line in lines:
if 'PRIMARY KEY' in line:
table_name = re.search("TABLE `(\w+)`", line).group(1)
table = [t for t in self.tables if t.get_name() == table_name][
0]
primary_key_columns = re.findall(
"PRIMARY KEY \(`(\w+)`\)", line)
for primary_key_column in primary_key_columns:
table.add_primary_key(primary_key_column)
elif 'FOREIGN KEY' in line:
table_name = re.search("TABLE `(\w+)`", line).group(1)
table = [t for t in self.tables if t.get_name() == table_name][
0]
foreign_keys_list = re.findall(
"FOREIGN KEY \(`(\w+)`\) REFERENCES `(\w+)` \(`(\w+)`\)", line)
for col, ref_table, ref_col in foreign_keys_list:
table.add_foreign_key(col, ref_table, ref_col)
def print_me(self):
if settings.DEBUG:
for table in self.tables:
print('+-------------------------------------+')
print("| %25s |" % (table.name.upper()))
print('+-------------------------------------+')
for column in table.columns:
if column.name in table.primary_keys:
print("| 🔑 %31s |" % (
color.BOLD + column.name + ' (' + column.type + ')' + color.END))
else:
print("| %23s |" %
(column.name + ' (' + column.type + ')'))
print('+-------------------------------------+\n')
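# Usage sketch (hypothetical schema file name):
#     db = Database()
#     db.load('schema.sql')                 # parses CREATE TABLE / ALTER TABLE blocks
#     db.get_tables_into_dictionnary()      # -> {'table_name': ['col1', 'col2', ...]}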
|
rupinder1133/ln2sqlmodule
|
ln2sqlmodule/Database.py
|
Database.py
|
py
| 4,930 |
python
|
en
|
code
| 16 |
github-code
|
6
|
40290816138
|
"""
Purpose : Find Half-Life of C14
Author : Vivek T S
Date : 04/11/2018
"""
import matplotlib.pyplot as pyplot
def halfLifeC14(originalAmount, dt):
k = -0.000121
c14amount = originalAmount
time = 0
while c14amount > originalAmount * 0.5:
c14amount = c14amount + ( k * c14amount * dt)
time = time + dt
return time
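# Sanity check (hedged): the analytic half-life is ln(2)/|k| = 0.693147/0.000121
# ≈ 5728.5 years, so halfLifeC14(100, 0.0001) should print a value close to that.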
def main():
print(halfLifeC14(100,0.0001))
main()
|
vivekworks/learning-to-code
|
4. Discovering Computer Science/Python/Chapter 4 - Growth And Decay/Exercises 4/exercise443.py
|
exercise443.py
|
py
| 382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74025598587
|
import arcade
from src.characters.model import Player
class LuffyPlayer(Player):
life = 100
basic_attack = 5
special_attack = 15
speed = 5
def __init__(self, x, y, direction):
super().__init__()
self.x = x
self.y = y
self.direction = direction
self.animation = LuffyAnimation(self.x, self.y, self.direction, 2)
def move(self, direction):
self.x += direction*self.speed
self.animation.move_x(self.x, direction)
def action_basic_attack(self, direction):
self.animation.action_basic_attack(self.x, direction)
def stop(self, direction):
self.animation.stop_move(direction)
def draw(self):
self.animation.draw()
def update(self):
self.animation.update()
class LuffyAnimation(object):
sprite_path = "assets/sprites/luffy.png"
def __init__(self, x, y, direction, scale):
super().__init__()
self.scale = scale
self.sprite_list = arcade.SpriteList()
move_sprite = arcade.AnimatedTimeSprite(scale=self.scale)
move_sprite.position = [x, y]
move_sprite.textures = self.stand_animation(direction)
self.sprite_list.append(move_sprite)
def draw(self):
self.sprite_list.draw()
def update(self):
self.sprite_list.update_animation()
def move_animation(self, direction) -> [arcade.Texture]:
mirror = (direction == "left")
t1 = arcade.load_texture(self.sprite_path, 387, 12, 55, 70, mirrored=mirror, scale=self.scale)
t2 = arcade.load_texture(self.sprite_path, 440, 12, 51, 70, mirrored=mirror, scale=self.scale)
t3 = arcade.load_texture(self.sprite_path, 490, 12, 58, 70, mirrored=mirror, scale=self.scale)
t4 = arcade.load_texture(self.sprite_path, 547, 12, 51, 70, mirrored=mirror, scale=self.scale)
t5 = arcade.load_texture(self.sprite_path, 597, 12, 51, 70, mirrored=mirror, scale=self.scale)
t6 = arcade.load_texture(self.sprite_path, 646, 12, 52, 70, mirrored=mirror, scale=self.scale)
t7 = arcade.load_texture(self.sprite_path, 698, 12, 58, 70, mirrored=mirror, scale=self.scale)
t8 = arcade.load_texture(self.sprite_path, 755, 12, 51, 70, mirrored=mirror, scale=self.scale)
return [t1, t2, t3, t4, t5, t6, t7, t8]
def stand_animation(self, direction) -> [arcade.Texture]:
mirror = (direction == "left")
t1 = arcade.load_texture(self.sprite_path, 0, 10, 46, 66, mirrored=mirror, scale=self.scale)
t2 = arcade.load_texture(self.sprite_path, 44, 10, 44, 66, mirrored=mirror, scale=self.scale)
t3 = arcade.load_texture(self.sprite_path, 88, 7, 44, 69, mirrored=mirror, scale=self.scale)
t4 = arcade.load_texture(self.sprite_path, 139, 7, 44, 69, mirrored=mirror, scale=self.scale)
t5 = arcade.load_texture(self.sprite_path, 181, 5, 40, 72, mirrored=mirror, scale=self.scale)
t6 = arcade.load_texture(self.sprite_path, 139, 7, 44, 69, mirrored=mirror, scale=self.scale)
t7 = arcade.load_texture(self.sprite_path, 88, 7, 44, 69, mirrored=mirror, scale=self.scale)
t8 = arcade.load_texture(self.sprite_path, 44, 10, 44, 66, mirrored=mirror, scale=self.scale)
return [t1, t2, t3, t4, t5, t6, t7, t8]
def basic_attack_animation(self, direction) -> [arcade.Texture]:
mirror = (direction == "left")
t1 = arcade.load_texture(self.sprite_path, 0, 83, 48, 66, mirrored=mirror, scale=self.scale)
t2 = arcade.load_texture(self.sprite_path, 46, 83, 44, 66, mirrored=mirror, scale=self.scale)
t3 = arcade.load_texture(self.sprite_path, 88, 83, 65, 66, mirrored=mirror, scale=self.scale)
t4 = arcade.load_texture(self.sprite_path, 153, 83, 68, 66, mirrored=mirror, scale=self.scale)
t5 = arcade.load_texture(self.sprite_path, 220, 83, 44, 66, mirrored=mirror, scale=self.scale)
return [t1, t2, t3, t4, t5]
def get_sprite(self) -> arcade.AnimatedTimeSprite:
return self.sprite_list[0]
def move_x(self, x, direction):
sprite = self.get_sprite()
sprite.center_x = x
if direction > 0:
sprite.textures = self.move_animation("right")
else:
sprite.textures = self.move_animation("left")
def action_basic_attack(self, x, direction):
sprite = self.get_sprite()
sprite.center_x = x
if direction > 0:
sprite.textures = self.basic_attack_animation("right")
else:
sprite.textures = self.basic_attack_animation("left")
def stop_move(self, direction):
sprite = self.get_sprite()
sprite.textures = self.stand_animation(direction)
|
anthonykgross/One-fight
|
src/characters/luffy/model.py
|
model.py
|
py
| 4,704 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73817524986
|
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import json
import os
import re
import tests
import tests.mockbackend
import tests.utils
#################################
# 'bugzilla query' mock testing #
#################################
def test_query(run_cli):
# bad field option
fakebz = tests.mockbackend.make_bz()
cmd = "bugzilla query --field FOO"
out = run_cli(cmd, fakebz, expectfail=True)
assert "Invalid field argument" in out
# Simple query with some comma opts
cmd = "bugzilla query "
cmd += "--product foo --component foo,bar --bug_id 1234,2480"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query1.txt",
bug_search_return="data/mockreturn/test_query1.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query1.txt")
# RHBZ query with a ton of opts
cmd = "bugzilla query "
cmd += "--product foo --component foo,bar --bug_id 1234,2480 "
cmd += "--keywords fribkeyword --fixed_in amifixed "
cmd += "--qa_whiteboard some-example-whiteboard "
cmd += "--cc [email protected] --qa_contact [email protected] "
cmd += "--comment 'some comment string' "
fakebz = tests.mockbackend.make_bz(rhbz=True,
bug_search_args="data/mockargs/test_query1-rhbz.txt",
bug_search_return="data/mockreturn/test_query1.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query1-rhbz.txt")
# --emailtype handling
cmd = "bugzilla query --cc [email protected] --emailtype BAR "
fakebz = tests.mockbackend.make_bz(rhbz=True,
bug_search_args="data/mockargs/test_query2-rhbz.txt",
bug_search_return="data/mockreturn/test_query1.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query2-rhbz.txt")
# Same but with --ids output
cmd = "bugzilla query --ids "
cmd += "--product foo --component foo,bar --bug_id 1234,2480"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query1-ids.txt",
bug_search_return="data/mockreturn/test_query1.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query1-ids.txt")
# Same but with --raw output
cmd = "bugzilla query --raw --bug_id 1165434"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query2.txt",
bug_search_return={"bugs": [{"id": 1165434}]},
bug_get_args=None,
bug_get_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
# Dictionary ordering is random, so scrub it from our output
    out = re.sub(r"\{.*\}", r"'DICT SCRUBBED'", out, flags=re.MULTILINE)
tests.utils.diff_compare(out, "data/clioutput/test_query2.txt")
# Test a bunch of different combinations for code coverage
cmd = "bugzilla query --status ALL --severity sev1,sev2 "
cmd += "--outputformat='%{foo}:%{bar}::%{whiteboard}:"
cmd += "%{flags}:%{flags_requestee}%{whiteboard:devel}::"
cmd += "%{flag:needinfo}::%{comments}::%{external_bugs}'"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query3.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query3.txt")
# Test --status DEV and --full
cmd = "bugzilla query --status DEV --full"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query4.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query4.txt")
# Test --status QE and --extra, and components-file
compfile = os.path.dirname(__file__) + "/data/components_file.txt"
cmd = "bugzilla query --status QE --extra "
cmd += "--components_file %s" % compfile
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query5.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query5.txt")
# Test --status EOL and --oneline, and some --field usage
cmd = "bugzilla query --status EOL --oneline "
cmd += "--field FOO=1 --field=BAR=WIBBLE "
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query6.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt",
bug_get_args="data/mockargs/test_query_cve_getbug.txt",
bug_get_return="data/mockreturn/test_query_cve_getbug.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query6.txt")
# Test --status OPEN and --from-url
url = "https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=MODIFIED&bug_status=ON_DEV&bug_status=ON_QA&bug_status=VERIFIED&bug_status=FAILS_QA&bug_status=RELEASE_PENDING&bug_status=POST&classification=Fedora&component=virt-manager&order=bug_status%2Cbug_id&product=Fedora&query_format=advanced" # noqa
cmd = "bugzilla query --status OPEN --from-url %s" % url
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query7.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query7.txt")
# Test --json output
cmd = "bugzilla query --json --id 1165434"
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query8.txt",
bug_search_return={"bugs": [{"id": 1165434}]},
bug_get_args=None,
bug_get_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(tests.utils.sanitize_json(out),
"data/clioutput/test_query8.txt")
assert json.loads(out)
# Test --json output
cmd = ("bugzilla query --json --id 1165434 "
"--includefield foo --includefield bar "
"--excludefield excludeme "
"--extrafield extrame1 --extrafield extrame2 ")
fakebz = tests.mockbackend.make_bz(rhbz=True,
bug_search_args="data/mockargs/test_query9.txt",
bug_search_return={"bugs": [{"id": 1165434}]},
bug_get_args="data/mockargs/test_getbug_query9.txt",
bug_get_return="data/mockreturn/test_getbug_rhel.txt")
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(tests.utils.sanitize_json(out),
"data/clioutput/test_query9.txt")
assert json.loads(out)
# Test every remaining option
cmd = "bugzilla query "
cmd += "--sub-component FOOCOMP "
cmd += "--version 5.6.7 --reporter [email protected] "
cmd += "--summary 'search summary' "
cmd += "--assignee [email protected] "
cmd += "--blocked 12345 --dependson 23456 "
cmd += "--keywords FOO --keywords_type substring "
cmd += "--url https://example.com --url_type sometype "
cmd += "--target_release foo --target_milestone bar "
cmd += "--quicksearch 1 --savedsearch 2 --savedsearch-sharer-id 3 "
cmd += "--tags +foo --flag needinfo --alias somealias "
cmd += "--devel_whiteboard DEVBOARD "
cmd += "--priority wibble "
cmd += "--fixed_in 5.5.5 --fixed_in_type substring "
cmd += "--whiteboard FOO --status_whiteboard_type substring "
fakebz = tests.mockbackend.make_bz(
bug_search_args="data/mockargs/test_query10.txt",
bug_search_return="data/mockreturn/test_getbug_rhel.txt",
rhbz=True)
out = run_cli(cmd, fakebz)
tests.utils.diff_compare(out, "data/clioutput/test_query10.txt")
|
python-bugzilla/python-bugzilla
|
tests/test_cli_query.py
|
test_cli_query.py
|
py
| 7,747 |
python
|
en
|
code
| 120 |
github-code
|
6
|
12816084120
|
import requests
import os
from dotenv import load_dotenv
class YaUploader:
BASE_URL = 'https://cloud-api.yandex.net/v1/disk/resources'
GET_FILES = '/files'
UPLOAD_LINK = '/upload'
def __init__(self, token) -> None:
self.token = token
def get_headers(self):
headers = {
"Authorization": self.token,
"Content-type": "application/json"
}
return headers
def get_files(self):
url = self.BASE_URL + self.GET_FILES
response = requests.get(url, headers=self.get_headers())
response.raise_for_status()
return response.json()
def _get_upload_link(self, params):
url = self.BASE_URL + self.UPLOAD_LINK
response = requests.get(url, headers=self.get_headers(), params=params)
response.raise_for_status()
response_body = response.json()
href = response_body.get('href', '')
return href
def upload(self, filename, path_to_file):
params = {
"path": path_to_file,
"overwrite": "true"
}
url = self._get_upload_link(params)
        # keep the file handle in a context manager so it is always closed
        with open(filename, 'rb') as file_obj:
            response = requests.put(
                url,
                headers=self.get_headers(),
                params=params,
                data=file_obj
            )
response.raise_for_status()
if response.status_code == 201:
print(f'{params["path"]} successfully created!')
if __name__ == '__main__':
load_dotenv()
TOKEN = os.getenv('YA_TOKEN')
file_name = 'test.txt'
path_to_file = r'netology/' + file_name
ya = YaUploader(TOKEN)
ya.upload(file_name, path_to_file)
|
SergeyMMedvedev/8_api_requests
|
task_2.py
|
task_2.py
|
py
| 1,652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16315480908
|
from dronekit import Vehicle, connect, VehicleMode, Command
import time
from pymavlink.dialects.v20 import ardupilotmega
from pymavlink import mavutil
class DKVehicle(Vehicle):
def __init__(self, connection):
print ("Connecting to vehicle on: %s" % connection)
self.vehicle = connect(connection, baud=57600, wait_ready=True)
print ("Connected to vehicle on: %s" % connection)
self.connection = connection
# Force Dronekit to use Mavlink v2.0
self.vehicle._master.first_byte = True
self.servo_output = []
for x in range(17):
self.servo_output.append(0)
self.servo_func = {
'1': 4, # Aileron
'2': 19, # Elevator
'3': 70, # Throttle
'4': 21, # Rudder
'5': 33, # Motor1
'6': 34, # Motor2
'7': 35, # Motor3
'8': 36, # Motor4
'9': 0, # Disabled
'10': 0, # Disabled
'11': 0, # Disabled
'12': 0, # Disabled
'13': 0, # Disabled
'14': 0, # Disabled
'15': 0, # Disabled
'16': 0, # Disabled
}
def printstats(self):
print ("Vehicle: %s" % self.vehicle.version)
print (" Connected on: %s" % self.connection)
print (" GPS: %s" % self.vehicle.gps_0)
print (" Battery: %s" % self.vehicle.battery)
print (" Last Heartbeat: %s" % self.vehicle.last_heartbeat)
print (" Is Armable?: %s" % self.vehicle.is_armable)
print (" System status: %s" % self.vehicle.system_status.state)
print (" Mode: %s" % self.vehicle.mode.name)
def print_servos(self):
for x in range(0,16):
print("Servo%s: %s" % (x, self.servo_output[x]))
def print_servo(self, servo):
print("Servo%s: %s" % (servo, self.servo_output[servo]))
def print_channels(self):
print(" Ch1: %s" % self.vehicle.channels['1'])
print(" Ch2: %s" % self.vehicle.channels['2'])
print(" Ch3: %s" % self.vehicle.channels['3'])
print(" Ch4: %s" % self.vehicle.channels['4'])
print(" Ch5: %s" % self.vehicle.channels['5'])
print(" Ch6: %s" % self.vehicle.channels['6'])
print(" Ch7: %s" % self.vehicle.channels['7'])
print(" Ch8: %s" % self.vehicle.channels['8'])
def override_servo(self, servo,val):
servo_string = 'SERVO' + str(servo) + '_FUNCTION'
self.vehicle.parameters[servo_string]=0
msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0)
self.vehicle.send_mavlink(msg)
self.vehicle.flush()
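    # Example (sketch): override_servo(9, 1500) sets SERVO9_FUNCTION to 0 (disabled)
    # and forces output 9 to a 1500 us pulse via MAV_CMD_DO_SET_SERVO.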
def disable_servo(self, servo):
servo_string = 'SERVO' + str(servo) + '_FUNCTION'
servo_trim = 'SERVO' + str(servo) + '_TRIM'
self.vehicle.parameters[servo_string]=0
val = self.vehicle.parameters[servo_trim]
msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0)
self.vehicle.send_mavlink(msg)
self.vehicle.flush()
def enable_servo(self, servo):
servo_string = 'SERVO' + str(servo) + '_FUNCTION'
servo_trim = 'SERVO' + str(servo) + '_TRIM'
val = self.vehicle.parameters[servo_trim]
if self.servo_func[str(servo)] == 0:
val = 0
msg = self.vehicle.message_factory.command_long_encode(0, 0, mavutil.mavlink.MAV_CMD_DO_SET_SERVO, 0, servo, val, 0, 0, 0, 0, 0)
self.vehicle.send_mavlink(msg)
self.vehicle.parameters[servo_string]=self.servo_func[str(servo)]
self.vehicle.flush()
def print_servo_functions(self):
for servo in range(1,17):
servo_string = 'SERVO' + str(servo) + '_FUNCTION'
print(servo_string + ': ' + str(self.vehicle.parameters[servo_string]))
def print_frame_type(self):
print("Q_ENABLE: " + str(self.vehicle.parameters['Q_ENABLE']))
print("Q_FRAME_TYPE: " + str(self.vehicle.parameters['Q_FRAME_TYPE']))
def set_frame_type(self,frame):
self.vehicle.parameters['Q_FRAME_TYPE'] = frame
|
JarrydSteele/pythonscripts
|
First/dk_vehicle.py
|
dk_vehicle.py
|
py
| 4,478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32466009573
|
from tech_news.scraper import get_tech_news
from tech_news.analyzer.search_engine import (
search_by_title, search_by_date, search_by_tag, search_by_category)
from tech_news.analyzer.ratings import (
top_5_news, top_5_categories)
import sys
def choice_0():
amount = input("Digite quantas notícias serão buscadas:")
return get_tech_news(amount)
def choice_1():
title = input("Digite o título:")
return search_by_title(title)
def choice_2():
date = input("Digite a data no formato aaaa-mm-dd:")
return search_by_date(date)
def choice_3():
tag = input("Digite a tag:")
return search_by_tag(tag)
def choice_4():
category = input("Digite a categoria:")
return search_by_category(category)
# Requisito 12
def analyzer_menu():
option = input(
"""Selecione uma das opções a seguir:
0 - Popular o banco com notícias;
1 - Buscar notícias por título;
2 - Buscar notícias por data;
3 - Buscar notícias por tag;
4 - Buscar notícias por categoria;
5 - Listar top 5 notícias;
6 - Listar top 5 categorias;
7 - Sair.
"""
)
if option in ["0", "1", "2", "3", "4"]:
eval(f"choice_{option}")()
elif option == "5":
return top_5_news()
elif option == "6":
return top_5_categories()
elif option == "7":
return print("Encerrando script")
else:
return print("Opção inválida", file=sys.stderr)
|
janaolive/phyton_raspagem_de_dados
|
tech_news/menu.py
|
menu.py
|
py
| 1,427 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
32102918749
|
def findMid(n, arr1, arr2):
if n == 1:
return arr1[0] if arr1[0] < arr2[0] else arr2[0]
s1, e1, s2, e2 = 0, n-1, 0, n-1
while s1 < e1:
mid1 = (e1-s1)//2 + s1
mid2 = (e2-s2)//2 + s2
        # if the number of elements in the range is odd, offset = 0; if even, offset = 1
offset = ((e1-s1+1) & 1) ^1
if arr1[mid1] > arr2[mid2]:
e1 = mid1
s2 = mid2 + offset
elif arr1[mid1] < arr2[mid2]:
s1 = mid1 + offset
e2 = mid2
else:
return arr1[mid1]
return min(arr1[s1], arr2[s2])
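# Example (hedged sketch): findMid(3, [1, 3, 5], [2, 4, 6]) returns 3, the upper
# median (3rd smallest element) of the merged sequence [1, 2, 3, 4, 5, 6].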
n = int(input())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(findMid(n, a, b))
|
Eleanoryuyuyu/LeetCode
|
程序员代码面试指南/二分查找/在两个长度相等的排序数组中找到上中位数.py
|
在两个长度相等的排序数组中找到上中位数.py
|
py
| 712 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69887984508
|
import nltk
import gensim
import cleantext
import re
import xlrd
import sys
from gensim.models import word2vec
from data_treatment import data_treatment
from nltk.corpus import reuters
from nltk.corpus import wordnet as wn
from sklearn.externals import joblib
from nltk.stem import WordNetLemmatizer
class Synonyms_suggestion:
def __init__(self,model_type):
self.model1_path = "E:\\programming\\NLP\\TQCorpus\\GoogleNews-vectors-negative300.bin\\GoogleNews-vectors-negative300.bin"
self.model2_path = "data\\txt\\Economist\\Economist.txt"
if model_type == 0:
pass
if model_type==1:
self.model1 = gensim.models.KeyedVectors.load_word2vec_format(self.model1_path, binary=True)
elif model_type==2:
            self.f_model2_in = open(self.model2_path,"r",encoding="ISO-8859-1") # decode with ISO-8859-1
all_string = self.f_model2_in.read()
all_list = nltk.sent_tokenize(all_string)
train_sentences_model2 = []
for i in range(0,len(all_list)):
train_sentences_model2.append(cleantext.clean(all_list[i]))
train_sentences_model3 = list(reuters.sents())
for sent in train_sentences_model3:
train_sentences_model2.append(sent)
self.model2 = word2vec.Word2Vec(train_sentences_model2, min_count=2, window=3, size=300)
        self.lemmatizer = WordNetLemmatizer() # POS-aware lemmatizer
        # load the hand-written replacement-word tables (manual rules)
self.artificial_word_book = xlrd.open_workbook("data/suggestion/word_by_rule.xls")
self.artificial_word_sheet_vec = self.artificial_word_book.sheet_by_index(0)
self.artificial_word_sheet_adj = self.artificial_word_book.sheet_by_index(1)
self.artificial_word_vec = []
self.artificial_word_adj = []
for i in range(0,self.artificial_word_sheet_vec.ncols):
temp_list = self.artificial_word_sheet_vec.col_values(i)[2:]
temp_list = [w.lower() for w in temp_list]
temp_list = [w for w in temp_list if w != ' ' and w != '']
for i in range(len(temp_list)):
temp_list[i] = self.lemmatizer.lemmatize(temp_list[i], pos='v')
self.artificial_word_vec.append(temp_list)
for i in range(0,self.artificial_word_sheet_adj.ncols):
temp_list = self.artificial_word_sheet_adj.col_values(i)[2:]
temp_list = [w.lower() for w in temp_list]
temp_list = [w for w in temp_list if w != ' ' and w != '']
self.artificial_word_adj.append(temp_list)
def suggestion_word(self,word,sentence,model=2):
        # part-of-speech handling
sentence = nltk.word_tokenize(sentence)
pos_tag_list = nltk.pos_tag(sentence)
tag = pos_tag_list[sentence.index(word)][1]
word = word.lower()
# suggestion by artificial rule
suggestion_list_artificial_rule = []
if tag.startswith('VB'):
word = self.lemmatizer.lemmatize(word, pos='v')
for i in range(0, len(self.artificial_word_vec)):
if word in self.artificial_word_vec[i]:
suggestion_list_artificial_rule = self.artificial_word_vec[i]
break
elif tag.startswith('JJ'):
word = self.lemmatizer.lemmatize(word, pos='a')
for i in range(0, len(self.artificial_word_adj)):
if word in self.artificial_word_adj[i]:
suggestion_list_artificial_rule = self.artificial_word_adj[i]
break
elif tag.startswith('R'):
word = self.lemmatizer.lemmatize(word, pos='r')
for i in range(0, len(self.artificial_word_vec)):
if word in self.artificial_word_adj[i]:
suggestion_list_artificial_rule = self.artificial_word_adj[i]
break
else:
word = self.lemmatizer.lemmatize(word, pos='n')
# suggestion by wordnet
if tag.startswith('NN'):
word_meaning_list = wn.synsets(word, pos=wn.NOUN)
elif tag.startswith('VB'):
word_meaning_list = wn.synsets(word, pos=wn.VERB)
elif tag.startswith('JJ'):
word_meaning_list = wn.synsets(word, pos=wn.ADJ)
elif tag.startswith('R'):
word_meaning_list = wn.synsets(word, pos=wn.ADV)
else:
word_meaning_list = wn.synsets(word)
suggestion_ans_wordnet = []
for word_meaning in word_meaning_list:
lemmas_ans_wordnet = []
word_meaning_hypernyms = word_meaning.hypernyms()
word_meaning_hyponyms = word_meaning.hyponyms()
word_meaning_similar = word_meaning.similar_tos()
lemmas_ans_wordnet+=word_meaning_hyponyms
lemmas_ans_wordnet+=word_meaning_hypernyms
lemmas_ans_wordnet+=word_meaning_similar
for i in range(len(lemmas_ans_wordnet)):
syn = lemmas_ans_wordnet[i]
suggestion_ans_wordnet.append(str(syn.lemmas()[0].name()))
suggestion_ans_wordnet = data_treatment.pretrement_for_synonyms(suggestion_ans_wordnet)
# suggestion by word2vec
suggestion_list_word2vec = []
if model==0:
suggestion_list_word2vec = []
if model==1:
suggestion_list_word2vec = self.model1.most_similar([word],topn=20)
elif model==2:
suggestion_list_word2vec = self.model2.most_similar([word],topn=20)
suggestion_ans_word2vec = []
for i in range (0,len(suggestion_list_word2vec)):
suggestion_ans_word2vec.append(suggestion_list_word2vec[i][0])
suggestion_ans_word2vec = data_treatment.pretrement_for_synonyms(suggestion_ans_word2vec)
        ## replace "_" with spaces
for i in range(len(suggestion_ans_word2vec)):
word = suggestion_ans_word2vec[i]
word = word.replace("_", " ")
if tag.startswith('NN'):
word = self.lemmatizer.lemmatize(word, pos='n')
elif tag.startswith('VB'):
word = self.lemmatizer.lemmatize(word, pos='v')
elif tag.startswith('JJ'):
word = self.lemmatizer.lemmatize(word, pos='a')
elif tag.startswith('R'):
word = self.lemmatizer.lemmatize(word, pos='r')
else:
word = self.lemmatizer.lemmatize(word)
suggestion_ans_word2vec[i] = word
suggestion_ans_word2vec = list(set(suggestion_ans_word2vec))
final_ans = []
final_ans+=suggestion_list_artificial_rule
final_ans += suggestion_ans_wordnet
final_ans += suggestion_ans_word2vec
return final_ans
if __name__=="__main__":
word = sys.argv[1]
sent = sys.argv[2]
mode = sys.argv[3]
# word = "love"
# sent = "i love you"
# mode = 0
synonyms = Synonyms_suggestion(int(mode))
syn_list = synonyms.suggestion_word(word,sent,int(mode))
print(syn_list)
|
caffe-in/TQLwriter
|
后端/Synonyms_suggestion.py
|
Synonyms_suggestion.py
|
py
| 7,026 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30172432184
|
#!/usr/bin/env python3
import io
import time
import serial
from serial.tools.list_ports import comports
class Arduino(object):
def __init__(self, port):
self.port= serial.Serial(port, 115200, timeout=0.1)
self.iow= io.TextIOWrapper(
io.BufferedRWPair(self.port, self.port, 1),
'utf-8',
newline='\r\n'
)
self.reset()
def reset(self):
self.port.setDTR(0)
time.sleep(0.5)
self.port.setDTR(1)
time.sleep(0.5)
def exec_cmd(self, *params):
cmd= 'AT'
if len(params) >= 1:
cmd+= '+' + params[0]
if len(params) >= 2:
cmd+= '=' + ','.join(
map(str, params[1:])
)
self.iow.write(cmd + '\r\n')
resp= list()
for ln in map(str.strip, self.iow):
if ln == 'OK':
return(resp)
elif ln == 'FAIL':
raise(Exception('Arduino Error'))
else:
resp.append(ln)
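    # Example (sketch): exec_cmd('WRITE_HIGH', 13) writes "AT+WRITE_HIGH=13\r\n"
    # and returns the response lines received before the firmware answers "OK".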
class OutputPin(object):
def __init__(self, arduino, pin_no):
self.arduino= arduino
self.pin_no= pin_no
self.arduino.exec_cmd('SET_OUT', self.pin_no)
def turn_on(self):
self.set_state(True)
def turn_off(self):
self.set_state(False)
def set_state(self, state):
self.arduino.exec_cmd(
'WRITE_HIGH' if state else 'WRITE_LOW',
self.pin_no
)
class InputPin(object):
def __init__(self, arduino, pin_no, pullup=False):
self.arduino= arduino
self.pin_no= pin_no
self.arduino.exec_cmd('SET_IN', self.pin_no)
self.arduino.exec_cmd(
'WRITE_HIGH' if pullup else 'WRITE_LOW',
self.pin_no
)
def is_high(self):
res= self.arduino.exec_cmd('PIN_READ', self.pin_no)
state= True if (res[0].split(':')[1] == '1') else False
return(state)
def enumerate_ports():
ports= list(
port[0] for port in comports()
)
return(ports)
|
ComNets-Bremen/GDI-Tutorials
|
target/examples/21_atuino.py
|
21_atuino.py
|
py
| 2,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12441870149
|
import redis
from redis_lru import RedisLRU
from connect import connect
from models import Quote
client = redis.StrictRedis(host="localhost", port=6379, password=None)
cache = RedisLRU(client)
quotes = Quote.objects()
@cache
def find_by_name(value):
finding_quotes = []
full_name = value.split(":")[1]
for quote in quotes:
if quote.author.fullname.lower() == full_name.lower():
finding_quotes.append(quote.quote)
print(finding_quotes)
@cache
def find_by_tag(value):
finding_quotes = []
tags = value.split(":")[1].split(",")
for quote in quotes:
for tag in tags:
if tag in quote.tags:
finding_quotes.append(quote.quote)
print(finding_quotes)
def main():
while True:
command = input("Enter your 'command:value' or 'exit': ")
if command.startswith("name"):
find_by_name(command)
elif command.startswith("tag"):
find_by_tag(command)
elif command.startswith("exit"):
break
else:
print("Wrong command. Please, try again.")
continue
if __name__ == "__main__":
main()
|
DanielDDZ/web_modul_8
|
MongoDB/main.py
|
main.py
|
py
| 1,218 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10159658078
|
# 13. Input any 4 digit number and find out the sum of middle digits
number = int(input('enter a 4 digit no:'))
sum = 0
if number >= 10000:
    print("you entered a number with more than 4 digits")
elif number <= 999:
    print("you entered a number with fewer than 4 digits")
else:
    # drop the last digit, then keep adding the new last digit while more
    # than two digits remain -> this sums the two middle digits
    while number >= 100:
        number = number // 10
        sum = sum + number % 10
    print(sum)
|
suchishree/django_assignment1
|
python/looping/while loop/assignment2/demo13.py
|
demo13.py
|
py
| 411 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12884353742
|
import pandas as pd
from constants import USERINFO
path = f"C:/Users/Asus/Desktop/solarFire/{USERINFO.NAME.replace(' ','_')}.xlsx"
df = pd.read_excel(f'{path}')
#set numeric columns
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
df.columns = columns
print(df.columns)
if USERINFO.GENDER == "K":
print("Female")
else:
print("Male")
house_cups = pd.DataFrame(pd.concat([df[16:19][1], df[16:19][4], df[16:19][7], df[16:19][10]], ignore_index=True))
house_cups["index"] = [int(i.strip()[0:2]) for i in house_cups[0]]
planets = df[22:36][[1, 2]].reset_index(drop=True)
|
furkancets/astro-bot
|
src/dataPrep.py
|
dataPrep.py
|
py
| 592 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13525657769
|
#1bc test code against sklearn - optimizer
# import necessary packages
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from tqdm import tqdm
# ensure the same random numbers appear every time
np.random.seed(0)
# Make data.
x = np.arange(0, 1, 0.05)
y = np.arange(0, 1, 0.05)
x, y = np.meshgrid(x,y)
m = len(x)
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
#adding normalized noise to the Franke function
sigma2 = 0.1
z = FrankeFunction(x, y) + np.random.normal(0,sigma2, len(x))
x = np.ravel(x)
y = np.ravel(y)
z = np.ravel(z)
# The design matrix now as function of a given polynomial
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
idx = 0
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,idx] = (x**(i-k))*(y**k)
idx +=1
return X
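# NOTE (review comment): the design matrix below is built from (x, z), so the noisy
# target z leaks into the features; create_X(x, y, n=7) is probably what was intended.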
X = create_X(x, z, n=7)
# We split the data in test and training data
X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=0.2)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train[:,0] = 1
X_test[:,0] = 1
# one-liner from scikit-learn library
train_size = 0.8
test_size = 1 - train_size
X_train, X_test, Y_train, Y_test = train_test_split(X, z, train_size=train_size,
test_size=test_size)
max_iter=5000 #default=200
score = 0
etalist = np.logspace(-1,-6, num=20)
reglist= np.logspace(0,-10, num=20)
optimal = [0,0]
for eta in tqdm(etalist):
for lmbd in reglist:
regr = MLPRegressor(activation='relu',solver='sgd',alpha=lmbd,learning_rate_init=eta,
max_iter=max_iter).fit(X_train, Y_train)
#regr.predict(X_test)
if score < regr.score(X_test, Y_test):
score = regr.score(X_test, Y_test)
optimal[0] = eta; optimal[1] = lmbd
print('optimal score = ', score)
print('optimal learning rate = ', optimal[0])
print('optimal lambda =', optimal[1])
'''
PS C:\python\project2> python .\1bcMLPReg2.py
100%|█████████████████████████████████████████████████████████████████████████████████████████| 20/20 [01:24<00:00, 4.24s/it]
optimal score = 0.9670078678439595
optimal learning rate = 0.0545559478116852
optimal lambda = 1e-10
'''
''' relu
PS C:\python\project2> python .\1bcMLPReg2.py
100%|█████████████████████████████████████████████████████████████████████████████████████████| 20/20 [01:53<00:00, 5.67s/it]
optimal score = 0.992733368588985
optimal learning rate = 0.0545559478116852
optimal lambda = 1e-10
PS C:\python\project2>
'''
|
gery2/FYS-STK-4155---Project-2
|
Codes/1bcMLPReg2.py
|
1bcMLPReg2.py
|
py
| 3,760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1775721628
|
import datetime
EXISTING_TYPES = (
(0, "Пионер"),
(1, "Педсостав"),
)
SIGN_SET = (
('dining_services+', "Дежурный в столовой"),
('activity+', "Активность"),
('salary+', "Зарплата"),
('fee+', "Гонорар"),
('purchase-', "Покупка"),
('fine-', "Штраф"),
)
SIGN_SET_ALL = (
('p2p+', "Личный перевод"),
('dining_services+', "Дежурный в столовой"),
('activity+', "Активность"),
('salary+', "Зарплата"),
('fee+', "Гонорар"),
('purchase-', "Покупка"),
('fine-', "Штраф"),
)
DATE_START_OF_ = datetime.date(year=2024, month=6, day=25)
DATE_END_OF_ = datetime.date(year=2024, month=6, day=25) + datetime.timedelta(weeks=3)
|
RegSirius06/SWBM
|
constants/bank/forms.py
|
forms.py
|
py
| 810 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8318915337
|
import mtools
zones = range(1, 21) + [25]
tags = "SDT"
class Score:
def __init__(self, v, t):
self.value = v
self.tag = t
def __str__(self):
return '(%s, %s)' % (self.value, self.tag)
def init_points():
dps = [Score(2*x, "D"+str(x)) for x in zones]
ps = []
for i in range(3):
for z in zones:
v = (i+1)*z
if (v != 75): #no treble of bull
s = Score(v, tags[i]+str(z))
ps.append(s)
ps.sort(cmp = lambda x,y: cmp(x.value, y.value))
return dps, ps
def encode_way(l):
if len(l) == 3 and l[2] > l[1]:
t = l[2]
l[2] = l[1]
l[1] = t
return ''.join(l)
class SearchContext:
def __init__(self, cnt):
self.count = cnt
self.ways = {}
self.steps = []
def add_step(self, step):
self.steps.append(step)
def remove_step(self):
self.steps.pop()
def account_checkout(self):
ew = encode_way(list(self.steps))
try:
if self.ways[ew]:
return
except KeyError:
self.ways[ew] = True
self.count += 1
def dart(n, i, sc):
assert i == 1 or i == 2 or i == 3
#allocate in reverse order
#last dart must double
if i == 1:
for p in double_points:
n1 = n
n -= p.value
step = p.tag
if (n > 0):
sc.add_step(step)
dart(n, 2, sc)
sc.remove_step()
elif (n == 0):
sc.add_step(step)
sc.account_checkout()
sc.remove_step()
else:
return
n = n1
elif i == 2:
#free allocation
for p in points:
n1 = n
n -= p.value
step = p.tag
if (n > 0):
sc.add_step(step)
dart(n, 3, sc)
sc.remove_step()
elif (n == 0):
sc.add_step(step)
sc.account_checkout()
sc.remove_step()
else:
return
n = n1
else:
#done allocation
for p in points:
n1 = n
n -= p.value
step = p.tag
if n == 0:
sc.add_step(step)
sc.account_checkout()
sc.remove_step()
if n < 0:
return
n = n1
def checkout(n):
sc = SearchContext(0)
dart(n, 1, sc)
return sc.count
def total_checkout(m):
l = [(n, checkout(n)) for n in range(1, m)]
a = 0
for (x, y) in l:
if y > 0:
a += y
return a
class q109:
def test(self):
assert 11 == checkout(6)
assert 5 == checkout(5)
assert 42336 == total_checkout(171)
def solve(self):
return total_checkout(100)
if __name__ == "__main__":
double_points, points = init_points()
mtools.run(q109())
|
chrisliu529/euler_proj
|
src/p109.py
|
p109.py
|
py
| 3,131 |
python
|
en
|
code
| 1 |
github-code
|
6
|
81653711
|
import json
import os
from corai_util.tools.src.function_json import zip_json, unzip_json
def list_of_dicts_to_txt(parameter_options, column_size=15, file_name="config.txt"):
"""
Writes the parameter options in a formatted file, the header of the file contains the parameter names,
each following line contains an entry from the parameter options.
Args:
parameter_options: The list of dictionaries to be written to the file
column_size: The size of the columns in the file
file_name: The path to where the config file should be written
Returns:
None
"""
# get the names of all the parameters
p_names = list(parameter_options[0])
# get the number of values for each parameter
length = len(p_names)
# start with the line name
line_pattern = ""
for i in range(length):
line_pattern += " {:>" + str(column_size) + "}"
line_pattern += "\n"
with open(file_name, "w") as file:
line = line_pattern.format(*p_names)
file.write(line)
for p_option in parameter_options:
values = []
for p_name in p_names:
values.append(p_option[p_name])
line = line_pattern.format(*values)
file.write(line)
def list_of_dicts_to_json(list_of_dicts, file_name="config.json", compress=False):
"""
Writes the list_of_dicts to a json file.
Create a directory if the path yields a non-existent directory.
Args:
list_of_dicts(list<dict>): to be written to the file
file_name (str): The path to where the config file should be written with extension.
compress: Boolean to specify if compression should be applied before writing to the file.
Returns:
None
"""
if compress:
list_of_dicts = zip_json(list_of_dicts)
directory_where_to_save = os.path.dirname(file_name)
if not os.path.exists(directory_where_to_save):
if directory_where_to_save != '':
os.makedirs(directory_where_to_save)
with open(file_name, 'w') as file:
json.dump(list_of_dicts, file)
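
# --- Illustrative usage sketch (added; not part of CorAI, file names are made up) ---
# Shows how the two writers above are expected to be called, per their docstrings:
# list_of_dicts_to_txt writes a fixed-width text table, list_of_dicts_to_json writes an
# (optionally compressed) JSON file and creates the target directory if needed.
def _example_write_configs():  # hypothetical helper, not used elsewhere in the library
    options = [{"lr": 0.01, "batch": 32}, {"lr": 0.001, "batch": 64}]
    list_of_dicts_to_txt(options, column_size=10, file_name="config.txt")
    list_of_dicts_to_json(options, file_name="out/config.json", compress=False)
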
def json2python(path, compress=False):
with open(path, 'r') as file:
dict = json.load(file)
if compress:
dict = unzip_json(dict)
file.close()
return dict
def factory_fct_linked_path(ROOT_DIR, path_to_folder):
"""
Semantics:
Args:
ROOT_DIR: Path to the root of the project.
path_to_folder: a path written in the format you want because we use the function os.path.join to link it.
Returns:
The linker
Examples:
linked_path = factory_fct_linked_path(ROOT_DIR, "path/a"):
path_save_history = linked_path(['plots', f"best_score_{nb}.pth"])
#and ROOT_DIR should be imported from a script at the root where it is written:
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
"""
# example:
PATH_TO_ROOT = os.path.join(ROOT_DIR, path_to_folder)
def linked_path(path):
# a list of folders like: ['C','users','name'...]
# when adding a '' at the end like
# path_to_directory = linker_path_to_result_file([path, ''])
# one adds a \ at the end of the path. This is necessary in order to continue writing the path.
return os.path.join(PATH_TO_ROOT, *path)
return linked_path
|
Code-Cornelius/CorAI
|
corai_util/tools/src/function_writer.py
|
function_writer.py
|
py
| 3,449 |
python
|
en
|
code
| 3 |
github-code
|
6
|
42842609252
|
# This is a replacement for test/integration/inflate_tokens.sh.
# The original script had a lot of problems as described in https://app.zenhub.com/workspaces/current-sprint---engineering-615a2e9fe2abd5001befc7f9/issues/sifchain/issues/719.
# See https://www.notion.so/sifchain/TEST-TOKEN-DISTRIBUTION-PROCESS-41ad0861560c4be58918838dbd292497
import json
import re
from typing import Any, Mapping, Iterable, Sequence
from siftool import eth, test_utils, cosmos
from siftool.common import *
log = siftool_logger(__name__)
TokenDict = Mapping[str, Any]
class InflateTokens:
def __init__(self, ctx: test_utils.EnvCtx):
self.ctx = ctx
self.wait_for_account_change_timeout = 120
self.excluded_token_symbols = ["erowan"] # TODO peggy1 only
# Only transfer this tokens in a batch for Peggy1. See #2397. You would need to adjust this if
# test_inflate_tokens_short is passing, but test_inflate_tokens_long is timing out. It only applies to Peggy 1.
# The value of 3 is experimental; if tokens are still not getting across the bridge reliably, reduce the value
# down to 1 (minimum). The lower the value the more time the transfers will take as there will be more
# sequential transfers instead of parallel.
self.max_ethereum_batch_size = 0
def get_whitelisted_tokens(self) -> List[TokenDict]:
whitelist = self.ctx.get_whitelisted_tokens_from_bridge_bank_past_events()
ibc_pattern = re.compile("^ibc/([0-9a-fA-F]{64})$")
result = []
for token_addr, value in whitelist.items():
token_data = self.ctx.get_generic_erc20_token_data(token_addr)
token_symbol = token_data["symbol"]
token = {
"address": token_addr,
"symbol": token_symbol,
"name": token_data["name"],
"decimals": token_data["decimals"],
"is_whitelisted": value,
"sif_denom": self.ctx.eth_symbol_to_sif_symbol(token_symbol),
}
m = ibc_pattern.match(token_symbol)
if m:
token["ibc"] = m[1].lower()
log.debug("Found whitelisted entry: {}".format(repr(token_data)))
assert token_symbol not in result, f"Symbol {token_symbol} is being used by more than one whitelisted token"
result.append(token)
erowan_token = [t for t in result if t["symbol"] == "erowan"]
# These assertions are broken in Tempnet, possibly indicating missing/incomplete chain init, see README.md
        # for comparison of steps
assert len(erowan_token) == 1, "erowan is not whitelisted, probably bad/incomplete deployment"
assert erowan_token[0]["is_whitelisted"], "erowan is un-whitelisted"
return result
def wait_for_all(self, pending_txs):
result = []
for txhash in pending_txs:
txrcpt = self.ctx.eth.wait_for_transaction_receipt(txhash)
result.append(txrcpt)
return result
def build_list_of_tokens_to_create(self, existing_tokens: Iterable[TokenDict], requested_tokens: Iterable[TokenDict]
) -> Sequence[Mapping[str, Any]]:
"""
This part deploys SifchainTestoken for every requested token that has not yet been deployed.
The list of requested tokens is (historically) read from assets.json, but in practice it can be
a subset of tokens that are whitelisted in production.
The list of existing tokens is reconstructed from past LogWhiteListUpdate events of the BridgeBank
smart contract (since there is no way to "dump" the contents of a mapping in Solidity).
Deployed tokens are whitelisted with BridgeBank, minted to owner's account and approved to BridgeBank.
This part only touches EVM chain through web3.
"""
# Strictly speaking we could also skip tokens that were un-whitelisted (value == False) since the fact that
# their addresses appear in BridgeBank's past events implies that the corresponding ERC20 smart contracts have
# been deployed, hence there is no need to deploy them.
tokens_to_create = []
for token in requested_tokens:
token_symbol = token["symbol"]
if token_symbol in self.excluded_token_symbols:
assert False, f"Token {token_symbol} cannot be used by this procedure, please remove it from list of requested assets"
existing_token = zero_or_one(find_by_value(existing_tokens, "symbol", token_symbol))
if existing_token is None:
tokens_to_create.append(token)
else:
if not all(existing_token[f] == token[f] for f in ["name", "decimals"]):
assert False, "Existing token's name/decimals does not match requested for token: " \
"requested={}, existing={}".format(repr(token), repr(existing_token))
if existing_token["is_whitelisted"]:
log.info(f"Skipping deployment of smmart contract for token {token_symbol} as it should already exist")
else:
log.warning(f"Skipping token {token_symbol} as it is currently un-whitelisted")
return tokens_to_create
def create_new_tokens(self, tokens_to_create: Iterable[TokenDict]) -> Sequence[TokenDict]:
pending_txs = []
for token in tokens_to_create:
token_name = token["name"]
token_symbol = token["symbol"]
token_decimals = token["decimals"]
log.info(f"Deploying generic ERC20 smart contract for token {token_symbol}...")
txhash = self.ctx.tx_deploy_new_generic_erc20_token(self.ctx.operator, token_name, token_symbol, token_decimals)
pending_txs.append(txhash)
token_contracts = [self.ctx.get_generic_erc20_sc(txrcpt.contractAddress) for txrcpt in self.wait_for_all(pending_txs)]
new_tokens = []
pending_txs = []
for token_to_create, token_sc in [[tokens_to_create[i], c] for i, c in enumerate(token_contracts)]:
token_symbol = token_to_create["symbol"]
token_name = token_to_create["name"]
token_decimals = token_to_create["decimals"]
assert token_sc.functions.totalSupply().call() == 0
assert token_sc.functions.name().call() == token_name
assert token_sc.functions.symbol().call() == token_symbol
assert token_sc.functions.decimals().call() == token_decimals
new_tokens.append({
"address": token_sc.address,
"symbol": token_symbol,
"name": token_name,
"decimals": token_decimals,
"is_whitelisted": True,
"sif_denom": self.ctx.eth_symbol_to_sif_symbol(token_symbol),
})
if not on_peggy2_branch:
txhash = self.ctx.tx_update_bridge_bank_whitelist(token_sc.address, True)
pending_txs.append(txhash)
self.wait_for_all(pending_txs)
return new_tokens
def mint(self, list_of_tokens_addrs, amount_in_tokens, mint_recipient):
pending_txs = []
for token_addr in list_of_tokens_addrs:
token_sc = self.ctx.get_generic_erc20_sc(token_addr)
decimals = token_sc.functions.decimals().call()
amount = amount_in_tokens * 10**decimals
txhash = self.ctx.tx_testing_token_mint(token_sc, self.ctx.operator, amount, mint_recipient)
pending_txs.append(txhash)
self.wait_for_all(pending_txs)
def transfer_from_eth_to_sifnode(self, from_eth_addr, to_sif_addr, tokens_to_transfer, amount_in_tokens, amount_eth_gwei):
sif_balances_before = self.ctx.get_sifchain_balance(to_sif_addr)
sent_amounts = []
pending_txs = []
for token in tokens_to_transfer:
token_addr = token["address"]
decimals = token["decimals"]
token_sc = self.ctx.get_generic_erc20_sc(token_addr)
amount = amount_in_tokens * 10**decimals
pending_txs.extend(self.ctx.tx_approve_and_lock(token_sc, from_eth_addr, to_sif_addr, amount))
sent_amounts.append([amount, token["sif_denom"]])
if amount_eth_gwei > 0:
amount = amount_eth_gwei * eth.GWEI
pending_txs.append(self.ctx.tx_bridge_bank_lock_eth(from_eth_addr, to_sif_addr, amount))
sent_amounts.append([amount, self.ctx.ceth_symbol])
self.wait_for_all(pending_txs)
log.info("{} Ethereum transactions commited: {}".format(len(pending_txs), repr(sent_amounts)))
# Wait for intermediate_sif_account to receive all funds across the bridge
previous_block = self.ctx.eth.w3_conn.eth.block_number
self.ctx.advance_blocks()
log.info("Ethereum blocks advanced by {}".format(self.ctx.eth.w3_conn.eth.block_number - previous_block))
self.ctx.sifnode.wait_for_balance_change(to_sif_addr, sif_balances_before, min_changes=sent_amounts,
polling_time=5, timeout=0, change_timeout=self.wait_for_account_change_timeout)
# Distributes from intermediate_sif_account to each individual account
def distribute_tokens_to_wallets(self, from_sif_account, tokens_to_transfer, amount_in_tokens, target_sif_accounts, amount_eth_gwei):
send_amounts = [[amount_in_tokens * 10**t["decimals"], t["sif_denom"]] for t in tokens_to_transfer]
if amount_eth_gwei > 0:
send_amounts.append([amount_eth_gwei * eth.GWEI, self.ctx.ceth_symbol])
progress_total = len(target_sif_accounts) * len(send_amounts)
progress_current = 0
for sif_acct in target_sif_accounts:
remaining = send_amounts
while remaining:
batch_size = len(remaining)
if (self.ctx.sifnode.max_send_batch_size > 0) and (batch_size > self.ctx.sifnode.max_send_batch_size):
batch_size = self.ctx.sifnode.max_send_batch_size
batch = remaining[:batch_size]
remaining = remaining[batch_size:]
sif_balance_before = self.ctx.get_sifchain_balance(sif_acct)
self.ctx.send_from_sifchain_to_sifchain(from_sif_account, sif_acct, batch)
self.ctx.sifnode.wait_for_balance_change(sif_acct, sif_balance_before, min_changes=batch,
polling_time=2, timeout=0, change_timeout=self.wait_for_account_change_timeout)
progress_current += batch_size
log.debug("Distributing tokens to wallets: {:0.0f}% done".format((progress_current/progress_total) * 100))
def export(self):
return [{
"symbol": token["symbol"],
"name": token["name"],
"decimals": token["decimals"]
} for token in self.get_whitelisted_tokens() if ("ibc" not in token) and (token["symbol"] not in self.excluded_token_symbols)]
def transfer(self, requested_tokens: Sequence[TokenDict], token_amount: int,
target_sif_accounts: Sequence[cosmos.Address], eth_amount_gwei: int
):
"""
        It goes like this:
        1. Starting with an assets.json of your choice, it first compares the list of tokens to the existing whitelist and deploys any new tokens (ones that have not yet been whitelisted).
        2. For each token in assets.json it mints the given amount of every listed token to the OPERATOR account.
        3. It does a single transaction across the bridge to move all tokens from OPERATOR to sif_broker_account.
        4. It distributes the tokens from sif_broker_account to each of the given target accounts.
        The sif_broker_account and OPERATOR can be any Sifchain and Ethereum accounts; we might want to use something
        familiar so that any tokens that get stuck if the script is interrupted can be recovered.
"""
# TODO Add support for "rowan"
n_accounts = len(target_sif_accounts)
total_token_amount = token_amount * n_accounts
total_eth_amount_gwei = eth_amount_gwei * n_accounts
# Calculate how much rowan we need to fund intermediate account with. This is only an estimation at this point.
# We need to take into account that we might need to break transfers in batches. The number of tokens is the
# number of ERC20 tokens plus one for ETH, rounded up. 5 is a safety factor
number_of_batches = 1 if self.ctx.sifnode.max_send_batch_size == 0 else (len(requested_tokens) + 1) // self.ctx.sifnode.max_send_batch_size + 1
fund_rowan = [5 * test_utils.sifnode_funds_for_transfer_peggy1 * n_accounts * number_of_batches, "rowan"]
log.debug("Estimated number of batches needed to transfer tokens from intermediate sif account to target sif wallet: {}".format(number_of_batches))
log.debug("Estimated rowan funding needed for intermediate account: {}".format(fund_rowan))
ether_faucet_account = self.ctx.operator
sif_broker_account = self.ctx.create_sifchain_addr(fund_amounts=[fund_rowan])
eth_broker_account = self.ctx.operator
if (total_eth_amount_gwei > 0) and (ether_faucet_account != eth_broker_account):
self.ctx.eth.send_eth(ether_faucet_account, eth_broker_account, total_eth_amount_gwei)
log.info("Using eth_broker_account {}".format(eth_broker_account))
log.info("Using sif_broker_account {}".format(sif_broker_account))
# Check first that we have the key for ROWAN_SOURCE since the script uses it as an intermediate address
keys = self.ctx.sifnode.keys_list()
rowan_source_key = zero_or_one([k for k in keys if k["address"] == sif_broker_account])
assert rowan_source_key is not None, "Need private key of broker account {} in sifnoded test keyring".format(sif_broker_account)
existing_tokens = self.get_whitelisted_tokens()
tokens_to_create = self.build_list_of_tokens_to_create(existing_tokens, requested_tokens)
log.info("Existing tokens: {}".format(len(existing_tokens)))
log.info("Requested tokens: {}".format(len(requested_tokens)))
log.info("Tokens to create: {}".format(len(tokens_to_create)))
new_tokens = self.create_new_tokens(tokens_to_create)
existing_tokens.extend(new_tokens)
# At this point, all tokens that we want to transfer should exist both on Ethereum blockchain as well as in
# existing_tokens.
tokens_to_transfer = [exactly_one(find_by_value(existing_tokens, "symbol", t["symbol"]))
for t in requested_tokens]
self.mint([t["address"] for t in tokens_to_transfer], total_token_amount, eth_broker_account)
if (self.max_ethereum_batch_size > 0) and (len(tokens_to_transfer) > self.max_ethereum_batch_size):
log.debug(f"Transferring {len(tokens_to_transfer)} tokens from ethereum to sifndde in batches of {self.max_ethereum_batch_size}...")
remaining = tokens_to_transfer
while remaining:
batch = remaining[:self.max_ethereum_batch_size]
remaining = remaining[self.max_ethereum_batch_size:]
self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, batch, total_token_amount, 0)
log.debug(f"Batch completed, {len(remaining)} tokens remaining")
# Transfer ETH separately
log.debug("Thansfering ETH from ethereum to sifnode...")
self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, [], 0, total_eth_amount_gwei)
else:
log.debug(f"Transferring {len(tokens_to_transfer)} tokens from ethereum to sifnode in single batch...")
self.transfer_from_eth_to_sifnode(eth_broker_account, sif_broker_account, tokens_to_transfer, total_token_amount, total_eth_amount_gwei)
self.distribute_tokens_to_wallets(sif_broker_account, tokens_to_transfer, token_amount, target_sif_accounts, eth_amount_gwei)
log.info("Done.")
log.info("To see newly minted tokens in UI, you need to edit 'scripts/ibc/tokenregistry/generate-erc20-jsons.sh' "
"and add any tokens that are not already there. Then cd into the directory and run './generate-erc20-jsons.sh devnet' "\
"and commit the results in the sifchain-devnet-1 folder. @tim will pick up the PR and register it on "
"devnet by running './register-one.sh' with the registry key. In the future this might be open for anybody "
"to do on their own for devnet and testnet.")
def transfer_eth(self, from_eth_addr: eth.Address, amount_gwei: int, target_sif_accounts: Iterable[cosmos.Address]):
pending_txs = []
for sif_acct in target_sif_accounts:
txrcpt = self.ctx.tx_bridge_bank_lock_eth(from_eth_addr, sif_acct, amount_gwei * eth.GWEI)
pending_txs.append(txrcpt)
self.wait_for_all(pending_txs)
def run(*args):
# This script should be run with SIFTOOL_ENV_FILE set to a file containing definitions for OPERATOR_ADDRESS,
    # ROWAN_SOURCE etc. Depending on whether you're running it on Peggy1 or Peggy2, the format might be different.
# See get_env_ctx() for details.
assert not on_peggy2_branch, "Not supported yet on peggy2.0 branch"
ctx = test_utils.get_env_ctx()
script = InflateTokens(ctx)
script.wait_for_account_change_timeout = 1800 # For Ropsten we need to wait for 50 blocks i.e. ~20 min = 1200 s
cmd = args[0]
args = args[1:]
if cmd == "export":
# Usage: inflate_tokens.py export assets.json
ctx.cmd.write_text_file(args[0], json.dumps(script.export(), indent=4))
elif cmd == "transfer":
# Usage: inflate_tokens.py transfer assets.json token_amount accounts.json amount_eth_gwei
assets_json_file, token_amount, accounts_json_file, amount_eth_gwei = args
tokens = json.loads(ctx.cmd.read_text_file(assets_json_file))
accounts = json.loads(ctx.cmd.read_text_file(accounts_json_file))
script.transfer(tokens, int(token_amount), accounts, int(amount_eth_gwei))
else:
raise Exception("Invalid usage")
if __name__ == "__main__":
import sys
basic_logging_setup()
run(*sys.argv[1:])
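
# --- Illustrative sketch (added; not part of the original script) ---
# Per the usage comments in run() above, a "transfer" invocation looks roughly like
#   python inflate_tokens.py transfer assets.json 1000 accounts.json 10
# where assets.json holds entries shaped like export()'s output and accounts.json is a
# list of sif addresses. The concrete values below are assumptions, not real data.
_EXAMPLE_ASSETS = [{"symbol": "usdt", "name": "Tether USD", "decimals": 6}]  # hypothetical token entry
_EXAMPLE_ACCOUNTS = ["sif1..."]  # placeholder address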
|
Sifchain/sifnode
|
test/integration/framework/src/siftool/inflate_tokens.py
|
inflate_tokens.py
|
py
| 18,307 |
python
|
en
|
code
| 106 |
github-code
|
6
|
3676061367
|
a = input()
a = a.split()
k = int(a[0])
m = int(a[1])
n = 1
count = 1
while not n % m ==k:
count = count + 1
n = n * 10 + 1
print(count)
|
yingziyu-llt/OI
|
c/Luogu/水题/U38228 签到题.py
|
U38228 签到题.py
|
py
| 156 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21888795134
|
import time
from pyvisauto import Region
import api.api_core as api
import fleet.fleet_core as flt
import config.config_core as cfg
import nav.nav as nav
import stats.stats_core as sts
import util.kca as kca_u
from kca_enums.kcsapi_paths import KCSAPIEnum
from util.logger import Log
class FactoryCore(object):
enabled = False
disable_timer = 0
order_oil_region = {1 : "order_oil_region_1",
10 : "order_oil_region_10",
100 : "order_oil_region_100"}
order_ammo_region = {1 : "order_ammo_region_1",
10 : "order_ammo_region_10",
100 : "order_ammo_region_100"}
order_steel_region = {1 : "order_steel_region_1",
10 : "order_steel_region_10",
100 : "order_steel_region_100"}
order_bauxite_region = {1 : "order_bauxite_region_1",
10 : "order_bauxite_region_10",
100 : "order_bauxite_region_100"}
order_resource_region = [order_oil_region,
order_ammo_region,
order_steel_region,
order_bauxite_region]
def __init__(self):
self.enabled = cfg.config.factory.enabled
pass
def set_timer(self):
self.disable_timer = time.time()
def disable_time_up(self):
return time.time() > self.disable_timer + (15 * 60)
def develop_logic(self, count):
self.goto()
oil, ammo, steel, bauxite = self.read_config_develop()
return self.develop(oil, ammo, steel, bauxite, count)
def build_logic(self, count):
self.goto()
oil, ammo, steel, bauxite = self.read_config_build()
return self.build(oil, ammo, steel, bauxite, count)
def goto(self):
nav.navigate.to('development')
def develop(self, oil, ammo, steel, bauxite, count):
"""Place the develop order"""
"""Assume currently at factory page when called"""
while count > 0:
"""click develop"""
retry = 0
while not kca_u.kca.exists(
'lower', "factory|develop_menu.png") and retry < 5:
kca_u.kca.r["develop_region"].click()
kca_u.kca.sleep(1)
retry += 1
if retry == 5:
Log.log_error("Cannot open develop menu, probably because the port is full")
Log.log_error("Disable factory module")
self.enabled = False
return False
resource_list = [oil, ammo, steel, bauxite]
for i in range(4):
"""The init 10 point of resource on the order"""
resource = resource_list[i]
resource -= 10
while resource >= 100:
kca_u.kca.r[self.order_resource_region[i][100]].click()
                    kca_u.kca.sleep()
resource -= 100
while resource >= 10:
kca_u.kca.r[self.order_resource_region[i][10]].click()
                    kca_u.kca.sleep()
resource -= 10
while resource >= 1:
kca_u.kca.r[self.order_resource_region[i][1]].click()
                    kca_u.kca.sleep()
resource -= 1
if count >= 3:
"""click triple develop"""
kca_u.kca.r["use_item_region"].click()
                kca_u.kca.sleep()
count -= 3
else:
count -= 1
kca_u.kca.r["order_confirm_region"].click()
kca_u.kca.wait('lower_right_corner', 'global|next_alt.png', 20)
while kca_u.kca.exists('lower_right_corner', 'global|next_alt.png'):
kca_u.kca.sleep()
kca_u.kca.r['shipgirl'].click()
kca_u.kca.r['top'].hover()
kca_u.kca.sleep()
return True
def build(self, oil, ammo, steel, bauxite, count):
"""Place the build order"""
"""Assume currently at factory page when called"""
while count > 0:
kca_u.kca.sleep(1)
"""return false if both slots are occupied"""
if kca_u.kca.exists("build_slot_1_stat_region",
"factory|build_progressing.png")\
and \
kca_u.kca.exists("build_slot_2_stat_region",
"factory|build_progressing.png"):
return False
build_slot_stat = {1:"build_slot_1_stat_region",
2:"build_slot_2_stat_region"}
build_slot = {1:"build_slot_1_region",
2:"build_slot_2_region"}
"""receive if a build is done"""
for i in range(1,3):
if kca_u.kca.exists(build_slot_stat[i],
"factory|build_finish.png"):
kca_u.kca.r[build_slot[i]].click()
kca_u.kca.sleep(1)
retry = 0
while not kca_u.kca.exists(
build_slot_stat[i], "factory|build_idle.png")\
and retry < 10:
kca_u.kca.r[build_slot_stat[i]].click()
kca_u.kca.sleep(3)
retry += 1
if retry == 10:
Log.log_error("Cannot receive ship, probably because the port is full")
Log.log_error("Disable factory module")
self.enabled = False
return False
while kca_u.kca.exists('lower_right_corner', 'global|next_alt.png'):
kca_u.kca.sleep()
kca_u.kca.r['shipgirl'].click()
kca_u.kca.r['top'].hover()
kca_u.kca.sleep()
kca_u.kca.wait('lower', 'factory|factory_init.png', 20)
"""place the order on a empty slot"""
for j in range(1,3):
if kca_u.kca.exists(build_slot_stat[j],
"factory|build_idle.png"):
"""click build slot"""
retry = 0
while not kca_u.kca.exists(
'lower', "factory|develop_menu.png") and retry < 5:
kca_u.kca.r[build_slot[j]].click()
kca_u.kca.sleep(1)
retry += 1
if retry == 5:
Log.log_error("Cannot open develop menu, probably because the port is full")
Log.log_error("Disable factory module")
self.enabled = False
return False
resource_list = [oil, ammo, steel, bauxite]
for i in range(4):
"""The init 30 point of resource on the order"""
resource = resource_list[i]
resource -= 30
while resource >= 100:
kca_u.kca.r[self.order_resource_region[i][100]].click()
                            kca_u.kca.sleep()
resource -= 100
while resource >= 10:
kca_u.kca.r[self.order_resource_region[i][10]].click()
                            kca_u.kca.sleep()
resource -= 10
while resource >= 1:
kca_u.kca.r[self.order_resource_region[i][1]].click()
                            kca_u.kca.sleep()
resource -= 1
kca_u.kca.r["order_confirm_region"].click()
kca_u.kca.wait('lower', 'factory|factory_init.png', 20)
count -= 1
if count <= 0:
break
"""all requested build seccessfully done"""
return True
def read_config_develop(self):
oil = cfg.config.factory.develop["recipe"][0]
ammo = cfg.config.factory.develop["recipe"][1]
steel = cfg.config.factory.develop["recipe"][2]
bauxite = cfg.config.factory.develop["recipe"][3]
return oil, ammo, steel, bauxite
def read_config_build(self):
oil = cfg.config.factory.build["recipe"][0]
ammo = cfg.config.factory.build["recipe"][1]
steel = cfg.config.factory.build["recipe"][2]
bauxite = cfg.config.factory.build["recipe"][3]
return oil, ammo, steel, bauxite
factory = FactoryCore()
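
# --- Illustrative sketch (added; assumed usage, not part of kcauto) ---
# Callers are expected to use the module-level singleton above. The recipes read by
# read_config_develop/read_config_build are 4-element lists in [oil, ammo, steel, bauxite] order.
def _example_factory_cycle():  # hypothetical helper, never called by kcauto itself
    if factory.enabled:
        factory.develop_logic(count=1)  # place one development order
        factory.build_logic(count=1)    # place one construction order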
|
XVs32/kcauto_custom
|
kcauto/factory/factory_core.py
|
factory_core.py
|
py
| 8,911 |
python
|
en
|
code
| 5 |
github-code
|
6
|
4992730292
|
import torch
import numpy as np
import math
import torch.nn.functional as F
import re
import nltk, json
from fairseq import pybleu, options, progress_bar, tasks, tokenizer, utils, strategies
from fairseq.meters import TimeMeter
from fairseq.strategies.strategy_utils import duplicate_encoder_out
def getSubstitutePairs(pred_lst, input_lst):
def LCS(A,B):
A.append('0')
B.append('0')
n = len(A)
m = len(B)
A.insert(0,'0')
B.insert(0,'0')
        # 2D table L stores the lengths of the common subsequences
L = [ ([0]*(m+1)) for i in range(n+1) ]
        # 2D table C stores the backtracking direction used to reconstruct the LCS
C = [ ([0]*(m+1)) for i in range(n+1) ]
for x in range (0,n+1):
for y in range (0,m+1):
if (x==0 or y==0):
L[x][y] = 0
elif A[x] == B[y]:
L[x][y] = ( L[x-1][y-1] + 1 )
C[x][y] = 0
elif L[x-1][y] >= L[x][y-1]:
L[x][y] = L[x-1][y]
C[x][y] = 1
else:
L[x][y] = L[x][y-1]
C[x][y] = -1
return L[n][m],C,n,m
def printLCS(C,A,x,y):
if ( x == 0 or y == 0):
return 0
if C[x][y] == 0:
printLCS(C,A,x-1,y-1)
lcsres.append(A[x])
elif C[x][y] == 1:
printLCS(C,A,x-1,y)
else:
printLCS(C,A,x,y-1)
length,C,x,y = LCS(pred_lst, input_lst)
lcsres = []
printLCS(C,pred_lst,x,y)
ret = []
i, j, k = 1, 1, 0
word2change, substitute = [], []
while k < len(lcsres):
if pred_lst[i] == lcsres[k] and input_lst[j] == lcsres[k]:
i += 1; j += 1; k += 1
word2change, substitute = [], []
else:
while pred_lst[i] != lcsres[k]:
substitute.append(re.sub('\.|,', '', pred_lst[i]))
i += 1
while input_lst[j] != lcsres[k]:
word2change.append(re.sub('\.|,', '', input_lst[j]))
j += 1
if len(word2change) != len(substitute):
ret.append((' '.join(word2change), ' '.join(substitute), i-len(word2change)-1, len(word2change)))
else:
idx = 0
for reti in range(len(word2change)):
ret.append((word2change[reti], substitute[reti], i-len(word2change)+idx-1, 1))
idx += 1
res = []
for k, v, idx, length in ret:
if not bool(re.search(r'\d', k)) and re.sub(' ', '', k) != re.sub(' ', '', v):
res.append((k, v, idx, idx+length))
return res
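
# Illustrative example (added; not from the original authors). getSubstitutePairs aligns a
# predicted sentence against the input via an LCS and returns (source phrase, predicted
# substitute, start index, end index) tuples for the spans that differ:
def _example_substitute_pairs():  # hypothetical helper, not used by main()
    pred = "he retrieved the data".split()
    src = "he got the data".split()
    return getSubstitutePairs(pred, src)  # expected to give [('got', 'retrieved', 1, 2)]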
def main(args):
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
torch.manual_seed(args.seed)
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
# Set dictionaries
#src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
dict = tgt_dict
# Load decoding strategy
strategy = strategies.setup_strategy(args)
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _ = utils.load_ensemble_for_inference(args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides))
models = [model.cuda() for model in models]
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
num_shards=args.num_shards,
shard_id=args.shard_id,
).next_epoch_itr(shuffle=False)
results = []
scorer = pybleu.PyBleuScorer()
num_sentences = 0
has_target = True
timer = TimeMeter()
with open('test.en-de.en', 'r') as f:
inputs = f.readlines()
res_dict = {}
with progress_bar.build_progress_bar(args, itr) as t:
translations = generate_batched_itr(t, strategy, models, tgt_dict, length_beam_size=args.length_beam, use_gold_target_len=args.gold_target_len)
for sample_id, src_tokens, target_tokens, hypos in translations:
has_target = target_tokens is not None
target_tokens = target_tokens.int().cpu() if has_target else None
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
src_str = dict.string(src_tokens, args.remove_bpe)
if args.dehyphenate:
src_str = dehyphenate(src_str)
if has_target:
target_str = dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if args.dehyphenate:
target_str = dehyphenate(target_str)
if not args.quiet:
print('S-{}\t{}'.format(sample_id, inputs[sample_id].strip()))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypos.int().cpu(),
src_str=src_str,
alignment= None,
align_dict=align_dict,
tgt_dict=dict,
remove_bpe=args.remove_bpe,
)
if args.dehyphenate:
hypo_str = dehyphenate(hypo_str)
if not args.quiet:
print('H-{}\t{}'.format(sample_id, hypo_str))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(map(lambda x: str(utils.item(x)), alignment))
))
res = getSubstitutePairs(nltk.word_tokenize(hypo_str), nltk.word_tokenize(inputs[sample_id].strip()))
# Score only the top hypothesis
if has_target:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
results.append((target_str, hypo_str))
res_dict[sample_id.cpu().tolist()] = {'input_words':nltk.word_tokenize(src_str), "pred_words":nltk.word_tokenize(hypo_str), "substitute_topk":[[[k,lpos,rpos],[v]] for k,v,lpos,rpos in res]}
num_sentences += 1
if has_target:
print('Time = {}'.format(timer.elapsed_time))
ref, out = zip(*results)
print('| Generate {} with beam={}: BLEU4 = {:2.2f}, '.format(args.gen_subset, args.beam, scorer.score(ref, out)))
finalres = {}
with open('test.en-de.idx', 'r') as f:
idxs = f.readlines()
for i in range(max(res_dict.keys())+1):
finalres[idxs[i]] = res_dict[i]
with open(args.path+'res.json', 'w') as f:
json.dump(finalres, f, indent=4)
def dehyphenate(sent):
return re.sub(r'(\S)-(\S)', r'\1 ##AT##-##AT## \2', sent).replace('##AT##', '@')
def generate_batched_itr(data_itr, strategy, models, tgt_dict, length_beam_size=None, use_gold_target_len=False, cuda=True):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if 'net_input' not in s:
continue
input = s['net_input']
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items()
if k != 'prev_output_tokens'
}
with torch.no_grad():
gold_target_len = s['target'].ne(tgt_dict.pad()).sum(-1) if use_gold_target_len else None
hypos = generate(strategy, encoder_input, models, tgt_dict, length_beam_size, gold_target_len)
for batch in range(hypos.size(0)):
src = utils.strip_pad(input['src_tokens'][batch].data, tgt_dict.pad())
ref = utils.strip_pad(s['target'][batch].data, tgt_dict.pad()) if s['target'] is not None else None
hypo = utils.strip_pad(hypos[batch], tgt_dict.pad())
example_id = s['id'][batch].data
yield example_id, src, ref, hypo
def generate(strategy, encoder_input, models, tgt_dict, length_beam_size, gold_target_len):
assert len(models) == 1
model = models[0]
src_tokens = encoder_input['src_tokens']
src_tokens = src_tokens.new(src_tokens.tolist())
bsz = src_tokens.size(0)
encoder_out = model.encoder(**encoder_input)
beam = predict_length_beam(gold_target_len, encoder_out['predicted_lengths'], length_beam_size)
max_len = beam.max().item()
length_mask = torch.triu(src_tokens.new(max_len, max_len).fill_(1).long(), 1)
length_mask = torch.stack([length_mask[beam[batch] - 1] for batch in range(bsz)], dim=0)
tgt_tokens = src_tokens.new(bsz, length_beam_size, max_len).fill_(tgt_dict.mask())
tgt_tokens = (1 - length_mask) * tgt_tokens + length_mask * tgt_dict.pad()
tgt_tokens = tgt_tokens.view(bsz * length_beam_size, max_len)
duplicate_encoder_out(encoder_out, bsz, length_beam_size)
hypotheses, lprobs = strategy.generate(model, encoder_out, tgt_tokens, tgt_dict)
hypotheses = hypotheses.view(bsz, length_beam_size, max_len)
lprobs = lprobs.view(bsz, length_beam_size)
tgt_lengths = (1 - length_mask).sum(-1)
avg_log_prob = lprobs / tgt_lengths.float()
best_lengths = avg_log_prob.max(-1)[1]
hypotheses = torch.stack([hypotheses[b, l, :] for b, l in enumerate(best_lengths)], dim=0)
return hypotheses
def predict_length_beam(gold_target_len, predicted_lengths, length_beam_size):
if gold_target_len is not None:
beam_starts = gold_target_len - (length_beam_size - 1) // 2
beam_ends = gold_target_len + length_beam_size // 2 + 1
beam = torch.stack([torch.arange(beam_starts[batch], beam_ends[batch], device=beam_starts.device) for batch in range(gold_target_len.size(0))], dim=0)
else:
beam = predicted_lengths.topk(length_beam_size, dim=1)[1]
beam[beam < 2] = 2
return beam
if __name__ == '__main__':
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
|
microsoft/SmartWordSuggestions
|
code/baselines/CMLM/updates/generate_cmlm.py
|
generate_cmlm.py
|
py
| 12,369 |
python
|
en
|
code
| 18 |
github-code
|
6
|
22853474916
|
class Solution(object):
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
trie = {}
res = set()
for word in words:
self.insert(trie, word)
for i in range(len(board)):
for j in range(len(board[0])):
self.check(board, i, j, trie, res, "")
return list(res)
def insert(self, root, word):
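        # Build a character trie: each node is a dict, and a None key marks the end of a complete word.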
for char in word:
if char not in root:
root[char] = {}
root = root[char]
root[None] = None
def check(self, board, i, j, trie, res, pre):
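        # DFS from cell (i, j) while walking the trie; the cell is temporarily set to '*' so it is not reused on the current path.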
if i < len(board) and i >= 0 and j < len(board[0]) and j >= 0 and board[i][j] != '*' and board[i][j] in trie:
#visit.add((i, j))
pre += board[i][j]
board[i][j] = '*'
subtrie = trie[pre[-1]]
if None in subtrie:
res.add(pre)
self.check(board, i, j + 1, subtrie, res, pre)
self.check(board, i, j - 1, subtrie, res, pre)
self.check(board, i + 1, j, subtrie, res, pre)
self.check(board, i - 1, j, subtrie, res, pre)
#visit.remove((i, j))
board[i][j] = pre[-1]
|
yuweishi/LeetCode
|
Algorithms/Word Search II/solution.py
|
solution.py
|
py
| 1,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70647233789
|
"""
This is the simplest example of training NER (named entity recognizer).
NER is responsible for recognizing "Apple" as a 'company', "George Bush" as a 'person', and so on.
THE GOAL of training the model is for it to recognize 'iPhone' in text as a 'GADGET' (for example), and so on.
How do we teach the model to recognize specific words in a specific context?
By showing the model a few hundred examples, where we mark the exact word positions and label what they are.
For example, in the text 'Who is Shaka Khan?' we can label it like this: {"entities": [(7, 17, "PERSON")]}
or
'I like London and Berlin.' like this: {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}
Each training example is a tuple of the text and a dict of entity spans; the numbers are start/end character offsets (a hypothetical extra example in this format is sketched right after TRAIN_DATA below).
"""
import plac
import random
import warnings
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
# training data
TRAIN_DATA = [
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]
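
# A hypothetical extra example in the same format (added for illustration, not used in training);
# it matches the GADGET goal from the module docstring, and the numbers are the start/end
# character offsets of "iPhone" in the sentence:
EXTRA_EXAMPLE = ("I just bought an iPhone.", {"entities": [(17, 23, "GADGET")]})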
# Here we can inject model name, output_dir and n_iter for main function, but for now we are working on empty model!
#@plac.annotations(
# model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
# output_dir=("Optional output directory", "option", "o", Path),
# n_iter=("Number of training iterations", "option", "n", int),
#)
def main(model=None, output_dir=None, n_iter=100):
"""Load the model, set up the pipeline and train the entity recognizer."""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in nlp.pipe_names:
ner = nlp.create_pipe("ner")
nlp.add_pipe(ner, last=True)
# otherwise, get it so we can add labels
else:
ner = nlp.get_pipe("ner")
# add labels
for _, annotations in TRAIN_DATA:
for ent in annotations.get("entities"):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
# only train NER
with nlp.disable_pipes(*other_pipes), warnings.catch_warnings():
# show warnings for misaligned entity spans once
warnings.filterwarnings("once", category=UserWarning, module='spacy')
# reset and initialize the weights randomly – but only if we're
# training a new model
if model is None:
nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.5, # dropout - make it harder to memorise data
losses=losses,
)
print("Losses", losses)
# test the trained model
for text, _ in TRAIN_DATA:
doc = nlp(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
for text, _ in TRAIN_DATA:
doc = nlp2(text)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
if __name__ == "__main__":
plac.call(main)
# Expected output:
# Entities [('Shaka Khan', 'PERSON')]
# Tokens [('Who', '', 2), ('is', '', 2), ('Shaka', 'PERSON', 3),
# ('Khan', 'PERSON', 1), ('?', '', 2)]
# Entities [('London', 'LOC'), ('Berlin', 'LOC')]
# Tokens [('I', '', 2), ('like', '', 2), ('London', 'LOC', 3),
# ('and', '', 2), ('Berlin', 'LOC', 3), ('.', '', 2)]
|
koualsky/dev-learning
|
spacy/train_model/full_example.py
|
full_example.py
|
py
| 4,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36156665423
|
import sys
N = 6
INFINITY = sys.maxsize
Kilometres = [[INFINITY,1,3,2,1,2],
[1,INFINITY,3,1,2,3],
[3,3,INFINITY,5,3,2],
[2,1,5,INFINITY,2,3],
[1,2,3,2,INFINITY,1],
[2,3,2,3,1,INFINITY]]
impaire = [0] * N
sousGraph = [[0] * N for i in range(N)]
copy_couplage = [0] * N # stores a saved copy of the matching
pi = 100000 # minimal weight found so far
poids = 0 # minimal weight of the maximum matching
Arret = [[0] * N for i in range(N)] # stores, for each vertex, the vertices it is linked to
etiquer = [0] * N # whether each vertex has been labeled
copy_etique = [0] * N # saved copy of the labels
nouveau_couplage = [-1] * N # whether the vertex is in the matching (stores its partner, -1 if unmatched)
def copyEtique():
global copy_etique, etiquer
copy_etique = etiquer.copy()
return
def copytoEtique():
global etiquer, copy_etique
etiquer = copy_etique.copy()
return
def Arret_lier():
global Arret, sousGraph
for i in range(N):
for j in range(N):
Arret[i][j] = -1
for i in range(N):
k = 0
for j in range(N):
if sousGraph[i][j] != INFINITY:
Arret[i][k] = j
k += 1
return
def refreshEtique():
global nouveau_couplage, etiquer
for i in range(N):
if nouveau_couplage[i] != -1:
etiquer[i] = 1
else:
etiquer[i] = 0
return
def init_nouvcouplage():
global nouveau_couplage
nouveau_couplage = [-1] * N
return
def init_copyCouplage():
global copy_couplage
copy_couplage = [-1] * N
return
def copyCouplage():
global nouveau_couplage, copy_couplage
copy_couplage = nouveau_couplage.copy()
return
def copyNouveauCouplage():
global nouveau_couplage, copy_couplage
nouveau_couplage = copy_couplage.copy()
return
def DFS(depart, start):
global nouveau_couplage, etiquer, Arret
etiquer[depart] = 1
for i in range(start, N):
if Arret[depart][i] != -1:
arrive = Arret[depart][i]
if etiquer[arrive] == 0:
etiquer[arrive] = 1
if nouveau_couplage[arrive] == -1 or DFS(nouveau_couplage[arrive], 0):
nouveau_couplage[arrive] = depart
nouveau_couplage[depart] = arrive
return 1
return 0
def sum_couplage(): # calculate the weight of the matching
sum = 0
for i in range(N):
if nouveau_couplage[i] != -1:
sum += sousGraph[i][nouveau_couplage[i]]
return (sum // 2)
def init_etiquer(): # initialization of the label
for i in range(N):
etiquer[i] = 0
def print_couplage(): # for verification
for i in range(N):
if nouveau_couplage[i] != -1:
print("{", i, ",", nouveau_couplage[i], "}")
def print_etique(): # for verification
for i in range(N):
if etiquer[i] != 0:
print(i, "is etique")
def couplage_poids():
global poids
global pi
global nouveau_couplage
global couplage
global etiquer
flag = 0
# initialization
Arret_lier()
init_copyCouplage()
init_nouvcouplage()
# find all augmenting paths starting from each vertex
for k in range(N):
for i in range(N):
if etiquer[i] == 0:
for j in range(N):
init_etiquer()
if DFS(i, j) == 1: # if augmenting path exists
poids = sum_couplage()
if poids < pi:
pi = poids # update minimal weight
copyNouveauCouplage() # important! Matching must be saved, otherwise DFS will have a problem
refreshEtique()
if pi == 10000: # if no augmenting path, terminate
break
for i in range(N): # search for minimal weight to update matching
if etiquer[i] == 0:
init_etiquer()
for j in range(N):
if DFS(i, j) == 1:
poids = sum_couplage()
if poids == pi: # matching found!
pi = 10000
flag = 1
copyCouplage() # new matching
copyEtique() # new label
refreshEtique()
break
else:
copyNouveauCouplage()
init_etiquer()
if flag != 0:
flag = 0
break
return poids
|
CSolatges/La-tournee-du-facteur
|
Python/HungroisC.py
|
HungroisC.py
|
py
| 4,604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9437207469
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
file_path_th = '.\data_processing\ex\*through_E*.csv'
file_path_cr = '.\data_processing\ex\*cross_E*.csv'
csv_1 = []
csv_2 = []
x_axis = []
th_sum = []
cr_sum = []
# through
for filename_1 in glob.glob(file_path_th, recursive=True):
csv_1.append(filename_1)
for file_1 in csv_1:
filename_1 = file_1.split('\\')[-1][:-4]
data_1 = pd.read_csv('.\data_processing\ex\%s.csv'%filename_1, names=['first'])
final_sum_1 = 0
integer_1 = list(range(0,list(data_1.shape)[0]))
data_1.index = integer_1
data_1.index.name = 'order'
for i in range(0,list(data_1.shape)[0]):
find_value_1 = data_1['first'][i]
find_value_1 = find_value_1.split()
key_1 = list(map(float, find_value_1))
sum_1 = 0
for x in range(0,len(key_1)):
sum_1 +=float(key_1[x])
        #print('%d-th sum = ' % i, sum_1)
final_sum_1 = sum_1 + final_sum_1
#x_axis.append(int(filename_1[-3:]))
th_sum.append(final_sum_1)
    #print('value of %s = ' % filename_1, final_sum_1)
#print(x_axis)
#print(th_sum)
# cross
for filename_2 in glob.glob(file_path_cr, recursive=True):
csv_2.append(filename_2)
for file_2 in csv_2:
filename_2 = file_2.split('\\')[-1][:-4]
data_2 = pd.read_csv('.\data_processing\ex\%s.csv'%filename_2,names=['first'])
final_sum_2 = 0
integer_2 = list(range(0,list(data_2.shape)[0]))
data_2.index = integer_2
data_2.index.name = 'order'
for i in range(1,list(data_2.shape)[0]):
find_value_2 = data_2['first'][i]
find_value_2 = find_value_2.split()
key_2 = list(map(float, find_value_2))
#print(find_value_2)
sum_2 = 0
for x in range(0,len(key_2)):
sum_2 +=float(key_2[x])
        #print('%d-th sum = ' % i, sum_2)
final_sum_2 = sum_2 + final_sum_2
cr_sum.append(final_sum_2)
    #print('value of %s = ' % filename_2, final_sum_2)
#print(cr_sum)
# calculation
th_sum_square_list = []
cr_sum_square_list = []
th_cr_sum_list = []
r_value_list = []
k_value_list = []
for j in range(0,len(th_sum)):
th_sum_square = th_sum[j]*th_sum[j]
cr_sum_square = cr_sum[j]*cr_sum[j]
th_sum_square_list.append(th_sum_square)
cr_sum_square_list.append(cr_sum_square)
th_cr_sum = th_sum_square + cr_sum_square
th_cr_sum_list.append(th_cr_sum)
r_value = np.sqrt(th_sum_square / th_cr_sum)
k_value = np.sqrt(cr_sum_square / th_cr_sum)
r_value_list.append(r_value)
k_value_list.append(k_value)
def run(x, y, z):
plt.scatter(x, y)
plt.scatter(x, z)
plt.plot(x,y)
plt.plot(x,z)
plt.title('Power by r,k')
plt.xlabel('distance [nm]',labelpad=10)
plt.ylabel('Power [W]',labelpad=10)
plt.legend()
plt.grid(True)
plt.show()
|
jordan-kim/r_k_graph_fitting
|
src/fitting_r_k.py
|
fitting_r_k.py
|
py
| 2,885 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25231410593
|
#! /usr/bin/env python
# encoding: utf-8
# vim: ai ts=4 sts=4 et sw=4
##
##
## @author Nadia
## [email protected]/[email protected]
##
import MySQLdb
from datetime import datetime
from mako.template import Template
from creche import settings
class SysEventService:
"""Class that will be delegated with creating events in the database, after these events have been detected in other places in the system"""
def __init__(self):
self.connection = MySQLdb.connect (host=settings.DATABASES['default']['HOST'],
user=settings.DATABASES['default']['USER'],
passwd=settings.DATABASES['default']['PASSWORD'],
db=settings.DATABASES['default']['NAME'])
self.connection.set_character_set('utf8')#important because the data we are dealing with is unicode
def createSysEvent(self, params, expressions=None):
"""Utility method that will be utilized for making an entry about an event"""
cursor = self.connection.cursor()
params['date_generated'] = datetime.now()
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
params['processed'] = False
#in the params dict we expect the name to have been specified
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO event (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
eventId = cursor.lastrowid
result = {}
result['eventId'] = eventId
cursor.close()
if expressions:
expressions['event_type_id'] = params['event_type_id']
result = self.__scheduleGenericNotification(eventId, expressions)
self.connection.commit()
return result
def scheduleEmailRecipient(self, params, language):
"""Utility method that schedules email recipients"""
cursor = self.connection.cursor()
event = None
subject_suffix = ''
#for i in range(1, 10000):
#we must get the from email and the proposed subject
cursor.execute("""SELECT et.from_email, et.email_subject
FROM event_type et
WHERE et.id = %d""" % (params['event_type_id']))
event_type = cursor.fetchone()
from_email = event_type[0]
subject = event_type[1]
if params['event_type_id'] != settings.APPLICATION_SETTINGS['TEST_EMAIL']:
cursor.execute("""SELECT e.id
FROM event e
WHERE e.entity_reference_id = %d AND e.event_type_id = %d""" % (params['entity_reference_id'], params['event_type_id']))
event = cursor.fetchone()
if event:
eventId = event[0]
else:
params['date_generated'] = datetime.now()
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
                params['processed'] = True  # it ensures that we know this is an email, not an event
#in the params dict we expect the name to have been specified
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO event (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
cursor.execute("""SELECT from_email
FROM event_type
WHERE id = %d""" % (params['event_type_id']))
event_type = cursor.fetchone()
eventId = cursor.lastrowid
from_email = event_type[0]
self.connection.commit()#lets commit this asap so that any other preparation request doesn't start queueing guys again.
if params['event_type_id'] == settings.APPLICATION_SETTINGS['TEST_EMAIL']:
#lets add the No. of tests to the subject of the test email
cursor.execute("""SELECT COUNT(e.id)
FROM event_type et INNER JOIN event e ON e.event_type_id = et.id
WHERE e.event_type_id = %d AND e.entity_reference_id = %d""" % (params['event_type_id'], params['entity_reference_id']))
test_event = cursor.fetchone()
subject_suffix = ' [Test No. ' + str(test_event[0]) + ']'
#schedule the emails
expressions = {}
expressions['from_email'] = from_email
expressions['message_body'] = ' '
expressions['subject'] = subject + ' ' + str(params['entity_reference_id']) + subject_suffix
expressions['scheduled_for_relay'] = False
expressions['event_id'] = eventId
expressions['last_updated'] = datetime.now()
expressions['date_created'] = datetime.now()
cursor.execute("""
SELECT wu.mail
FROM web_users wu
INNER JOIN system_user_has_event_type suhet ON suhet.system_user_id = wu.uid
INNER JOIN event_type et ON suhet.event_type_id = et.id
WHERE et.id = %d""" % params['event_type_id'])
subscribers = cursor.fetchall()
for user in subscribers:
#check to see if we already have queued this recipient/subscriber
em = user[0].replace("'", "\\'")
cursor.execute("SELECT id FROM email_schedule WHERE event_id = %d AND to_email = '%s'" % (eventId, em))
recipient = cursor.fetchone()
if not recipient:
expressions['to_email'] = user[0]
fields = ', '.join(expressions.keys())
values = ', '.join(['%%(%s)s' % x for x in expressions])
query = 'INSERT INTO email_schedule (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, expressions)
self.connection.commit()#lets commit this asap so that any other preparation request doesn't start queueing guys again.
#get the total number of subscribers
cursor.execute("SELECT count(*) FROM email_schedule WHERE event_id = %d " % (eventId))
subscriber_count = cursor.fetchone()
#get the total number of queued subscribers
cursor.execute("SELECT count(*) FROM email_schedule WHERE event_id = %d AND delivery_date IS NULL" % (eventId))
queued_subscribers = cursor.fetchone()
cursor.close()
self.connection.commit()
self.connection.close()
return [str(subscriber_count[0]), str(queued_subscribers[0]), eventId]
def __scheduleGenericNotification(self, eventId, expressions):
"""Schedule email for any user who would wish to be notified of a notification."""
#first pick the template path that we are to use for building the email body
cursor = self.connection.cursor()
cursor.execute("SELECT template_path, from_email FROM event_type et INNER JOIN event e ON e.event_type_id = et.id WHERE e.id = %d" % eventId)
record = cursor.fetchone()
template = Template(filename=settings.APPLICATION_SETTINGS['COREAPP_HOME'] + record[0], input_encoding='utf-8')
template.output_encoding = 'utf-8'
params = {}
params['from_email'] = record[1]
params['subject'] = 'Creche Parentale Notification'
params['scheduled_for_relay'] = True
params['event_id'] = eventId
params['last_updated'] = datetime.now()
params['date_created'] = datetime.now()
cursor.execute("""
SELECT wu.mail, wud.full_name
FROM web_users wu
INNER JOIN system_user_has_event_type suhet ON suhet.system_user_id = wu.uid
INNER JOIN web_user_detail wud ON wud.user_id = wu.uid
INNER JOIN event_type et ON suhet.event_type_id = et.id
WHERE et.id = %d""" % expressions['event_type_id'])
subscribers = cursor.fetchall()
for user in subscribers:
recipient = 'User'
if user[1]:
recipient = user[1]
expressions['recipient'] = recipient
params['message_body'] = template.render(params=expressions)#message to be relayed to the subscribers
params['to_email'] = user[0]
fields = ', '.join(params.keys())
values = ', '.join(['%%(%s)s' % x for x in params])
query = 'INSERT INTO email_schedule (%s) VALUES (%s)' % (fields, values)
cursor.execute(query, params)
cursor.close()
return params
|
projet2019/Creche_Parentale
|
creche/coreapp/service/sys_event_service.py
|
sys_event_service.py
|
py
| 8,574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30984032152
|
employee_file = open("employees.txt", "a") # "w" overwrites everything, "r" read only, "a" is append
employee_file.write("\nEirin - Artist")
employee_file.close()
employee_file1 = open("employees.txt", "r")
print(employee_file1.read())
employee_file1.close()
|
chrismykle/pythonBeginner
|
readingFiles.py
|
readingFiles.py
|
py
| 270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74286055227
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 7 16:29:22 2019
@author: swh
"""
'''Implement a queue using two stacks.
A queue is first-in-first-out, while a stack is last-in-first-out (a usage sketch follows the Queue class below).'''
class Queue:
def __init__(self):
self.stockA=[]
self.stockB=[]
def push(self, node):
self.stockA.append(node)
def pop(self):
if self.stockB==[]:
if self.stockA==[]:
return None
else:
for i in range(len(self.stockA)):
self.stockB.append(self.stockA.pop())
return self.stockB.pop()
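
# Usage sketch (added for illustration; not part of the original solution):
def _demo_queue():  # hypothetical helper
    q = Queue()
    for x in (1, 2, 3):
        q.push(x)
    return q.pop()  # returns 1: elements come out in FIFO order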
if __name__=="__main__":
s=Queue()
while True:
a=input()
if a !="":
s.push(int(a))
else:
print(s.pop())
|
buptswh/coding_offer
|
offer9_两个栈实现一个队列.py
|
offer9_两个栈实现一个队列.py
|
py
| 757 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2122328128
|
import stripe
from celery import task
from django.conf import settings
from users.models import Buyer
@task
def create_customer(card_token, buyer_id):
stripe.api_key = settings.STRIPE_API_KEY
buyer = Buyer.objects.get(id=buyer_id)
customer = stripe.Customer.create(
email=buyer.email,
source=card_token,
)
buyer.customer_id = customer.id
buyer.save()
return buyer.id
|
HackBulgaria/Web-Development-with-Django
|
week11/stripe_integration/payments/tasks.py
|
tasks.py
|
py
| 420 |
python
|
en
|
code
| 25 |
github-code
|
6
|
26310859237
|
from hmm import ViterbiTagger, SimpleTagger
import util
DEBUG = False
def train(train_data_filename, rare_train_data_filename, hmm_model_filename, rare_words_rule):
print ('1. train hmm model')
hmm_model = ViterbiTagger(3)
hmm_model.rare_words_rule = rare_words_rule
hmm_model.train(open(train_data_filename,'r'))
print('2. process rare words')
util.process_rare_words(
open(train_data_filename,'r'),
open(rare_train_data_filename, 'w'),
hmm_model.rare_words,
hmm_model.rare_words_rule)
print('3. train hmm model again using the new train data')
#hmm_model_rare = ViterbiTagger(3)
hmm_model_rare = SimpleTagger(3)
hmm_model_rare.train(open(rare_train_data_filename,'r'))
hmm_model_rare.write_counts(open(hmm_model_filename, 'w'))
def tag(test_data_filename, result_filename, hmm_model_filename):
print('1. load Hmm model')
tagger = ViterbiTagger(3)
tagger.read_counts(open(hmm_model_filename,'r'))
print ('2. tag test file')
tagger.tag(open(test_data_filename), open(result_filename, 'w'))
def main():
TRAIN = True
# 1. training
hmm_model_filename = 'p2.model'
train_data_filename = 'train_2'
rare_train_data_filename = 'p2.brown.train2'
if TRAIN:
train(train_data_filename, rare_train_data_filename, hmm_model_filename, util.rare_words_rule_p1)
# 2. tagging
test_data_filename = 'test_untag'
result_filename = 'brown.test.output'
tag(test_data_filename, result_filename, hmm_model_filename)
if __name__ == '__main__':
main()
|
Tuanlase02874/HMM-Demo
|
src/p2.py
|
p2.py
|
py
| 1,587 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27618070046
|
from operator import itemgetter
# ######################################## Mapper ########################################### #
class Mapper(object):
def __init__(self, mapping_m):
self.mapping_m = mapping_m
if self.mapping_m['type'] == 'packing':
self.worker_l = lambda j, w_l: self.worker_l_w_packing(j, w_l)
elif self.mapping_m['type'] == 'spreading':
self.worker_l = lambda j, w_l: self.worker_l_w_spreading(j, w_l)
def __repr__(self):
return 'Mapper[mapping_m= {}]'.format(self.mapping_m)
def worker_l_w_packing(self, job, w_l):
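    # Packing: keep every worker that still has enough unscheduled capacity for the job, in the given order, so jobs tend to fill up the same workers first.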
w_l_ = []
for w in w_l:
if job.reqed <= w.nonsched_cap():
w_l_.append(w)
return w_l_
def worker_l_w_spreading(self, job, w_l):
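    # Spreading: keep the eligible workers but sort them by current scheduled load, least-loaded first, so load is spread across workers.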
w_load_l = []
for w in w_l:
if job.reqed <= w.nonsched_cap():
w_load_l.append((w, w.sched_load() ) )
w_load_l.sort(key=itemgetter(1) )
return [w for w, _ in w_load_l]
|
mfatihaktas/deep-scheduler
|
mapper.py
|
mapper.py
|
py
| 936 |
python
|
en
|
code
| 12 |
github-code
|
6
|
5088106926
|
from global_settings import integration_host, integration_table
from pandas import DataFrame, concat
from cdapython import Q
df = DataFrame()
for i in (
Q("subject_identifier_system = 'GDC'")
.ORDER_BY("days_to_birth:-1")
.subject.run(show_sql=True, host=integration_host, table=integration_table)
.paginator(page_size=8000, to_df=True)
):
df = concat([i, df])
print(df)
# Q.bigquery_status()
|
CancerDataAggregator/cda-python
|
tests/paging.py
|
paging.py
|
py
| 417 |
python
|
en
|
code
| 7 |
github-code
|
6
|
12741310305
|
import sqlite3
con = sqlite3.connect('d_students.db')
cur = con.cursor()
# Create table
#cur.execute('''CREATE TABLE s_information
#(first_name text, last_name text, course text, age real)''')
# Insert a row of data
#cur.execute("INSERT INTO s_information VALUES ('Ade','Ola','product_design', 29)")
# Save (commit) the changes
con.commit()
print("successful")
s_data = [
('Ajayi', 'Bayowa', 'software development', 30,),
('Ademide', 'Benson', 'data science', 23,),
('Olawale', 'Saheed', 'UI/UX', 18,),
]
# cur.executemany('INSERT INTO s_information VALUES(?, ?, ?, ?)', s_data)
# print("execution successful")
for row in cur.execute('SELECT * FROM s_information'):
print(row)
print(cur.fetchall())
#alter table statement
#cur.execute("alter table s_info rename to s_information")
#con.commit()
#add a new column
# cur.execute("alter table s_information add column email")
# con.commit()
#update column
cur.execute(""" update s_information set email = '[email protected]' """)
con.commit()
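
# --- Illustrative addition (not in the original lesson) ---
# The UPDATE above changes every row's email; a parameterized query with a WHERE clause
# filters instead (this assumes the email column from the commented-out ALTER already exists):
cur.execute("SELECT first_name, last_name, email FROM s_information WHERE course = ?", ("data science",))
print(cur.fetchall())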
|
kehindeorolade/Module_4_lesson_3
|
Module_4_less_3.py
|
Module_4_less_3.py
|
py
| 1,043 |
python
|
en
|
code
| 0 |
github-code
|
6
|