{
"source": "321BadgerCode/code",
"score": 2
} |
#### File: code/py/device_1.py
```python
from os import getpid
import psutil
import socket
"""from PyInstaller.utils.hooks import collect_submodules
hiddenimports=["os._libs.tslibs.getpid","psutil._libs","socket._libs"]"""
class device():
name=socket.gethostname()
ip_address=socket.gethostbyname(name)
    class battery():
        # psutil.sensors_battery() returns None on machines without a battery,
        # in which case the attribute lookups below raise AttributeError.
        def time_left():
            # secsleft can be a negative sentinel value while plugged in, hence
            # the abs() on the hour component.
            mm, ss = divmod(psutil.sensors_battery().secsleft, 60)
            hh, mm = divmod(mm, 60)
            return "%d:%02d:%02d" % (abs(hh), mm, ss)
        percent: int = psutil.sensors_battery().percent
        charge: bool = psutil.sensors_battery().power_plugged
def app_run():
return str(psutil.Process(getpid()).parent().name())
def cpu_usage():
return str(psutil.cpu_percent(interval=0.5))
def ram_usage():
a1=int((psutil.virtual_memory().total-psutil.virtual_memory().available)/1024/1024)
return str(a1)
def ram_total():
a1=int((psutil.virtual_memory().total/1024/1024))
return str(a1)
print("This program is currently running on: "+app_run())
print("C.P.U. percentage: "+cpu_usage()+"%")
print("R.A.M. usage: "+ram_usage()+" M.B.")
print("R.A.M. total: "+ram_total()+" M.B.")
print("-----")
print("Device name: "+device.name)
print("Device I.P. address: "+device.ip_address)
print("Device battery: "+str(device.battery.percent)+"%")
print("Device charging: "+str(device.battery.charge))
print("Device battery time left: "+device.battery.time_left())
```
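The script above assumes a battery is present. A small defensive sketch (not part of the original file; the helper name is mine) that guards against `psutil.sensors_battery()` returning `None` on desktop machines:
```python
import psutil

def battery_summary():
    # sensors_battery() returns None on machines without a battery
    batt = psutil.sensors_battery()
    if batt is None:
        return "no battery detected"
    state = "charging" if batt.power_plugged else "discharging"
    return "%d%% (%s)" % (batt.percent, state)

print(battery_summary())
```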
#### File: code/py/encrypt_1.py
```python
class file:
name:str=''
data:str=''
is_encrypted:bool=False
def __init__(self,name:str)->None:
self.name=name
self.create()
self.read()
def __str__(self)->str:
if self.is_encrypted==True:
return self.name+"(encrypted): "+self.data
else:
return self.name+"(decrypted): "+self.data
    def read(self) -> None:
        with open(self.name, 'r') as f:
            self.data = f.read()
    def write(self, a1: str) -> None:
        self.data = a1
        with open(self.name, 'w') as f:
            f.write(self.data)
    def create(self) -> None:
        # touch the file so read() does not fail on the first run
        open(self.name, 'a').close()
    def close(self) -> None:
        # note: opening in 'w' mode truncates the file on disk
        open(self.name, 'w').close()
def encrypt(self)->None:
self.is_encrypted=not self.is_encrypted
self.write(get_encrypt(self.data))
class data:
password:str=''
def __init__(self,password:str)->None:
self.password=password
def get_encrypt(a1: str) -> str:
    # XOR every character with a key derived from the text length; because XOR
    # with a fixed key is its own inverse, applying this twice restores the text.
    b1: str = ''
    for a in range(len(a1)):
        b1 += chr(ord(a1[a]) ^ (len(a1) + 26))
    return b1
if __name__=="__main__":
f=file(".\\test_1.txt")
f.write("hello world")
f.encrypt()
print(f)
print("\n")
a1=input("password: ")
print("\n")
d=data("123")
if a1==d.password:
f.encrypt()
print(f)
else:
print("password incorrect!")
print("\n")
print("[._.]: see you later :)")
``` |
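The `encrypt()` method above toggles between encrypted and decrypted text by re-applying `get_encrypt()`, which only works because XOR with a fixed key is an involution. A quick standalone check of that property (the sample string is illustrative):
```python
sample = "hello world"
key = len(sample) + 26          # same key derivation as get_encrypt()
once = ''.join(chr(ord(c) ^ key) for c in sample)
twice = ''.join(chr(ord(c) ^ key) for c in once)
assert once != sample and twice == sample   # applying it twice restores the text
```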
{
"source": "321core/django-allauth",
"score": 2
} |
#### File: providers/apple/views.py
```python
import json
import requests
from datetime import timedelta
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.http import urlencode
from django.views.decorators.csrf import csrf_exempt
import jwt
from allauth.socialaccount.models import SocialApp, SocialToken
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .client import AppleOAuth2Client
from .provider import AppleProvider
class AppleOAuth2Adapter(OAuth2Adapter):
provider_id = AppleProvider.id
access_token_url = "https://appleid.apple.com/auth/token"
authorize_url = "https://appleid.apple.com/auth/authorize"
public_key_url = "https://appleid.apple.com/auth/keys"
def get_public_key(self, id_token):
"""
Get the public key which matches the `kid` in the id_token header.
"""
kid = jwt.get_unverified_header(id_token)["kid"]
apple_public_key = [
d
for d in requests.get(self.public_key_url).json()["keys"]
if d["kid"] == kid
][0]
public_key = jwt.algorithms.RSAAlgorithm.from_jwk(
json.dumps(apple_public_key)
)
return public_key
def get_client_id(self, provider):
app = SocialApp.objects.get(provider=provider.id)
return [aud.strip() for aud in app.client_id.split(",")]
def parse_token(self, data):
try:
token = SocialToken(token=data["access_token"])
token.token_secret = data.get("refresh_token", "")
public_key = self.get_public_key(data["id_token"])
provider = self.get_provider()
allowed_auds = self.get_client_id(provider)
token.user_data = jwt.decode(
data["id_token"],
public_key,
algorithms=["RS256"],
verify=True,
audience=allowed_auds,
)
expires_in = data.get(self.expires_in_key, None)
if expires_in:
token.expires_at = timezone.now() + timedelta(
seconds=int(expires_in)
)
return token
except jwt.PyJWTError as e:
raise OAuth2Error("Invalid id_token") from e
def complete_login(self, request, app, token, **kwargs):
extra_data = token.user_data
login = self.get_provider().sociallogin_from_response(
request, extra_data
)
login.state["id_token"] = token.user_data
return login
class AppleOAuth2ClientMixin:
def get_client(self, request, app):
client = super().get_client(request, app)
apple_client = AppleOAuth2Client(
client.request,
client.consumer_key,
client.consumer_secret,
client.access_token_method,
client.access_token_url,
client.callback_url,
client.scope,
key=client.key,
cert=client.cert,
)
return apple_client
class AppleOAuth2LoginView(AppleOAuth2ClientMixin, OAuth2LoginView):
"""
Custom AppleOAuth2LoginView to return AppleOAuth2Client
"""
pass
class AppleOAuth2CallbackView(AppleOAuth2ClientMixin, OAuth2CallbackView):
"""
Custom OAuth2CallbackView because `Sign In With Apple`:
* returns AppleOAuth2Client
* Apple requests callback by POST
"""
def dispatch(self, request, *args, **kwargs):
if request.method == "POST":
url = request.build_absolute_uri(request.get_full_path())
params = {
"code": request.POST.get("code"),
"state": request.POST.get("state"),
}
return HttpResponseRedirect("%s?%s" % (url, urlencode(params)))
if request.method == "GET":
return super().dispatch(request, *args, **kwargs)
oauth2_login = AppleOAuth2LoginView.adapter_view(AppleOAuth2Adapter)
oauth2_callback = csrf_exempt(
AppleOAuth2CallbackView.adapter_view(AppleOAuth2Adapter)
)
``` |
{
"source": "321qwedsa000/ntubSystem",
"score": 2
} |
#### File: ntubSystem/ntubSystem/ntubsys.py
```python
import requests
from bs4 import BeautifulSoup
import re
from enum import Enum
from datetime import datetime
import xml.etree.ElementTree as ET
from prettytable import from_html_one
from concurrent.futures import ThreadPoolExecutor,as_completed
class NtubLoginFailedException(Exception):
def __init__(self,message):
super().__init__(message)
class NtubNoClassException(Exception):
def __init__(self,message):
super().__init__(message)
class NtubLoginSystem:
def __search_Asp_Utils(self,url,dic):
response = self.session.get(url)
if response.status_code == 404:
raise NtubLoginFailedException("No Internet Connection")
try:
soup = BeautifulSoup(response.content,features='lxml')
except:
soup = BeautifulSoup(response.content,features='html.parser')
dic['__VIEWSTATE'] = soup.select_one('#__VIEWSTATE')['value'] if soup.select_one('#__VIEWSTATE') != None else ''
dic['__VIEWSTATEGENERATOR'] = soup.select_one('#__VIEWSTATEGENERATOR')['value'] if soup.select_one('#__VIEWSTATEGENERATOR') != None else ''
dic['__EVENTVALIDATION'] = soup.select_one('#__EVENTVALIDATION')['value'] if soup.select_one('#__EVENTVALIDATION') != None else ''
def __init__(self,username:str,password:str):
#####################
# Ntub Related URLS #
#####################
self.LOGIN_URL = "http://ntcbadm1.ntub.edu.tw/login.aspx?Type=0" #POST url
self.CURRICULUM_URL = "http://ntcbadm1.ntub.edu.tw/STDWEB/Sel_Student.aspx" #GET url
self.MIDTERM_URL = "http://ntcbadm1.ntub.edu.tw/ACAD/STDWEB/GRD_GRDMQry.aspx" #POST url
self.SCORE_URL = "http://ntcbadm1.ntub.edu.tw/ACAD/STDWEB/GRD_GRDQry.aspx" #POST url
self.LEAVE_URL = "http://ntcbadm1.ntub.edu.tw/StdAff/STDWeb/ABS0101Add.aspx" #POST url
self.CHANGEPWD_URL = "http://ntcbadm1.ntub.edu.tw/STDWEB/STD_PwdChange.aspx" #POST url
self.LESSON_URL = "http://ntcbadm1.ntub.edu.tw/HttpRequest/SELChooseHttpXML.aspx" #POST URL
self.MAIN_PAGE_URL = "http://ntcbadm1.ntub.edu.tw/STDWEB/SelChoose/SelChooseMain.aspx" #GET URL
self.LESSON_INFO_URL = "http://ntcbadm1.ntub.edu.tw/pub/TchSchedule_Search.aspx" #POST url
self.EXPORT_CURRICULUM_URL = "http://ntcbadm1.ntub.edu.tw/STDWEB/Sel_Student_Excel.aspx" #POST url
###################
# Login into NTUB #
###################
#
self.session = requests.Session()
loginData = {
'UserID':username,
'PWD':password,
'loginbtn':''
}
self.__search_Asp_Utils(self.LOGIN_URL,loginData)
self._password = password
loginResponse = self.session.post(self.LOGIN_URL,data=loginData)
if loginResponse.text.startswith('<script>'):
raise NtubLoginFailedException(f'Login {username} failed')
self._cookies = loginResponse.cookies
try:
soup = BeautifulSoup(loginResponse.text,'lxml')
except:
soup = BeautifulSoup(loginResponse.text,'html.parser')
self._classname = soup.find('span',{'id':'ClassName'}).text
self._stdno = soup.find('span',{'id':'StdNo'}).text
self._name = soup.find('span',{'id':'Name'}).text
#
@property
def cookies(self):
return self._cookies
@property
def password(self):
        return self._password  # restored from the redacted <PASSWORD> placeholder
@property
def className(self):
return self._classname
@property
def studentNumber(self):
return self._stdno
@property
def name(self):
return self._name
def grab_deptNo(self):
try:
mainPageResponse = self.session.get(self.MAIN_PAGE_URL)
try:
soup = BeautifulSoup(mainPageResponse.text,'lxml')
except:
soup = BeautifulSoup(mainPageResponse.text,'html.parser')
submit_data = {
"ModuleName": "InitSelDept",
"Years": soup.find("input",{"id":"SelYear"})["value"],
"Term": soup.find("input",{"id":"SelTerm"})["value"],
"Desire": '',
"EduNo": soup.find("input",{'id':'EduNo'})['value']
}
response = self.session.post(self.LESSON_URL,data=submit_data)
root = ET.fromstring(response.text)
dataDict = {}
for e in root.findall("DataItem"):
dataDict[e[1].text] = e[0].text
except KeyError:
raise NtubNoClassException("課程未開放")
return dataDict
def parse_lessons(self,deptNo,day,section):
try:
mainPageResponse = self.session.get(self.MAIN_PAGE_URL)
try:
soup = BeautifulSoup(mainPageResponse.text,'lxml')
except:
soup = BeautifulSoup(mainPageResponse.text,'html.parser')
submit_dict = {
"ModuleName": 'QueryCurData',
"EduNo": soup.find("input",{'id':'EduNo'})['value'],
"Desire": 'Y',
"DeptNo": deptNo,
"Grade": '',
"Week": day,
"Section": section,
"CosName": '',
"CurClass": ''
}
response = self.session.post(self.LESSON_URL,data=submit_dict)
root = ET.fromstring(response.text)
dic = {}
for e in root.findall("DataItem"):
dataColumn = {}
for f in e:
dataColumn[f.tag] = f.text
dic[dataColumn["Cos_ID"]] = dataColumn
return dic
except KeyError:
raise NtubNoClassException("課程未開放")
def grab_lessons(self,currentDict):
try:
mainPageResponse = self.session.get(self.MAIN_PAGE_URL)
try:
soup = BeautifulSoup(mainPageResponse.text,'lxml')
except:
soup = BeautifulSoup(mainPageResponse.text,'html.parser')
submit_dict = {
"ModuleName": "DoAddCur",
"Years": soup.find("input",{"id":"SelYear"})["value"],
"Term": soup.find("input",{"id":"SelTerm"})["value"],
"Desire": '',
"OpClass": currentDict["OP_Class"],
"Serial": currentDict["Serial"],
"CurTerm": currentDict["Cur_Term"],
"EduData": soup.find("input",{"id":"EduData"})["value"],
"Contrast": soup.find("input",{"id":"Contrast"})["value"],
"CreditData": soup.find("input",{"id":"CreditData"})["value"],
"AddData": soup.find("input",{"id":"AddData"})["value"],
"EduCourseData": soup.find("input",{"id":"EduCourseData"})["value"],
"OtherData": soup.find("input",{"id":"OtherData"})["value"],
"ConvertData": soup.find("input",{"id":"ConvertData"})["value"]
}
response = self.session.post(self.LESSON_URL,data=submit_dict)
return response.text
except KeyError:
raise NtubNoClassException("課程未開放")
def quit_lessons(self,currentDict):
mainPageResponse = self.session.get(self.MAIN_PAGE_URL)
try:
soup = BeautifulSoup(mainPageResponse.text,'lxml')
except:
soup = BeautifulSoup(mainPageResponse.text,'html.parser')
submit_dict = {
"ModuleName": "DoDelCur",
"Years": soup.find("input",{"id":"SelYear"})["value"],
"Term": soup.find("input",{"id":"SelTerm"})["value"],
"Desire": "",
"OpClass": currentDict["OP_Class"],
"Serial": currentDict["Serial"],
"CurTerm": currentDict["Cur_Term"]
}
response = self.session.post(self.LESSON_URL,data=submit_dict)
return response.text
def export_curriculum(self,thisYear:int,thisTeam:int):
from pprint import pprint
submit_dict = {
'ThisYear':thisYear,
'ThisTeam':thisTeam,
'doQuery':'Y',
'ToExcel':'Y'
}
response = self.session.get(self.CURRICULUM_URL)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
for e in soup.find_all("input",{"id":lambda a: a != None}):
try:
submit_dict[e["id"]] = e["value"]
except:
submit_dict[e["id"]] = ""
submit_dict["ThisYear"] = thisYear
submit_dict["ThisTeam"] = thisTeam
submit_dict["doQuery"] = 'Y'
submit_dict["ToExcel"] = 'Y'
response = self.session.post(self.EXPORT_CURRICULUM_URL,submit_dict)
print(response.text)
def search_curriculum(self,thisYear:int,thisTeam:int):
search_dict = {
'ThisYear':thisYear,
'ThisTeam':thisTeam,
'doQuery':'Y'
}
self.__search_Asp_Utils(self.CURRICULUM_URL,search_dict)
response = self.session.get(self.CURRICULUM_URL,data=search_dict,cookies=self.cookies)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
curriculum = soup.find('table',{'id':'bgBase'})
table = []
for row in curriculum.findAll('tr'):
column = []
for col in row.findAll('td'):
a_tag = col.find('a')
if a_tag != None:
lesson = a_tag.text
teacher = col.text[len(lesson):-4]
classroom = col.text[len(lesson)+len(teacher):]
column.append(f'{lesson}\n{teacher}\n{classroom}') #split it
else:
replaceStr = re.sub(r'\d\d:\d\d\d\d:\d\d','',col.text) #Remove class time
column.append(replaceStr)
table.append(column)
return table
def search_midtern_score(self,seayear:int,seaterm:int):
search_dict = {
'ctl00$ContentPlaceHolder1$SEA_Year':seayear,
'ctl00$ContentPlaceHolder1$SEA_Term':seaterm
}
self.__search_Asp_Utils(self.MIDTERM_URL,search_dict)
response = self.session.post(self.MIDTERM_URL,data=search_dict,cookies=self.cookies)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
score_dict = {}
itemTable = soup.findAll('td',attrs={'width':'380'})
scoreTable = soup.findAll('span',attrs={'id':lambda a: a and len(a) >= 8 and a[-8:] == "_Score_M"})
for i in range(len(itemTable)):
score_dict[itemTable[i].text] = float(scoreTable[i].text.replace('*','') if scoreTable[i].text != "" else "0.00")
return score_dict
    def changepassword(self, newPassword: str):
        if len(newPassword) < 6:
            return
        # the redacted <PASSWORD> tokens here clearly stood for the current and
        # new passwords; restored from context:
        submit_pwd = {
            'txtOri_Pwd': self._password,
            'txtNew_Pwd': newPassword,
            'txtSure_Pwd': newPassword,
            'btnOK': ''
        }
        self.__search_Asp_Utils(self.CHANGEPWD_URL, submit_pwd)
        response = self.session.post(self.CHANGEPWD_URL, data=submit_pwd, cookies=self.cookies)
        self._password = newPassword
def search_all_score(self,seayear:int,seaterm:int):
search_dict = {
'ctl00$ContentPlaceHolder1$SEA_Year':seayear,
'ctl00$ContentPlaceHolder1$SEA_Term':seaterm
}
self.__search_Asp_Utils(self.SCORE_URL,search_dict)
response = self.session.post(self.SCORE_URL,data=search_dict,cookies=self.cookies)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
scoreTable = []
itemTable = soup.findAll('td',attrs={'width':'330'})
midScore = soup.findAll('span',attrs={'id':lambda a: a and len(a) >= 8 and a[-8:] == "_Score_M"})
endScore = soup.findAll('span',attrs={'id':lambda a: a and len(a) >= 6 and a[-6:] == "_Score"})
for i in range(len(itemTable)):
scoreTable.append([itemTable[i].text,
float(midScore[i].text.replace('*','') if midScore[i].text != "" else "0.00"),
float(endScore[i].text.replace('*','') if endScore[i].text != "" else "0.00")])
return scoreTable
def getEduTypeOptions(self):
response = self.session.get(self.LESSON_INFO_URL)
try:
soup = BeautifulSoup(response.text,"lxml")
except:
soup = BeautifulSoup(response.text,"html.parser")
result = soup.find("select",{"id":"ddlEdu"}).find_all("option")
res = dict()
for e in result:
res[e.text] = e["value"]
return res
def getDeptTypeOptions(self,EduTypeOption:str) -> tuple:
response,submit_dict = self.__getEduInfo(EduTypeOption)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
result = soup.find("select",{"id":"ddlDept"}).find_all("option")
res = dict()
for e in result:
res[e.text] = e["value"]
return res
def getClassTypeOptions(self,EduTypeOption:str,DeptTypeOption:str):
response,submit_dict=self.__getDeptInfo(EduTypeOption,DeptTypeOption)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
result = soup.find("select",{"id":"ddlClass"}).find_all("option")
res = dict()
for e in result:
res[e.text] = e["value"]
return res
def __getEduInfo(self,EduTypeOption:str) -> tuple:
submit_dict = dict()
response = self.session.get(self.LESSON_INFO_URL)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
inputData = soup.find_all("input")
for e in inputData:
try:
submit_dict[e["name"]] = e["value"]
except:
submit_dict[e["name"]] = ""
submit_dict["__EVENTTARGET"] = "ddEdu"
submit_dict["ddlEdu"] = self.getEduTypeOptions()[EduTypeOption]
return (self.session.post(self.LESSON_INFO_URL,submit_dict),submit_dict)
def __getDeptInfo(self,EduTypeOption:str,DeptTypeOption:str):
response,submit_dict = self.__getEduInfo(EduTypeOption)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
inputData = soup.find_all("input")
for e in inputData:
try:
submit_dict[e["name"]] = e["value"]
except:
submit_dict[e["name"]] = ""
submit_dict["__EVENTTARGET"] = "ddlDept"
submit_dict["ddlDept"] = self.getDeptTypeOptions(EduTypeOption)[DeptTypeOption]
return (self.session.post(self.LESSON_INFO_URL,submit_dict),submit_dict)
def __getClassInfo(self,Years:int,Term:int,EduTypeOption:str,DeptTypeOption:str,ClassTypeOption:str):
response,submit_dict = self.__getDeptInfo(EduTypeOption,DeptTypeOption)
try:
soup = BeautifulSoup(response.text,'lxml')
except:
soup = BeautifulSoup(response.text,'html.parser')
inputData = soup.find_all("input")
for e in inputData:
try:
submit_dict[e["name"]] = e["value"]
except:
submit_dict[e["name"]] = ""
submit_dict["ddlClass"] = self.getClassTypeOptions(EduTypeOption,DeptTypeOption)[ClassTypeOption]
submit_dict['txtYears'] = str(Years)
submit_dict['txtTerm'] = str(Term)
return (self.session.post(self.LESSON_INFO_URL,submit_dict),submit_dict)
def get_lesson_info(self,Years:int,Term:int,EduTypeOption:str,DeptTypeOption:str,ClassTypeOption):
response,submit_dict = self.__getClassInfo(Years,Term,EduTypeOption,DeptTypeOption,ClassTypeOption)
lst = []
try:
soup = BeautifulSoup(response.text,"lxml")
except:
soup = BeautifulSoup(response.text,"html.parser")
table = soup.find("table",{"id":"dsCurList"})
for row in table.find_all("tr"):
newRow = []
if len(row.find_all("td")) == 11: continue
for column in row.find_all("td"):
newRow.append(column.text.replace('\n','').replace('\r','').replace(' ',''))
lst.append(newRow)
return lst
if __name__ == "__main__":
from __main__ import main
main()
``` |
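A hypothetical usage sketch of `NtubLoginSystem` (module name, credentials, year and term values are placeholders; the real entry point lives in the project's `__main__` module):
```python
from ntubsys import NtubLoginSystem   # assumed import path

system = NtubLoginSystem("student_id", "password")
print(system.name, system.className, system.studentNumber)
for row in system.search_curriculum(110, 1):
    print(row)
```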
{
"source": "323135jx/meiduo_mall",
"score": 2
} |
#### File: apps/goods/views.py
```python
from django.http import JsonResponse
from django.shortcuts import render
from django.views import View
from .models import SKU, GoodsCategory
from django.core.paginator import Paginator,EmptyPage
from .utils import get_breadcrumb
# Create your views here.
# SKU product list endpoint: return paginated product data
from goods.models import SKU  # redundant: SKU is already imported from .models above
# def get_breadcrumb(category_id):
#     ret_dict = {}
#     category = GoodsCategory.objects.get(pk=category_id)
#     if not category.parent:
#         ret_dict['cat1'] = category.name
#     elif not category.parent.parent:
#         ret_dict['cat1'] = category.parent.name
#         ret_dict['cat2'] = category.name
#     else:
#         ret_dict['cat1'] = category.parent.parent.name
#         ret_dict['cat2'] = category.parent.name
#         ret_dict['cat3'] = category.name
class ListView(View):
def get(self, request, category_id):
page = request.GET.get('page')
page_size = request.GET.get('page_size')
ordering = request.GET.get('ordering')
        # 1. Fetch the SKU data for this category, sorted as requested
skus = SKU.objects.filter(
category_id=category_id,
is_launched=True
).order_by(ordering)
        # 2. Paginate according to page and page_size
try:
paginator = Paginator(skus,page_size)
cur_page = paginator.page(page)
except EmptyPage as e:
print(e)
return JsonResponse({
'code':400,
'errmsg':'空页!'
})
ret_list = []
for sku in cur_page:
ret_list.append({
'id':sku.id,
'default_image_url':sku.default_image_url.url,
'name':sku.name,
'price':sku.price
})
        # Return the response
return JsonResponse({
'code':0,
'errmsg':'ok',
'breadcrumb':get_breadcrumb(category_id),
'list':ret_list,
            'count': paginator.num_pages  # total number of pages
})
# Hot-selling products
class HotGoodsView(View):
def get(self, request, category_id):
        # 1. Get the hot items: the two best-selling SKUs in this category
skus = SKU.objects.filter(
category_id=category_id,
is_launched=True
).order_by('-sales')[:2]
        # 2. Build and return the response
ret_list = []
for sku in skus:
ret_list.append({
'id':sku.id,
'name':sku.name,
'price':sku.price,
'default_image_url':sku.default_image_url.url
})
return JsonResponse({
'code':0,
'errmsg':'ok',
'hot_skus':ret_list
})
# Use the search view provided by Haystack to implement search.
# SearchView is the base search-view class.
# Request method: GET
# Request path: search/
# Request params: ?q=华为&page=1&page_size=3
# Response data: by default a complete HTML page is returned, which does not
# fit our API needs, so the return value is adjusted below.
from haystack.views import SearchView
class MySerachView(SearchView):
    # Exact search
def create_response(self):
        # Default SearchView search-view logic
context = self.get_context()
sku_list = []
for search_result in context['page'].object_list:
sku = search_result.object
sku_list.append({
'id': sku.id,
'name': sku.name,
'price':sku.price,
'default_image_url': sku.default_image_url.url,
'searchkey':context['query'],
'page_size':context['paginator'].per_page,
'count':context['paginator'].count,
})
return JsonResponse(sku_list, safe=False)
```
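A hypothetical client call against the paginated `ListView` above; the URL pattern and category id are assumptions, while the query parameters mirror what the view reads from `request.GET`:
```python
import requests

resp = requests.get(
    "http://127.0.0.1:8000/list/115/skus/",       # route and id are assumptions
    params={"page": 1, "page_size": 5, "ordering": "-price"},
)
data = resp.json()
print(data["count"], data["list"])
```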
#### File: meiduo_mall/utils/views.py
```python
from django.http import JsonResponse
# Decorator that checks whether the user is logged in
def login_required(func):
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated:
return func(request, *args, **kwargs)
else:
return JsonResponse({
'code':400,
'errmsg':'您未登录'
})
        # Fuller version (also sets the HTTP status code):
        # return JsonResponse({
        #     'code': 400,
        #     'errmsg': '您未登录'
        # }, status=401)
return wrapper
``` |
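A minimal usage sketch for the `login_required` decorator above (the view function and import path are illustrative only):
```python
from django.http import JsonResponse
# from meiduo_mall.utils.views import login_required   # assumed import path

@login_required
def order_list(request):
    return JsonResponse({"code": 0, "errmsg": "ok"})
```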
{
"source": "3243-poker/poker-agent",
"score": 3
} |
#### File: 3243-poker/poker-agent/consoleplayer.py
```python
from pypokerengine.players import BasePokerPlayer
from time import sleep
import pypokerengine.utils.visualize_utils as U
import pprint
class ConsolePlayer(BasePokerPlayer):
def declare_action(self, valid_actions, hole_card, round_state):
print(U.visualize_declare_action2(valid_actions, hole_card, round_state, self.uuid))
action = self._receive_action_from_console(valid_actions)
print("CONSOLE PLAYER HAS ACTED")
return action
def receive_game_start_message(self, game_info):
print(U.visualize_game_start(game_info, self.uuid))
self._wait_until_input()
def receive_round_start_message(self, round_count, hole_card, seats):
print(U.visualize_round_start(round_count, hole_card, seats, self.uuid))
self._wait_until_input()
def receive_street_start_message(self, street, round_state):
print(U.visualize_street_start(street, round_state, self.uuid))
self._wait_until_input()
def receive_game_update_message(self, new_action, round_state):
print(U.visualize_game_update(new_action, round_state, self.uuid))
self._wait_until_input()
def receive_round_result_message(self, winners, hand_info, round_state):
print(U.visualize_round_result(winners, hand_info, round_state, self.uuid))
self._wait_until_input()
def _wait_until_input(self):
input("Enter some key to continue ...")
    # FIXME: this code would crash if it receives invalid input, so proper
    # error handling should be added (see the sketch after this file).
def _receive_action_from_console(self, valid_actions):
action = input("Enter action to declare: raise, fold, call >> ")
return action
```
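A possible way to address the FIXME above, as a sketch of a replacement for `_receive_action_from_console`: it keeps prompting until one of the valid action names is entered.
```python
def _receive_action_from_console(self, valid_actions):
    valid_names = [a["action"] for a in valid_actions]
    while True:
        action = input("Enter action to declare %s >> " % valid_names)
        if action in valid_names:
            return action
        print("Invalid action, try again.")
```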
#### File: 3243-poker/poker-agent/garbagealgorithm.py
```python
from pypokerengine.players import BasePokerPlayer
from pypokerengine.engine.card import Card
from pypokerengine.utils.card_utils import gen_cards, estimate_hole_card_win_rate, gen_deck
import testrun
import random
NB_SIMULATION = 400
rate = {}
class GeneticPlayer(BasePokerPlayer):
coeff = [0, 0, 0, 0, 0] #curr_round, money_diff, rng, win_rate, curr_street
def __init__(self, config):
self.coeff = config[:]
# self.rate = table
self.curr_round = 0
self.curr_money_diff = 0
self.curr_street = 0
self.rng = 0
self.win_rate = 0
def declare_action(self, valid_actions, hole_card, round_state):
self.rng = random.randint(0, 10)
community_card = round_state['community_card']
# if (len(community_card) == 0):
# hole_card = sorted(hole_card, key = lambda x: Card.from_str(x).to_id())
# #print(Card.from_str(hole_card[0]).to_id(), Card.from_str(hole_card[1]).to_id())
# self.win_rate = self.rate[hole_card[0] + hole_card[1]]
# else:
self.win_rate = estimate_hole_card_win_rate(
nb_simulation=NB_SIMULATION,
nb_player=2,
hole_card=gen_cards(hole_card),
community_card=gen_cards(community_card)
)
act = [0, 0, 0]
for i in range(3):
act[i] += self.coeff[i * 5 + 0] * self.normalize(self.curr_round, 0, 1000)
act[i] += self.coeff[i * 5 + 1] * self.normalize(self.curr_money_diff, -10000, 10000)
act[i] += self.coeff[i * 5 + 2] * self.normalize(self.curr_street, 1, 4)
act[i] += self.coeff[i * 5 + 3] * self.normalize(self.rng, 0, 10)
act[i] += self.coeff[i * 5 + 4] * self.normalize(self.win_rate, 0, 1)
if len(valid_actions) == 3:
action = valid_actions[act.index(max(act))]
else:
#len = 2
action = valid_actions[act.index(max(act[:2]))]
print(action['action'])
return action['action']
'''
if win_rate >= 0.66 and len(valid_actions) == 3:
action = valid_actions[2]
elif win_rate >= 0.33:
action = valid_actions[1] # fetch CALL action info
else:
action = valid_actions[0] # fetch FOLD action info
return action['action']
'''
def normalize(self, v, small, big):
return (v - ((big + small) / 2)) / (big - small)
def receive_game_start_message(self, game_info):
pass
#self.nb_player = game_info['player_num']
    def receive_round_start_message(self, round_count, hole_card, seats):
        # store the round counter under the name declare_action() actually reads
        # (__init__ defines curr_round; the original wrote to current_round)
        self.curr_round = round_count
def receive_street_start_message(self, street, round_state):
if street == "preflop":
self.curr_street = 1
elif street == "flop":
self.curr_street = 2
elif street == "turn":
self.curr_street = 3
elif street == "river":
self.curr_street = 4
def receive_game_update_message(self, action, round_state):
pass
def receive_round_result_message(self, winners, hand_info, round_state):
player1 = round_state["seats"][0]["stack"]
player2 = round_state["seats"][1]["stack"]
self.curr_money_diff = player2 - player1 # I assume I'm always player 2.
#print(curr_money_diff)
class GeneticTrainer():
# def __init__(self, table):
# self.table = table
# self.coeff = []
# self.fitness = []
# self.cycles = 10
def __init__(self):
self.coeff = []
self.cycles = 10
def main(self):
for i in range(10):
self.coeff += [[[random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5,
random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5,
random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5, random.random() - 0.5], 0]]
print(self.coeff)
for i in range(self.cycles):
self.fight()
self.reproduce()
self.mutate()
print(self.coeff)
def fight(self):
for i in range(10):
for j in range(i+1, 10):
print("Player", str(i), self.coeff[i])
print("Player", str(j), self.coeff[j])
perf = testrun.testperf(self.coeff[i][0], self.coeff[j][0])
self.coeff[i][1] += perf
self.coeff[j][1] += -1 * perf
def reproduce(self):
        # note: this keeps the five *lowest*-scoring candidates; if a higher
        # accumulated perf means fitter, sort with reverse=True instead
        self.coeff = sorted(self.coeff, key=lambda x: int(x[1]))[:5]
newlist = []
for i in range(5):
for j in range(i+1, 5):
newlist += [[self.cross(i,j), 0]]
self.coeff = newlist[:]
print(self.coeff)
def cross(self, i, j):
child = []
crossoverpt = random.randint(1, 13)
for k in range(crossoverpt):
child += [self.coeff[i][0][k]]
for k in range(crossoverpt, 15):
child += [self.coeff[j][0][k]]
return child
def mutate(self):
for i in range(10):
r = random.randint(0, 100)
if r > 80:
#mutate
idx = random.randint(0, 14)
mult = (random.random() - 0.5) * 4
self.coeff[i][0][idx] *= mult
self.coeff[i][0][idx] = min(1, self.coeff[i][0][idx])
self.coeff[i][0][idx] = max(-1, self.coeff[i][0][idx])
def precompute():
rate = {}
suit = ['C', 'D', 'H', 'S']
val = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
card = []
for i in suit:
for j in val:
card += [i + j]
print(card)
for i in range(52):
for j in range(52):
if i == j:
continue
card1 = card[i]
card2 = card[j]
print(card1, card2)
win_rate = estimate_hole_card_win_rate(
nb_simulation=1000,
nb_player=2,
hole_card=gen_cards([card1, card2]),
)
rate[card1 + card2] = win_rate
print(len(rate))
    return rate
if __name__ == '__main__':
#rate = precompute()
#x = GeneticTrainer(rate)
x = GeneticTrainer()
x.main()
def my_handler(event, context):
context1 = event['config1']
context2 = event['config2']
perf = testrun.testperf(context1, context2)
return {
'perf': perf
}
``` |
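A quick standalone check of the `normalize()` helper used in `declare_action()` above: it maps the interval `[small, big]` linearly onto `[-0.5, 0.5]`, so every feature feeds the weighted sum on a comparable scale.
```python
def normalize(v, small, big):
    return (v - ((big + small) / 2)) / (big - small)

assert normalize(0, 0, 10) == -0.5
assert normalize(5, 0, 10) == 0.0
assert normalize(10, 0, 10) == 0.5
```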
{
"source": "3-24/Competitive-Programming",
"score": 3
} |
#### File: Competitive-Programming/baekjoon/11726.py
```python
def main():
num = int(raw_input())
print tiling(num) % 10007
def tiling(n):
T = []
for i in range (n):
if i == 0:
T.append(1)
elif i == 1:
T.append(2)
else:
T.append(T[i-1]+T[i-2])
return T[n-1]
main()
```
#### File: Competitive-Programming/baekjoon/1463.py
```python
def min_operation(X):
R = [0,0]
for i in range(2,X+1):
r1, r2, r3 = i+1, i+1, i+1
if i % 3 == 0:
r1 = R[i/3]+1
if i % 2 == 0:
r2 = R[i/2]+1
r3 = R[i-1]+1
R.append(min(r1, r2, r3))
return R[X]
X = int(raw_input())
print min_operation(X)
```
#### File: Competitive-Programming/baekjoon/1707.py
```python
from sys import stdin
input = stdin.readline
test_nbr = int(input()) # number of test cases
def isBipartite():
# v and e is number of vertexes and edges, respectively.
v,e = map(int,input().split())
# None if not colored
# If colored, there are two bool cases: False and True.
colorArr = [None for _ in range(v+1)]
adjacentPoints = [[] for _ in range(v+1)]
for i in range(e):
v1, v2 = map(int, input().split()) # edge (v1, v2) is expected input
adjacentPoints[v1].append(v2)
adjacentPoints[v2].append(v1)
queue = []
node = set([i for i in range(1,v+1)]) # for non-connected graph
while bool(node): # node is nonempty
c = node.pop()
queue.append(c)
while bool(queue): # queue is nonempty
start = queue[0]
for u in adjacentPoints[start]:
if colorArr[u] is not None:
if colorArr[u] is colorArr[start]:
return False
else:
colorArr[u] = not colorArr[start]
queue.append(u)
del queue[0]
if start != c:
node.remove(start)
return True
for _ in range(test_nbr):
print('YES' if isBipartite() else 'NO')
```
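An assumed sample run for the solver above (hand-checked, not from the judge): a triangle is an odd cycle and therefore not bipartite.
```python
# stdin:
#   1        <- number of test cases
#   3 3      <- vertices, edges
#   1 2
#   2 3
#   1 3
# stdout:
#   NO
```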
#### File: Competitive-Programming/baekjoon/1932.py
```python
def main(tri,n):
T = []
for i in range (n):
if i == 0:
T.append(tri[0])
else:
T.append(tri[i])
for j in range (i+1):
if j == 0:
T[i][j] += T[i-1][0]
elif j == i:
T[i][j] += T[i-1][j-1]
else:
T[i][j] += max(T[i-1][j-1],T[i-1][j])
return max(T[n-1])
size = int(raw_input())
triangle = []
for i in range (size):
tri_temp = map(int,raw_input().split())
triangle.append(tri_temp)
print main(triangle,size)
'''
3
1
1 2
1 2 3
'''
```
#### File: Competitive-Programming/baekjoon/2156.py
```python
def wine_point1(li):
sum = 0
for i in li:
sum += i
R=[]
l = len(li)
for i in range (l):
if i == 0:
R.append([0,0,li[i]])
elif i == 1:
R.append([0,li[i-1],li[i]])
elif i == 2:
R.append([li[i-2],li[i-1],li[i]])
else:
r1 = min(R[i-3])+li[i-2]
r2 = min(R[i-3][1:3])+li[i-1]
r3 = R[i-3][2]+li[i]
R.append([r1,r2,r3])
return sum - min(R[l-1])
def wine_point2(A):
    # n is the number of glasses already read in below; the original re-read it
    # from stdin here, which conflicts with the input already consumed by main
    n = len(A)
    if n <= 2:
        return sum(A)
    else:
        P = [0] * n
        Q = [0] * n
        P[0], P[1], Q[0], Q[1] = A[0], A[1], A[0], A[0] + A[1]
        for i in range(2, n):
            P[i] = max(P[i - 2], Q[i - 2], Q[i - 3]) + A[i]
            Q[i] = P[i - 1] + A[i]
        return max(P[n - 2], P[n - 1], Q[n - 2], Q[n - 1])
num = int(raw_input())
wine = []
for i in range (num):
wine.append(int(raw_input()))
print wine_point2(wine)
```
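A hand-checked sample for `wine_point2` (values are my own, assuming the function is used as a pure helper rather than run as a stdin script): with the rule that no three consecutive glasses may be drunk, the best choice is 6 + 10 + 9 + 8 = 33.
```python
assert wine_point2([6, 10, 13, 9, 8, 1]) == 33
```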
#### File: Competitive-Programming/leetcode/2.py
```python
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
def addTwoNumbers(list1, list2, node, digit):
n = digit
if (list1 != None):
n += list1.val
if (list2 != None):
n += list2.val
q = n // 10
r = n % 10
node.val = r
node.next = None
if (list1 == None):
next_list1 = None
else:
next_list1 = list1.next
if (list2 == None):
next_list2 = None
else:
next_list2 = list2.next
if (next_list1 == None and next_list2 == None and q == 0):
return
node.next = ListNode(0)
addTwoNumbers(next_list1, next_list2, node.next, q)
startNode = ListNode(0)
addTwoNumbers(l1, l2, startNode, 0)
return startNode
```
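For reference, the singly linked list node that LeetCode supplies alongside this solution looks roughly like this (not part of the submitted code):
```python
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
```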
#### File: Competitive-Programming/nypc/laserlamp.py
```python
def main():
ine = raw_input().split()
W = int(ine[0])
H = int(ine[1])
map = [[None]*W for i in xrange(H)]
for i in xrange (H):
s = str(raw_input())
for j in xrange (W):
if s[j] == '.':
map[i][j] = 0
elif s[j] == '/':
map[i][j] = 1
elif s[j] == '\\':
map[i][j] = 2
elif str(s[j]) == 'O':
map[i][j] = 3
a, b = i,j
def pathfinder(I):
x,y,dir = I[0],I[1],I[2]
if x is None:
return [None]*3
if dir == 'U':
while True:
y -= 1
if map[y][x] == 1:
return [x,y,'R']
elif map[y][x] == 2:
return [x,y,'L']
elif map[y][x] == 0 and y == 0:
return [x,-1,'U']
elif map[y][x] == 3 and y != I[1]:
return [None]*3
elif dir == 'D':
while True:
y += 1
if map[y][x] == 1:
return [x,y,'L']
elif map[y][x] == 2:
return [x,y,'R']
elif map[y][x] == 0 and y == H-1:
return [x,-1,'D']
elif map[y][x] == 3 and y != I[1]:
return [None]*3
elif dir == 'L':
while True:
x -= 1
if map[y][x] == 1:
return [x,y,'D']
elif map[y][x] == 2:
return [x,y,'U']
elif map[y][x] == 0 and x == 0:
return [-1,y,'L']
elif map[y][x] == 3 and x != I[0]:
return [None]*3
elif dir == 'R':
while True:
x += 1
if map[y][x] == 1:
return [x,y,'U']
elif map[y][x] == 2:
return [x,y,'D']
elif map[y][x] == 0 and x == W-1:
return [-1,y,'R']
elif map[y][x] == 3 and x != I[0]:
return [None]*3
guide = [[a,b,'U'],[a,b,'D'],[a,b,'L'],[a,b,'R']]
r = -1
while r == -1:
        # print guide  # debug leftover; would corrupt the judged output
for i in xrange (4):
guide[i] = pathfinder(guide[i])
if -1 in guide[i]:
r = i
    # print guide[r]  # debug leftover; would corrupt the judged output
if guide[r][2] == 'U' or guide[r][2] == 'R':
print guide[r][2], guide[r][1]
else:
print guide[r][2], guide[r][0]
main()
```
#### File: Competitive-Programming/nypc/validdeck.py
```python
def main():
N = int(raw_input())
stack = set([])
for i in xrange(N):
stack.add(raw_input())
if len(stack) <= 3:
print 'valid'
else:
print 'invalid'
main()
``` |
{
"source": "3-24/id0-rsa.pub",
"score": 3
} |
#### File: id0-rsa.pub/01_Hello_PGP/solution.py
```python
from subprocess import run, PIPE
def check(password,filedata):
print("Trying passphrase={:s}".format(password))
cmd = run("gpg --pinentry-mode loopback --passphrase '{:s}' -d {:s}".format(password,filedata), shell=True, stdout=PIPE)
if cmd.returncode == 0:
output = cmd.stdout.decode('utf-8')
print('plaintext:')
print(output)
return True
else:
return False
def main():
f = open('/usr/share/dict/words','r')
lines = f.readlines()
for word in lines:
if "'" in word:
continue
word = word.strip()
if check(word,'message.txt'):
break
main()
```
#### File: id0-rsa.pub/05_Affine_Cipher/solution.py
```python
f = open('ciphertext.txt','r')
ciphertext = f.read()[:-1]
f.close()
alphabets = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ,.'
p = len(alphabets) # (mod p)
###############################
#### DECRYPT BY OCCURRENCE ####
###############################
alphabets_count = {}
for c in ciphertext:
if c not in alphabets_count:
alphabets_count[c] = 1
else:
alphabets_count[c] += 1
print(alphabets_count)
c = max(alphabets_count, key = alphabets_count.get)
print('{:s} has maximum occurrence. It would be space.'.format(c))
x = alphabets.index(c)
y = alphabets.index(' ')
print('#{:s}{:d}# decrypted to #{:s}{:d}#'.format(c,x,' ', y))
#############################################
#### BRUTE-FORCE ON a OF LINEAR EQUATION ####
#############################################
# y = ax+b (mod p)
def decrypt(text,a,b):
plaintext = ''
for i in range (len(text)):
c = text[i]
c_index = alphabets.index(c)
plaintext += str(alphabets[(a*c_index+b)%p])
return plaintext
for a in range (1,p):
b = (y - a*x) % p
print(a,b,decrypt(ciphertext,a,b)[:20])
# a=21, b=10 looks fine
plaintext = decrypt(ciphertext,21,10)
print(plaintext)
import hashlib
print(hashlib.md5(plaintext.encode('utf-8')).hexdigest())
```
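A side note on the recovered key: `decrypt()` applies `y = a*x + b` directly to ciphertext indices, so `(a, b) = (21, 10)` is the decryption map; the corresponding encryption multiplier is its modular inverse mod 29 (sketch, needs Python 3.8+ for the three-argument `pow`):
```python
a = 21
a_enc = pow(a, -1, 29)        # modular inverse of 21 mod 29, i.e. 18
assert (a * a_enc) % 29 == 1
```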
#### File: id0-rsa.pub/18_Intro_to_Hashing/solution.py
```python
import hashlib
def md5_string(string):
return hashlib.md5(string.encode('utf-8')).hexdigest()
def sha256_string(string):
return hashlib.sha256(string.encode('utf-8')).hexdigest()
hash1 = sha256_string('id0-rsa.pub')
hash2 = md5_string(hash1)
print(hash2)
``` |
{
"source": "3-24/nalgang.py",
"score": 2
} |
#### File: 3-24/nalgang.py/api.py
```python
from flask import Flask, request
from attendance import Member
app = Flask(__name__)
@app.route('/')
def hello_world():
return "Welcome to youngseok's server."
@app.route('/nalgang', methods=['GET'])
def nalgang_point():
userID = request.args.get('id', type=int)
userGuild = request.args.get('guild', type=int)
m = Member(None)
m.id = userID
m.guild = userGuild
return str(m.get_point())
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=50000)  # pass the port as an int, as werkzeug expects
``` |
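A hypothetical client call for the endpoint above (the IDs are placeholders; host and port match the `app.run` call):
```python
import requests

r = requests.get("http://localhost:50000/nalgang", params={"id": 1234, "guild": 5678})
print(r.text)   # the member's nalgang point total, returned as a string
```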
{
"source": "3-24/uncertainly.mirror",
"score": 3
} |
#### File: 3-24/uncertainly.mirror/changeMathjaxDelimeter.py
```python
def checkInline(s):
i = 0
while True:
try:
if s[i] == '$':
return i
except IndexError:
return -1
i += 1
def checkOutline(s):
return s == "<p class='md-math-block'>$"
def main():
f = open("./output.txt", "w")
while True:
s = input()
if s == ':qa!':
f.close()
break
else:
if checkOutline(s):
f.write('<p>\[\n')
while True:
t = input()
if t == '$</p>':
f.write('\]</p>\n')
break
else:
f.write(t+'\n')
elif checkInline(s) >= 0:
converted_string = str()
while checkInline(s) >= 0:
i = checkInline(s)
converted_string += s[:i] + '\('
s = s[i+1:]
j = checkInline(s)
converted_string += s[:j] + '\)'
s = s[j+1:]
converted_string += s
f.write(converted_string + '\n')
else:
f.write(s+'\n')
main()
``` |
{
"source": "32-52/IT-VSC",
"score": 2
} |
#### File: 32-52/IT-VSC/dataimporter.py
```python
import datetime
import json
from requests.auth import HTTPBasicAuth
from concurrent.futures import ThreadPoolExecutor
from tkinter import filedialog
import logging
import tkinter as tk
import requests
APP_NAME = 'VDS'
__version__ = '20180601'
APP_EMAIL = '<EMAIL>'
HEADERS = {'User-Agent': f'{APP_NAME}/{__version__} ({APP_EMAIL})',
'Content-Type': 'application/json'}
LOGGER_STR_FORMAT = u'%(filename)s[LINE:%(lineno)d] Level:%(levelname)-8s [%(asctime)s] %(message)s'
THREAD_POOL_SIZE: int = 16
LOGGER_LEVEL = logging.INFO
LOG_FILENAME = 'dataimporter.log'
ELASTIC_SEARCH_HOST = 'localhost'
ELASTIC_SEARCH_PORT = 9200
ELASTIC_SEARCH_INDEX_NAME = 'vds_old'
ELASTIC_SEARCH_INDEX_DOC = 'doc'
ELASTIC_SEARCH_LOGIN = 'admin'
ELASTIC_SEARCH_PASSWORD = '<PASSWORD>'
ELASTIC_AUTH = HTTPBasicAuth(ELASTIC_SEARCH_LOGIN, ELASTIC_SEARCH_PASSWORD)
def get_filepath() -> str:
root = tk.Tk()
root.withdraw()
return filedialog.askopenfilename()
def create_index(index_name: str):
index_data = {
"mappings": {
"doc": {
"properties": {
"address_geodata": {
"type": "geo_point"
},
"address_metro_geodata": {
"type": "geo_point"
}
}
}
}
}
es_response = requests.put(url=f'https://{ELASTIC_SEARCH_HOST}:{ELASTIC_SEARCH_PORT}/{index_name}',
headers=HEADERS,
auth=ELASTIC_AUTH,
json=index_data,
verify=False)
    if es_response.status_code != 201:
logging.warning(f'Code:{es_response.status_code} {es_response.text}')
def load_vacancy_data_in_es(data: str, elsaticsearch_index_name: str):
json_data: dict = json.loads(data.encode('utf-8'))
try:
address_lat = json_data['address']['lat']
address_lng = json_data['address']['lng']
json_data['address_geodata'] = f"{address_lat},{address_lng}"
except TypeError:
logging.warning('No address geodata')
try:
address_metro_lat = json_data['address']['metro']['lat']
address_metro_lng = json_data['address']['metro']['lng']
json_data['address_metro_geodata'] = f"{address_metro_lat},{address_metro_lng}"
except TypeError:
logging.warning('No metro address geodata')
es_response = requests.post(url=f'https://{ELASTIC_SEARCH_HOST}:{ELASTIC_SEARCH_PORT}/'
f'{elsaticsearch_index_name}/{ELASTIC_SEARCH_INDEX_DOC}',
headers=HEADERS,
auth=ELASTIC_AUTH,
json=json_data,
verify=False)
    if es_response.status_code != 201:
logging.warning(f'Code:{es_response.status_code} {es_response.text}')
if __name__ == '__main__':
logging.basicConfig(format=LOGGER_STR_FORMAT,
level=LOGGER_LEVEL,
filename=LOG_FILENAME)
logging.info('Starting vds...')
logging.info(f'Loading vacancies data...')
current_datetime = datetime.datetime.now()
es_index_name = f'{ELASTIC_SEARCH_INDEX_NAME}-' \
f'{current_datetime.year}' \
f'{str(current_datetime.month).zfill(2)}' \
f'{str(current_datetime.day).zfill(2)}'
logging.info(f'Creating index {es_index_name}')
create_index(es_index_name)
with open(file=get_filepath(), encoding='utf-8') as file:
with ThreadPoolExecutor(max_workers=THREAD_POOL_SIZE) as executor:
for line in file.readlines():
executor.submit(load_vacancy_data_in_es, line, es_index_name)
executor.shutdown()
logging.info(f'Loading vacancies data complete')
``` |
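A possible follow-up query against the index built above, using the `geo_point` mapping created in `create_index()`; the distance and coordinates are placeholders:
```python
geo_query = {
    "query": {
        "bool": {
            "filter": {
                "geo_distance": {
                    "distance": "2km",
                    "address_geodata": {"lat": 55.75, "lon": 37.62},
                }
            }
        }
    }
}
# requests.post(f'https://{ELASTIC_SEARCH_HOST}:{ELASTIC_SEARCH_PORT}/{es_index_name}/_search',
#               headers=HEADERS, auth=ELASTIC_AUTH, json=geo_query, verify=False)
```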
{
"source": "32-52/LanitBusScheduleBot",
"score": 3
} |
#### File: 32-52/LanitBusScheduleBot/bus_schedule.py
```python
from models import Destinations, Locations
from settings import logging
from datetime import datetime
import requests
import settings
class LanitBusInfo:
@staticmethod
def get_nearest_bus(location: Locations, destination: Destinations) -> str:
logging.info('Getting nearest bus started...')
location_data = None
if location == Locations.MARINA_ROSHHA:
location_data = 'm'
elif location == Locations.PLOSHHAD_ILICHA:
location_data = 'p'
elif location == Locations.RIZHSKAJA:
location_data = 'r'
destination_data = None
if destination == Destinations.TO_METRO:
destination_data = 'to_metro'
elif destination == Destinations.TO_OFFICE:
destination_data = 'to_office'
response = requests.get(
f'https://transport.lanit.ru/api/times/{location_data}').json()
message_format = f'Сейчас {settings.days[datetime.today().weekday()]} {response["info"]["now"]}\n' \
f'Метро: {location.value}\n' \
f'Куда: {destination.value}\n'
if datetime.today().weekday() > 4:
logging.debug(
f'message_format {type(message_format)} = {message_format}')
logging.info('Getting nearest bus completed')
message_format += 'Сегодня маршруток не будет'
return message_format
elif response['time'][destination_data]['nearest'] is not False:
message_format += f'Ближайшая маршрутка будет через {response["time"][destination_data]["left"]} ' \
f'в {response["time"][destination_data]["nearest"]}\n'
if response["time"][destination_data]["next"] is not False:
message_format += f'Следующая будет в {response["time"][destination_data]["next"]}\n'
else:
message_format += f'Маршруток больше сегодня не будет\n'
if response['info']['warning'] is not False:
message_format += f"Важно: {response['info'][destination_data]['warning']}"
logging.debug(
f'message_format {type(message_format)} = {message_format}')
logging.info('Getting nearest bus completed')
return message_format
elif response['time'][destination_data]['nearest'] is False:
message_format += f'Сегодня маршруток не будет.\n'
if response['info']['warning'] is not False:
message_format += f"Предупреждение: {response['info'][destination_data]['warning']}"
logging.debug(
f'message_format {type(message_format)} = {message_format}')
logging.info('Getting nearest bus completed')
return message_format
else:
message_format = 'К сожалению не удалось получить расписание\n'
return message_format
```
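A minimal usage sketch for the class above (the enum members are the ones referenced inside `get_nearest_bus`):
```python
from models import Destinations, Locations
from bus_schedule import LanitBusInfo

print(LanitBusInfo.get_nearest_bus(Locations.MARINA_ROSHHA, Destinations.TO_METRO))
```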
#### File: 32-52/LanitBusScheduleBot/views.py
```python
from settings import logging, debug_mode, user_sessions
from models import Models, Locations, Destinations
from bus_schedule import LanitBusInfo
from telebot import types, TeleBot
# -=-=-=-=-=-=-=-=-=-=-=-=- COMMONS VIEWS -=-=-=-=-=-=-=-=-=-=-=-=-
class View():
def __init__(self, user_session: dict = None):
logging.debug(f'{type(self).__name__} | Init started...')
self._user_session = user_session
self._keyboard = types.InlineKeyboardMarkup()
self._create_keyboard_header()
self._create_keyboard_content()
self._create_keyboard_footer()
self._message_text = self._set_message_text()
logging.debug(f'{type(self).__name__} | Init complited')
def _create_keyboard_header(self):
logging.debug(
f'{type(self).__name__} | Creating keyboard header started...')
buttons = []
        logging.warning('debug_mode=%s (type %s)', debug_mode, type(debug_mode))
if debug_mode:
user_session_button = types.InlineKeyboardButton(
text="user session", callback_data=encode_data(GetUserInfo.__name__))
buttons.append(user_session_button)
self._keyboard.add(*buttons)
logging.debug(
f'{type(self).__name__} | Creating keyboard header completed')
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
# Put your action here
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _create_keyboard_footer(self):
logging.debug(
f'{type(self).__name__} | Create keyboard footer started...')
buttons = []
        if type(self).__name__ != HelpMenu.__name__:
buttons.append(types.InlineKeyboardButton(
text="Помощь", callback_data=encode_data(HelpMenu.__name__)))
        if type(self).__name__ != StartMenu.__name__:
buttons.append(types.InlineKeyboardButton(
text="На главную", callback_data=encode_data(StartMenu.__name__)))
self._keyboard.add(*buttons)
logging.debug(
f'{type(self).__name__} | Create keyboard footer completed')
def _set_message_text(self):
return 'BASIC VIEW'
def get_message_text(self):
return self._message_text
def get_keyboard(self):
return self._keyboard
class StartMenu(View):
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
select_destination_button = types.InlineKeyboardButton(
text="Узнать расписание", callback_data=encode_data(GetBusSchedule.__name__))
self._keyboard.add(select_destination_button)
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _set_message_text(self):
return "Данный бот предоставляет расписание маршруток компании ЛАНИТ (г. Москва, ул. Мурманский проезд 14к1)."
class HelpMenu(View):
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
switch_button = types.InlineKeyboardButton(
text='Создатель', url="https://t.me/ASvetlov92")
btn_my_site = types.InlineKeyboardButton(
text='GitHub', url='https://github.com/32-52/LanitBusScheduleBot')
self._keyboard.add(switch_button, btn_my_site)
logging.debug(
f'self._keyboard {type(self._keyboard)} = {self._keyboard}')
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _set_message_text(self):
return "Если возникли проблемы с ботом или есть предложения по улучшению, то свяжитесь со мной\nЕсли этот бот оказался полезен, то буду очень рад звездочке"
class GetUserInfo(View):
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
logging.debug(
f'self._keyboard {type(self._keyboard)} = {self._keyboard}')
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _set_message_text(self):
return str(user_sessions)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# -=-=-=-=-=-=-=-=-=-=-=-=- BUSSCHEDULES VIEWS -=-=-=-=-=-=-=-=-=-=-=-=-
class GetBusSchedule(View):
pass
class SelectDestination(GetBusSchedule):
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
buttons = []
for destination in Destinations:
buttons.append(types.InlineKeyboardButton(
text=destination.value, callback_data=encode_data(GetBusSchedule.__name__, destination)))
self._keyboard.add(*buttons)
logging.debug(
f'self._keyboard {type(self._keyboard)} = {self._keyboard}')
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _set_message_text(self):
return "Куда нужно ехать?"
class SelectLocation(GetBusSchedule):
def _create_keyboard_content(self):
logging.debug(
f'{type(self).__name__} | Create keyboard content started...')
buttons = []
for location in Locations:
buttons.append(types.InlineKeyboardButton(
text=location.value, callback_data=encode_data(GetBusSchedule.__name__, location)))
self._keyboard.add(*buttons)
logging.debug(
f'self._keyboard {type(self._keyboard)} = {self._keyboard}')
logging.debug(
f'{type(self).__name__} | Create keyboard content completed')
def _set_message_text(self):
return "Какое метро нам нужно?"
class ShowSheduleResult(GetBusSchedule):
def _create_keyboard_content(self):
shedule_site_button = types.InlineKeyboardButton(
text='Посмотреть на сайте', url='https://transport.lanit.ru/')
self._keyboard.add(shedule_site_button)
def _set_message_text(self):
return LanitBusInfo.get_nearest_bus(location=self._user_session['Locations'],
destination=self._user_session['Destinations'])
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# -=-=-=-=-=-=-=-=-=-=-=-=- ENCODE DECODE CALLBACK DATA -=-=-=-=-=-=-=-=-=-=-=-=-
def decode_data(data: str):
logging.debug(f'data {type(data)} = {data}')
try:
splitted_data = data.split('|')
view_class_name_str = splitted_data[0]
logging.debug(
f'view_class_name_str {type(view_class_name_str)} = {view_class_name_str}')
model_class_name_str = splitted_data[1]
logging.debug(
f'model_class_name_str {type(model_class_name_str)} = {model_class_name_str}')
model_value_str = splitted_data[2]
logging.debug(
f'model_value_str {type(model_value_str)} = {model_value_str}')
except IndexError:
return(StartMenu, None)
view_class = None
model_class = None
model_value = None
for cls in View.__subclasses__():
if cls.__name__ in view_class_name_str:
view_class = cls
break
if model_class_name_str is not None and model_value_str is not None:
for cls in Models.__subclasses__():
if cls.__name__ in model_class_name_str:
model_value = cls(model_value_str)
break
return (view_class, model_value)
else:
return(view_class, None)
def encode_data(view_class: View, model_class: Models = None):
logging.debug(f'view_class {view_class}')
logging.debug(f'model_class {type(model_class)} = {model_class}')
if model_class is not None:
return f'{view_class}|{type(model_class).__name__}|{model_class.value}'
else:
return f'{view_class}|None|None'
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
``` |
{
"source": "326th/Roughness-measurement-respository",
"score": 3
} |
#### File: Data acquisition/GPS and Roughness Automation/Roughness_measure.py
```python
import time
import network
from machine import Pin
from umqtt.robust import MQTTClient
import imu
TIMES = 100
SLEEP = 0.1
## imu set up
acc = imu.acc
save = [0,0,0]
def diff(start, end):
re = [0,0,0]
for x in range (3):
re[x] = start[x] - end[x]
return re
def copy(target):
re = [0,0,0]
for x in range (3):
re[x] = target[x]
return re
def get_acc():
global acc, save
save = copy(acc)
imu.update()
return abs(diff(save, acc)[1])
def get_avg_acc(times,sleep_time):
avg = 0
for itera in range(times):
avg += get_acc()
time.sleep(sleep_time)
return avg/times
## connect wifi
led_iot = Pin(12, Pin.OUT)
led_wifi = Pin(2, Pin.OUT)
led_wifi.value(1) # turn it off
led_iot.value(1)
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
print("*** Connecting to WiFi...")
wlan.connect("WIFI_NAME","WIFI_PASSWORD")
while not wlan.isconnected():
time.sleep(0.5)
print("*** Wifi connected")
led_wifi.value(0)
## connect broker
mqtt = MQTTClient("Acc","MQTT_BROKER")
print("*** Connecting to MQTT broker...")
mqtt.connect()
print("*** MQTT broker connected")
led_iot.value(0)
while True:
mqtt.publish("ku/daq2020/cosmic/acc",str(get_avg_acc(TIMES,SLEEP)))
``` |
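A desktop-side counterpart for checking the values published above, written against the paho-mqtt 1.x API (paho is not part of this MicroPython script; the broker name reuses the placeholder):
```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    print(msg.topic, float(msg.payload))

client = mqtt.Client()
client.on_message = on_message
client.connect("MQTT_BROKER")
client.subscribe("ku/daq2020/cosmic/acc")
client.loop_forever()
```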
{
"source": "327585419/bitcash",
"score": 2
} |
#### File: tests/network/test_services.py
```python
import pytest
import bitcash
from bitcash.network.services import (
BitcoinDotComAPI, BitcoreAPI, NetworkAPI, set_service_timeout
)
from tests.utils import (
catch_errors_raise_warnings, decorate_methods, raise_connection_error
)
MAIN_ADDRESS_USED1 = 'qrg2nw20kxhspdlec82qxrgdegrq23hyuyjx2h29sy'
MAIN_ADDRESS_USED2 = 'qpr270a5sxphltdmggtj07v4nskn9gmg9yx4m5h7s4'
MAIN_ADDRESS_UNUSED = 'qzxumj0tjwwrep698rv4mnwa5ek3ddsgxuvcunqnjx'
TEST_ADDRESS_USED1 = 'qrnuzdzleru8c6qhpva20x9f2mp0u657luhfyxjep5'
TEST_ADDRESS_USED2 = 'qprralpnpx6zrx3w2aet97u0c6rcfrlp8v6jenepj5'
TEST_ADDRESS_USED3 = 'qpjm4n7m4r6aufkxxy5nqm5letejdm4f5sn6an6rsl'
TEST_ADDRESS_UNUSED = 'qpwn6qz29s5rv2uf0cxd7ygnwdttsuschczaz38yc5'
def all_items_common(seq):
initial_set = set(seq[0])
intersection_lengths = [len(set(s) & initial_set) for s in seq]
return all_items_equal(intersection_lengths)
def all_items_equal(seq):
initial_item = seq[0]
return all(item == initial_item for item in seq if item is not None)
def test_set_service_timeout():
original = bitcash.network.services.DEFAULT_TIMEOUT
set_service_timeout(3)
updated = bitcash.network.services.DEFAULT_TIMEOUT
assert original != updated
assert updated == 3
set_service_timeout(original)
class MockBackend(NetworkAPI):
IGNORED_ERRORS = NetworkAPI.IGNORED_ERRORS
GET_BALANCE_MAIN = [raise_connection_error]
GET_TRANSACTIONS_MAIN = [raise_connection_error]
GET_UNSPENT_MAIN = [raise_connection_error]
GET_BALANCE_TEST = [raise_connection_error]
GET_TRANSACTIONS_TEST = [raise_connection_error]
GET_UNSPENT_TEST = [raise_connection_error]
class TestNetworkAPI:
def test_get_balance_main_equal(self):
results = [call(MAIN_ADDRESS_USED2) for call in NetworkAPI.GET_BALANCE_MAIN]
assert all(result == results[0] for result in results)
def test_get_balance_main_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_balance(MAIN_ADDRESS_USED2)
def test_get_balance_test_equal(self):
results = [call(TEST_ADDRESS_USED2) for call in NetworkAPI.GET_BALANCE_TEST]
assert all(result == results[0] for result in results)
def test_get_balance_test_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_balance_testnet(TEST_ADDRESS_USED2)
# FIXME: Bitcore.io only returns unspents
# def test_get_transactions_main_equal(self):
# results = [call(MAIN_ADDRESS_USED1)[:100] for call in NetworkAPI.GET_TRANSACTIONS_MAIN]
# assert all_items_common(results)
def test_get_transactions_main_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_transactions(MAIN_ADDRESS_USED1)
def test_get_transactions_test_equal(self):
results = [call(TEST_ADDRESS_USED2)[:100] for call in NetworkAPI.GET_TRANSACTIONS_TEST]
assert all_items_common(results)
def test_get_transactions_test_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_transactions_testnet(TEST_ADDRESS_USED2)
def test_get_unspent_main_equal(self):
results = [call(MAIN_ADDRESS_USED2) for call in NetworkAPI.GET_UNSPENT_MAIN]
assert all_items_equal(results)
def test_get_unspent_main_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_unspent(MAIN_ADDRESS_USED1)
def test_get_unspent_test_equal(self):
results = [call(TEST_ADDRESS_USED3) for call in NetworkAPI.GET_UNSPENT_TEST]
assert all_items_equal(results)
def test_get_unspent_test_failure(self):
with pytest.raises(ConnectionError):
MockBackend.get_unspent_testnet(TEST_ADDRESS_USED2)
@decorate_methods(catch_errors_raise_warnings, NetworkAPI.IGNORED_ERRORS)
class TestBitcoinDotComAPI:
def test_get_balance_return_type(self):
assert isinstance(BitcoinDotComAPI.get_balance(MAIN_ADDRESS_USED1), int)
def test_get_balance_main_used(self):
assert BitcoinDotComAPI.get_balance(MAIN_ADDRESS_USED1) > 0
def test_get_balance_main_unused(self):
assert BitcoinDotComAPI.get_balance(MAIN_ADDRESS_UNUSED) == 0
def test_get_balance_test_used(self):
assert BitcoinDotComAPI.get_balance_testnet(TEST_ADDRESS_USED2) > 0
def test_get_balance_test_unused(self):
assert BitcoinDotComAPI.get_balance_testnet(TEST_ADDRESS_UNUSED) == 0
def test_get_transactions_return_type(self):
assert iter(BitcoinDotComAPI.get_transactions(MAIN_ADDRESS_USED1))
def test_get_transactions_main_used(self):
assert len(BitcoinDotComAPI.get_transactions(MAIN_ADDRESS_USED1)) >= 218
def test_get_transactions_main_unused(self):
assert len(BitcoinDotComAPI.get_transactions(MAIN_ADDRESS_UNUSED)) == 0
def test_get_transactions_test_used(self):
assert len(BitcoinDotComAPI.get_transactions_testnet(TEST_ADDRESS_USED2)) >= 444
def test_get_transactions_test_unused(self):
assert len(BitcoinDotComAPI.get_transactions_testnet(TEST_ADDRESS_UNUSED)) == 0
def test_get_unspent_return_type(self):
assert iter(BitcoinDotComAPI.get_unspent(MAIN_ADDRESS_USED1))
def test_get_unspent_main_used(self):
assert len(BitcoinDotComAPI.get_unspent(MAIN_ADDRESS_USED2)) >= 1
def test_get_unspent_main_unused(self):
assert len(BitcoinDotComAPI.get_unspent(MAIN_ADDRESS_UNUSED)) == 0
def test_get_unspent_test_used(self):
assert len(BitcoinDotComAPI.get_unspent_testnet(TEST_ADDRESS_USED2)) >= 194
def test_get_unspent_test_unused(self):
assert len(BitcoinDotComAPI.get_unspent_testnet(TEST_ADDRESS_UNUSED)) == 0
@decorate_methods(catch_errors_raise_warnings, NetworkAPI.IGNORED_ERRORS)
class TestBitcoreAPI:
def test_get_balance_return_type(self):
assert isinstance(BitcoreAPI.get_balance(MAIN_ADDRESS_USED1), int)
def test_get_balance_main_used(self):
assert BitcoreAPI.get_balance(MAIN_ADDRESS_USED1) > 0
def test_get_balance_main_unused(self):
assert BitcoreAPI.get_balance(MAIN_ADDRESS_UNUSED) == 0
def test_get_balance_test_used(self):
assert BitcoreAPI.get_balance_testnet(TEST_ADDRESS_USED2) > 0
def test_get_balance_test_unused(self):
assert BitcoreAPI.get_balance_testnet(TEST_ADDRESS_UNUSED) == 0
def test_get_transactions_return_type(self):
assert iter(BitcoreAPI.get_transactions(MAIN_ADDRESS_USED1))
# FIXME: Bitcore.io only returns 10 elements
# def test_get_transactions_main_used(self):
# assert len(BitcoreAPI.get_transactions(MAIN_ADDRESS_USED1)) >= 218
def test_get_transactions_main_unused(self):
assert len(BitcoreAPI.get_transactions(MAIN_ADDRESS_UNUSED)) == 0
# FIXME: Bitcore.io only returns 10 elements
# def test_get_transactions_test_used(self):
# assert len(BitcoreAPI.get_transactions_testnet(TEST_ADDRESS_USED2)) >= 444
def test_get_transactions_test_unused(self):
assert len(BitcoreAPI.get_transactions_testnet(TEST_ADDRESS_UNUSED)) == 0
def test_get_unspent_return_type(self):
assert iter(BitcoreAPI.get_unspent(MAIN_ADDRESS_USED1))
def test_get_unspent_main_used(self):
assert len(BitcoreAPI.get_unspent(MAIN_ADDRESS_USED2)) >= 1
def test_get_unspent_main_unused(self):
assert len(BitcoreAPI.get_unspent(MAIN_ADDRESS_UNUSED)) == 0
# FIXME: Bitcore.io only returns 10 elements
# def test_get_unspent_test_used(self):
# assert len(BitcoreAPI.get_unspent_testnet(TEST_ADDRESS_USED2)) >= 194
def test_get_unspent_test_unused(self):
assert len(BitcoreAPI.get_unspent_testnet(TEST_ADDRESS_UNUSED)) == 0
``` |
{
"source": "327qabot/chad-CI-python",
"score": 2
} |
#### File: backend/login/test_backend_login.py
```python
import pytest
import requests
from qa327_test.conftest import base_url
import qa327.backend as bn
from qa327.models import User
# Mock a sample user
test_user = User(
email='<EMAIL>',
name='Test User',
password='<PASSWORD>'
)
@pytest.mark.usefixtures('server')
def test_backend_login_input1():
# Set up valid user account
bn.register_user(test_user.email, test_user.name, test_user.password, test_user.password)
# Test input partition #1 (valid email, correct password)
result = bn.login_user(test_user.email, test_user.password)
assert result is not None
assert result.name == test_user.name
@pytest.mark.usefixtures('server')
def test_backend_login_input2():
# Test input partition #2 (valid email, incorrect password)
result = bn.login_user(test_user.email, "<PASSWORD>!")
assert result == None
@pytest.mark.usefixtures('server')
def test_backend_login_input3():
# Test input partition #3 (invalid email, correct password)
result = bn.login_user("<EMAIL>", test_user.password)
assert result == None
@pytest.mark.usefixtures('server')
def test_backend_login_input4():
# Test input partition #4 (invalid email, incorrect password)
result = bn.login_user("<EMAIL>", "<PASSWORD>!")
assert result == None
```
#### File: integration/buy/test_buy_user_walk_through.py
```python
import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
# integration testing: the test case interacts with the
# browser, and test the whole system (frontend+backend).
@pytest.mark.usefixtures('server')
class BuyTicket(BaseCase):
def register(self):
"""register new user"""
self.open(base_url + '/register')
self.type("#email", "<EMAIL>")
self.type("#name", "test0")
self.type("#password", "<PASSWORD>")
self.type("#password2", "<PASSWORD>")
self.click('input[type="submit"]')
def login(self):
""" Login to Swag Labs and verify that login was successful. """
self.open(base_url + '/login')
self.type("#email", "<EMAIL>")
self.type("#password", "<PASSWORD>")
self.click('input[type="submit"]')
def sell_ticket(self):
""" Add a ticket to be sold """
self.open(base_url + '/')
self.type("#sell_name", "Avengers")
self.type("#sell_quantity", "3")
self.type("#sell_price", "35")
self.type("#sell_exp_date", "2022\t12-20")
self.click("#sell_submit")
def logout(self):
""" Logout of user we used to create ticket """
self.open(base_url + '/logout')
def register2(self):
"""register new user to buy ticket """
self.open(base_url + '/register')
self.type("#email", "<EMAIL>")
self.type("#name", "test2")
self.type("#password", "<PASSWORD>")
self.type("#password2", "<PASSWORD>")
self.click('input[type="submit"]')
def login2(self):
""" Login to Swag Labs and verify that login was successful. """
self.open(base_url + '/login')
self.type("#email", "<EMAIL>")
self.type("#password", "<PASSWORD>")
self.click('input[type="submit"]')
def buy_ticket(self):
""" Add a ticket to be sold """
self.open(base_url + '/')
self.click("#ticket-avengers-buy", timeout=5)
self.click("#buy_submit", timeout=5)
def test_buy_ticket(self):
""" This test checks the implemented sell
ticket feature """
self.register()
self.login()
self.sell_ticket()
self.logout()
self.register2()
self.login2()
self.buy_ticket()
self.open(base_url + "/")
self.assert_element_absent("#ticket-Avengers")
``` |
{
"source": "3282/python_training",
"score": 3
} |
#### File: python_training/model/contact.py
```python
class Contact:
def __init__(self, firstname, middlename, lastname, nickname, company, address, home, mobile, fax,
email, email2, email3, bday, bmonth, byear, address2):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.company = company
self.address = address
self.home = home
self.mobile = mobile
self.fax = fax
self.email = email
self.email2 = email2
self.email3 = email3
self.bday = bday
self.bmonth = bmonth
self.byear = byear
self.address2 = address2
``` |
{
"source": "3284180393/ccodappmanger",
"score": 3
} |
#### File: ccodappmanger/app_base/ssh_utils.py
```python
from fabric.api import *
from datetime import datetime
from common.log import logger
def create_connection(host_ip, ssh_port, user, password):
"""
    Create an ssh connection
    :param host_ip: host ip
    :param ssh_port: ssh port
    :param user: login user
    :param password: <PASSWORD>
    :return: the connection if it is created successfully, 'TIMEOUT' if the connection times out, "AUTH_FAIL" if authentication fails
"""
logger.info("准备创建ssh连接,host_p=%s,ssh_port=%d,user=%s,password=%s" %(host_ip, ssh_port, user, password))
```
#### File: ccodappmanger/home_application/views.py
```python
from common.mymako import render_mako_context
from django.http import HttpResponse
from app_base.models import AppPriStbyCfg
from oracle.dg_switch import check_dg_oracle_status
from oracle.dg_switch import start_pri_stby_switch as do_pri_stby_switch
import json
def home(request):
"""
    Home page
"""
return render_mako_context(request, '/home_application/home.html')
def dev_guide(request):
"""
    Developer guide
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
    Contact us
"""
return render_mako_context(request, '/home_application/contact.html')
def get_app_pri_stby_cfg(request, platform_id, app_type):
"""
    Get all primary/standby configurations of a given application type on a given platform
    :param request: http request
    :param platform_id: platform id
    :param app_type: application type
    :return: query result
"""
query_list = AppPriStbyCfg.objects.filter(platform__platform_id=platform_id, app_template__app_type=app_type)
ret_list = list()
for cfg in query_list:
app_cfg = dict()
app_cfg['platform_name'] = cfg.platform.platform_name
app_cfg['platform_id'] = cfg.platform.platform_id
app_cfg['app_template_id'] = cfg.app_template.id
app_cfg['app_type'] = cfg.app_template.app_type
app_cfg['domain_name'] = cfg.domain.domain_name
app_cfg['domain_id'] = cfg.domain.domain_id
app_cfg['nj_agent_host_ip'] = cfg.nj_agent_server.host_ip
app_cfg['nj_agent_host_name'] = cfg.nj_agent_server.host_name
app_cfg['nj_agent_server_id'] = cfg.nj_agent_server.id
app_cfg['nj_agent_user'] = cfg.nj_agent_user.login_name
app_cfg['nj_agent_user_password'] = <PASSWORD>
app_cfg['nj_agent_user_id'] = cfg.nj_agent_user.id
app_cfg['available_ip'] = cfg.available_ip
app_cfg['primary_app_cfg'] = __get_app_config(cfg.primary_app)
app_cfg['standby_app_cfg'] = __get_app_config(cfg.standby_app)
ret_list.append(app_cfg)
return HttpResponse(json.dumps(ret_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
def get_app_pri_stby_status(request, cfg_id):
app_pri_stby_cfg = AppPriStbyCfg.objects.get(id=cfg_id)
if not app_pri_stby_cfg:
return HttpResponse('错误的主备配置id', content_type="application/json,charset=utf-8")
pri_ora_cfg = __get_app_config(app_pri_stby_cfg.primary_app)
stby_ora_cfg = __get_app_config(app_pri_stby_cfg.standby_app)
ora_dg_status = check_dg_oracle_status(pri_ora_cfg, stby_ora_cfg)
return HttpResponse(json.dumps(ora_dg_status, ensure_ascii=False), content_type="application/json,charset=utf-8")
def start_pri_stby_switch(request, app_pri_stby_cfg_id, switch_method):
app_pri_stby_cfg = AppPriStbyCfg.objects.get(id=app_pri_stby_cfg_id)
    if not app_pri_stby_cfg:
return HttpResponse('错误的主备配置id', content_type="application/json,charset=utf-8")
pri_ora_cfg = __get_app_config(app_pri_stby_cfg.primary_app)
stby_ora_cfg = __get_app_config(app_pri_stby_cfg.standby_app)
    ret = do_pri_stby_switch(pri_ora_cfg, stby_ora_cfg, switch_method, app_pri_stby_cfg.available_ip)
return ret
def __get_app_config(app_cfg):
"""
    Convert the application configuration stored in the database into an easy-to-use dict()
    :param app_cfg: application configuration stored in the database
    :return: the application configuration as an easy-to-use dict
"""
cfg = dict()
cfg['app_template_id'] = app_cfg.app_template.id
cfg['app_type'] = app_cfg.app_type
cfg['app_name'] = app_cfg.app_name
cfg['app_alias'] = app_cfg.app_alias
cfg['base_path'] = app_cfg.base_path
cfg['host_ip'] = app_cfg.server.host_ip
cfg['host_name'] = app_cfg.server.host_name
cfg['root_user'] = app_cfg.root_user.login_name
cfg['root_password'] = app_cfg.root_user.pass_word
cfg['app_user'] = app_cfg.app_user.login_name
cfg['app_user_password'] = <PASSWORD>_cfg.<PASSWORD>_<PASSWORD>
cfg['app_cfg_id'] = app_cfg.id
cfg['server_id'] = app_cfg.server.id
cfg['root_user_id'] = app_cfg.root_user.id
cfg['app_user_id'] = app_cfg.app_user.id
cfg['service_name'] = app_cfg.service_name
cfg['ssh_port'] = app_cfg.server.ssh_port
return cfg
```
#### File: ccodappmanger/oracle/dg_switch.py
```python
import sys
import logging as logger
import datetime
import re
import subprocess
import json
import signal,functools
class TimeoutError(Exception):pass
import paramiko
from paramiko.ssh_exception import AuthenticationException
reload(sys)
sys.setdefaultencoding('utf8')
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logger.basicConfig(filename='my.log', level=logger.DEBUG, format=LOG_FORMAT)
stby_cfg = {'app_user_password': '<PASSWORD>', 'app_type': 'oracle', 'app_name': 'ccod', 'ssh_port': 22, 'service_name': 'db_wending', 'server_id': 1, 'base_path': '/home/oracle/oracle10g/product/10.2.0/oracle/db_1', 'host_ip': '10.130.41.159', 'app_alias': 'WENDING', 'root_user_id': 1, 'app_user_id': 2, 'host_name': 'localhost', 'app_cfg_id': 1, 'root_user': 'root', 'app_template_id': 1, 'app_user': 'oracle', 'root_password': '<PASSWORD>'}
pri_cfg = {'app_user_password': '<PASSWORD>', 'app_type': 'oracle', 'app_name': 'ccod', 'ssh_port': 22, 'service_name': 'db_phystdby', 'server_id': 2, 'base_path': '/home/oracle/oracle10g/product/10.2.0/db_1', 'host_ip': '10.130.41.161', 'app_alias': 'PHYSTDBY', 'root_user_id': 3, 'app_user_id': 4, 'host_name': 'ccod-oracle1', 'app_cfg_id': 2, 'root_user': 'root', 'app_template_id': 1, 'app_user': 'oracle', 'root_password': '123456'}
default_exec_command_timeout = 10
ssh_connect_timeout = 20
script_24_support_version = '^Python 2\.4\.3$'
script_26_support_version = '^Python 2\.6\.6$'
script_3_support_version = '||'
ip_regex = '.*(2(5[0-5]{1}|[0-4]\\d{1})|[0-1]?\\d{1,2})\\.(2(5[0-5]{1}|[0-4]\\d{1})|[0-1]?\\d{1,2})\\.(2(5[0-5]{1}|[0-4]\\d{1})|[0-1]?\\d{1,2})\\.(2(5[0-5]{1}|[0-4]\\d{1})|[0-1]?\\d{1,2}).*'
def timeout(seconds, error_message="Timeout Error: the cmd 30s have not finished."):
def decorated(func):
result = ""
def _handle_timeout(signum, frame):
global result
result = error_message
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
global result
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return result
return functools.wraps(func)(wrapper)
return decorated
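# Note: the timeout decorator above relies on SIGALRM, so it only works on Unix and only when
# called from the main thread. Illustrative usage (the decorated function name is made up):
#
#     @timeout(5, "slow_call did not finish within 5 seconds")
#     def slow_call():
#         ...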
def __ping_test(host_ip):
"""
    Ping an ip to check whether it is reachable
    :param host_ip: the ip to ping
    :return: SUCC if the ip can be pinged, FAIL if it cannot
"""
logger.info("准备ping ip:" + host_ip)
ret = {}
command = 'ping ' + host_ip + ' -c 4'
# command = 'ping ' + host_ip
key = 'PING'
ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
ret['command'] = command
ret['desc'] = '服务器是否在线'
ret['key'] = key
ret['command_type'] = 'local/runtime'
ret['exec_success'] = False
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = process.communicate()[0]
if process.stdin:
process.stdin.close()
if process.stdout:
process.stdout.close()
if process.stderr:
process.stderr.close()
try:
process.kill()
except OSError:
pass
# command_result = out.decode()
command_result = out.decode('gbk')
str_info = re.compile("(\n)*$")
command_result = str_info.sub('', command_result)
ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
ret['output'] = command_result
if re.match(".*ttl=.*", command_result, re.DOTALL) and re.match(".*time=.*", command_result, re.DOTALL):
ret[key] = 'PING_SUCC'
ret['exec_success'] = True
else:
ret[key] = 'PING_FAIL'
logger.info("ping " + host_ip + "result : " + ret[key])
return ret
@timeout(ssh_connect_timeout, "Timeout Error:fail to create connection in " + str(ssh_connect_timeout) + " seconds")
def __create_ssh_connect(host_ip, ssh_port, user, password):
logger.info("创建ssh连接host_ip=%s, ssh_port=%d, user=%s, password=%s" % (host_ip, ssh_port, user, password))
ret = {}
command = 'create ssh connect for ' + user + '@' + host_ip
key = 'CREATE_SSH_CONNECT'
ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
ret['command'] = command
ret['desc'] = '为' + user + '创建ssh连接'
ret['key'] = key
ret['exec_success'] = False
ret[key] = 'CREATE_CONN_FAIL'
ssh_client = None
try:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host_ip, ssh_port, user, password)
ret['output'] = "create ssh connect for " + user + ":SUCC"
ret[key] = 'CREATE_CONN_SUCCESS'
ret['exec_success'] = True
except AuthenticationException, ae:
logger.error(user + '登录' + host_ip + '认证失败', ae)
ret['output'] = str(ae)
ret[key] = 'AUTH_FAIL'
except Exception, e:
logger.error(user + '登录' + host_ip + '失败', e)
ret['output'] = str(e)
ret[key] = 'FAIL'
ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logger.info('ssh连接创建结果:' + ret[key])
return ret, ssh_client
def command_result_regex_match(command_result, regex, command_type='bash/shell'):
"""
    Check whether a command's output matches a given regular expression. Before matching, blank lines are removed from the output and its trailing \n is stripped.
    bash/shell output is matched in multi-line (DOTALL) mode; oracle/sql output is concatenated into a single line before matching, and in that case the leading whitespace of each line is removed before the lines are joined.
:param command_result:
:param regex:
:param command_type:
:return:
"""
input_str = re.compile('\s+\n').sub('\n', command_result)
input_str = re.compile('\n{2,}').sub('\n', input_str)
input_str = re.compile('\n$').sub('', input_str)
input_str = re.compile('^\n').sub('', input_str)
if command_type == 'bash/shell':
if re.match(regex, input_str, flags=re.DOTALL):
return True
else:
return False
elif command_type == 'oracle/sql':
input_str = re.compile('^\s+').sub('', input_str)
input_str = re.compile('\n').sub('', input_str)
if re.match(regex, input_str):
return True
else:
return False
else:
logger.error('目前还不支持' + command_type + '类型命令结果解析')
return False
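# Illustrative examples for command_result_regex_match (the inputs are made up, not taken from this file):
#   bash/shell: command_result_regex_match('Python 2.6.6\n', '^Python 2\.6\.6$') -> True,
#     because the trailing newline is stripped and the pattern is matched in DOTALL mode.
#   oracle/sql: 'STATUS    DATABASE_STATUS\nOPEN      ACTIVE\n' is first collapsed to
#     'STATUS    DATABASE_STATUSOPEN      ACTIVE', so a pattern like '.*OPEN.*ACTIVE.*' matches it.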
@timeout(default_exec_command_timeout, error_message="in " + str(default_exec_command_timeout) + " without command return")
def __timeout_exec_command(conn, command, key, result_regex_map, desc, script_type='bash/shell'):
"""
    Execute a command on a linux server over ssh. The output of the command is matched against the regular expressions in result_regex_map;
    if one of them matches, the returned key equals the keyword mapped to that regex, otherwise failure is reported.
    The timeout decorator on this function controls how long the command is allowed to run.
    :param conn: ssh session
    :param command: command to execute
    :param key: key under which the execution result is reported
    :param result_regex_map: regular expressions describing the possible command outputs
    :param desc: command description
    :param script_type: command type, currently bash/shell and oracle/sql are supported
    :return: {'output', 'command', 'key', key, 'desc', 'startTime', 'endTime'}
"""
ret = dict()
ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
stdin, stdout, stderr = conn.exec_command('. ./.bash_profile;' + command)
exec_result = stdout.read()
err = stderr.read()
if err:
exec_result = err
ret['out'] = exec_result
logger.info(command + ' exec result:' + ret['out'])
    ret['output'] = exec_result
ret['command'] = command
ret['key'] = key
ret['desc'] = desc
ret['exec_success'] = False
ret[key] = 'UNKNOWN'
for k, v in result_regex_map.iteritems():
if command_result_regex_match(exec_result, v, script_type):
logger.info("命令输出结果满足正则表达式:" + v + "," + key + "将返回" + k)
ret[key] = k
ret['exec_success'] = True
break
ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
logger.info('命令执行结果exec_success=' + str(ret['exec_success']) + '输出关键字' + key + '=' + ret[key])
return ret
def __exec_command(conn, command, key, result_regex_map, desc, command_type):
"""
    Execute a command on a linux server over ssh. The output of the command is matched against the regular expressions in result_regex_map;
    if one of them matches, the returned key equals the keyword mapped to that regex, otherwise failure is reported.
    :param conn: ssh session
    :param command: command to execute
    :param key: key under which the execution result is reported
    :param result_regex_map: regular expressions describing the possible command outputs
    :param desc: command description
    :param command_type: script type, currently only bash/shell and oracle/sql are supported
    :return: {'output', 'command', 'key', key, 'desc', 'startTime', 'endTime'}
"""
ret = dict()
ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
stdin, stdout, stderr = conn.exec_command('. ./.bash_profile;' + command)
exec_result = stdout.read()
err = stderr.read()
if err:
exec_result = err
ret['out'] = exec_result
logger.info(command + ' exec result:' + ret['out'])
    ret['output'] = exec_result
ret['command'] = command
ret['key'] = key
ret['desc'] = desc
ret['exec_success'] = False
ret[key] = 'UNKNOWN'
for k,v in result_regex_map.iteritems():
if command_result_regex_match(exec_result, v, command_type):
logger.info("命令输出结果满足正则表达式:" + v + "," + key + "将返回" + k)
ret[key] = k
ret['exec_success'] = True
break
ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
logger.info('命令执行结果exec_success=' + str(ret['exec_success']) + '输出关键字' + key + '=' + ret[key])
return ret
def __exec_check(conn, check_item):
"""
    Run a single check item on the target machine
    :param conn: ssh connection to the target machine
    :param check_item: the check to perform
    :return: check result containing: the executed command ret['command'], the check item's key and its value ret['key'] / ret[key], the description ret['desc'],
    whether the check executed successfully ret['exec_success'], the command type ret['command_type'], and the start/end time of the check ret['startTime'], ret['endTime']
"""
logger.info('开始执行某项检查,检查内容:' + str(check_item))
command = check_item['command']
key = check_item['key']
desc = check_item['desc']
result_regex_map = check_item['result_regex_map']
command_type = check_item['command_type']
exec_ret = dict()
exec_ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
exec_ret['command'] = command
exec_ret['key'] = key
exec_ret['desc'] = desc
exec_ret['command_type'] = command_type
exec_ret['exec_success'] = False
try:
        if check_item['timeout']:
            exec_ret = __timeout_exec_command(conn, command, key, result_regex_map, desc, command_type)
        else:
            exec_ret = __exec_command(conn, command, key, result_regex_map, desc, command_type)
except Exception, err:
logger.error(desc + "检查异常", err)
exec_ret['output'] = str(err)
exec_ret[key] = 'FALSE'
print(check_item['accept_result'])
print(exec_ret[key])
if re.match(check_item['accept_result'], exec_ret[key]):
exec_ret['exec_success'] = True
exec_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
logger.info(desc + " check result:" + key + "=" + exec_ret[key] + ',exec_success=' + str(exec_ret['exec_success']))
return exec_ret
def __timeout_exec_check(conn, check_item):
"""
    Run a single check item on the target machine
    :param conn: ssh connection to the target machine
    :param check_item: the check to perform
    :return: check result containing: the executed command ret['command'], the check item's key and its value ret['key'] / ret[key], the description ret['desc'],
    whether the check executed successfully ret['exec_success'], the command type ret['command_type'], and the start/end time of the check ret['startTime'], ret['endTime']
"""
logger.info('开始执行某项检查,检查内容:' + str(check_item))
command = check_item['command']
key = check_item['key']
desc = check_item['desc']
result_regex_map = check_item['result_regex_map']
command_type = check_item['command_type']
exec_ret = dict()
exec_ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
exec_ret['command'] = command
exec_ret['key'] = key
exec_ret['desc'] = desc
exec_ret['command_type'] = command_type
exec_ret['exec_success'] = False
try:
        if check_item['timeout']:
            exec_ret = __timeout_exec_command(conn, command, key, result_regex_map, desc, command_type)
        else:
            exec_ret = __exec_command(conn, command, key, result_regex_map, desc, command_type)
except TimeoutError, detail:
logger.error(desc + "检查超时", detail)
exec_ret['output'] = str(detail)
exec_ret[key] = 'TIMEOUT'
except Exception, err:
logger.error(desc + "检查异常", err)
exec_ret['output'] = str(err)
exec_ret[key] = 'FALSE'
    print(json.dumps(exec_ret, ensure_ascii=False))
if re.match(check_item['accept_result'], exec_ret[key]):
exec_ret['exec_success'] = True
exec_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
logger.info(desc + " check result:" + key + "=" + exec_ret[key] + ',exec_success=' + str(exec_ret['exec_success']))
return exec_ret
def check_dg_primary_oracle_status(pri_ora_cfg, stby_ora_cfg):
dg_status_ret = dict()
dg_status_ret['status'] = 'FALSE'
dg_status_ret['key'] = 'DG_PRIMARY_ORACLE_STATUS_CHECK'
dg_status_ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
dg_status_ret['desc'] = '检查dg oracle中primary oracle的运行状态以及相关配置是否同standby oracle配置匹配'
dg_status_ret['comment'] = ''
item_list = list()
dg_status_ret['item_list'] = item_list
ora_cfg = pri_ora_cfg
rltd_ora_cfg = stby_ora_cfg
host_ip = ora_cfg['host_ip']
check_result = __ping_test(host_ip)
item_list.append(check_result)
if check_result[check_result['key']] != 'PING_SUCC':
logger.error(host_ip + '服务器不在线,停止检查')
dg_status_ret['comment'] = '服务器不在线,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
root_usr = ora_cfg['root_user']
root_pwd = ora_cfg['root_password']
ssh_port = ora_cfg['ssh_port']
check_result, conn = __create_ssh_connect(host_ip, ssh_port, root_usr, root_pwd)
item_list.append(check_result)
if not check_result['exec_success']:
logger.error('为' + root_usr + '创建ssh客户端失败,停止检查')
dg_status_ret['comment'] = '为' + root_usr + '创建ssh客户端失败,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
server_check_items = [
{'desc': '检查服务器终端是否能够正确返回执行结果', 'key': 'CONSOLE', 'command': 'echo 1234567890', 'result_regex_map': {'CONSOLE_RIGHT': '^1234567890$'}, 'accept_result': '^CONSOLE_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查服务器主机名是否配置正确', 'key': 'HOST_NAME', 'command': 'hostname', 'result_regex_map': {'HOST_NAME_RIGHT': '^' + ora_cfg['host_name'] + '$'}, 'accept_result': '^HOST_NAME_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查服务器python版本', 'key': 'PYTHON_VERSION', 'command': 'python -V| grep -oP "(?<=^Python )\S+$"', 'result_regex_map': {'V24': script_24_support_version, 'V26': script_26_support_version}, 'accept_result' : '^(V24|V26)$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '确认hosts文件存在', 'key': 'HOSTS_FILE', 'command': 'ls /etc/hosts', 'result_regex_map': {'HOSTS_EXIST': '^/etc/hosts$'}, 'accept_result': '^HOSTS_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': 'hosts没有配置备库ip', 'key': 'HOSTS_CFG', 'command': 'grep "' + rltd_ora_cfg['host_ip'] + '" /etc/hosts | grep -v "^\s*#"', 'result_regex_map': {'NOT_CONTENT_STANDBY_IP': '^\s*$'}, 'accept_result': '^NOT_CONTENT_STANDBY_IP$', 'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {}, 'not_succ_to_break': False},
{'desc': '确认网卡是bond0模式', 'key': 'BOND0', 'command': 'ls /etc/sysconfig/network-scripts/ifcfg-bond0', 'result_regex_map': {'BOND0_EXIST': '^/etc/sysconfig/network-scripts/ifcfg-bond0$'}, 'accept_result': '^BOND0_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认NetworkManager服务未被启动', 'key': 'NETWORKMANAGER_SERVICE', 'command': 'service NetworkManager status|grep -oP "(?<=pid ).*(?=\))" | grep -oP "\d+"', 'result_regex_map': {'SERVICE_NOT_START': '^$', 'SERVICE_START': '^\d+$'}, 'accept_result': '^SERVICE_NOT_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
]
check_k_v = dict()
err_msg = ''
for check_item in server_check_items:
can_exec_check = True
for k, v in check_item['depend_cond'].iteritems():
if not check_k_v.has_key(k) or check_k_v[k] != v:
logger.info(check_item['desc'] + '依赖的的条件:' + k + '=' + v + '不满足,该步骤将不执行')
can_exec_check = False
break
if can_exec_check:
item_result = __exec_check(conn, check_item)
item_list.append(item_result)
check_k_v[check_item['key']] = item_result[check_item['key']]
if not item_result['exec_success']:
err_msg = err_msg + check_item['desc'] + '检查失败;'
if check_item['not_succ_to_break']:
logger.error(check_item['desc'] + '执行失败且not_succ_to_break=True,退出检查流程,当前流程执行结果:' + err_msg)
dg_status_ret['comment'] = err_msg
conn.close()
return dg_status_ret
conn.close()
base_path = ora_cfg['base_path']
bin_path = (base_path + "/bin/lsnrctl").replace("//", "/")
tnsnames_path = (base_path + '/network/admin/tnsnames.ora').replace("//", "/")
listener_path = (base_path + '/network/admin/listener.ora').replace("//", "/")
ora_check_items = [
{'desc': '检查base_path配置是否配置正确', 'key': 'BASE_PATH', 'command': 'ls ' + bin_path, 'result_regex_map': {'BASE_PATH_RIGHT': '^' + bin_path + '$'}, 'accept_result': '^BASE_PATH_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查tnsnames.ora文件存在', 'key': 'TNSNAMES_FILE', 'command': 'ls ' + tnsnames_path, 'result_regex_map': {'TNSNAMES_EXIST': '^' + tnsnames_path + '$'}, 'accept_result': '^TNSNAMES_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认tnsnames.ora中的HOST只能是ip地址', 'key': 'TNSNAMES_CFG', 'command': 'grep -v "\s*#" ' + tnsnames_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -vP "^\s*\d+\.\d+\.\d+\.\d+\s*"', 'result_regex_map': {'TNSNAMES_CFG_HOST_USE_IP': '^$'}, 'accept_result' : '^TNSNAMES_CFG_HOST_USE_IP$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '确认listener.ora文件存在', 'key': 'LISTENER_FILE', 'command': 'ls ' + listener_path, 'result_regex_map': {'LISTENER_EXIST': '^' + listener_path + '$', 'LISTENER_NOT_EXIST': '^ls:.*'}, 'accept_result': '^(LISTENER_EXIST|LISTENER_NOT_EXIST)$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认listener.ora的HOST没有用ip表示', 'key': 'LISTENER_CFG', 'command': 'grep -v "^\s*#" ' + listener_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -E "^\s*(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\s*$"', 'result_regex_map': {'LISTENER_CFG_HOST_NOT_USE_IP': '^$'}, 'accept_result': '^LISTENER_CFG_HOST_NOT_USE_IP$', 'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {'LISTENER_EXIST' : 'LISTENER_EXIST'}, 'not_succ_to_break': False},
{'desc': '检查tns服务状态', 'key': 'TNS_SERVICE', 'command': 'lsnrctl status', 'result_regex_map': {'TNS_START': '.*The command completed successfully.*', 'TNS_NOT_START':'.*TNS-12541.*'}, 'accept_result': '^TNS_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查数据库启动状态', 'key': 'DATABASE_STATUS', 'command': 'sqlplus -s / as sysdba <<EOF\nselect status,database_status from v\\$instance;\nEOF', 'result_regex_map': {'MOUNTED': '.*MOUNTED.*ACTIVE.*', 'OPEN': '.*OPEN.*ACTIVE.*', 'SHUTDOWN' : '.*ORA-01034.*'}, 'accept_result': '^OPEN$', 'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的实例名', 'key': 'INSTANCE', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'instance_name\';\nEOF',
'result_regex_map': {'INSTANCE_RIGHT': '.*' + ora_cfg['app_name'] + '$'}, 'accept_result': '^INSTANCE_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查oracle的unique name(别名)', 'key': 'UNIQUE_NAME', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'db_unique_name\';\nEOF',
'result_regex_map': {'UNIQUE_NAME_RIGHT': '.*' + ora_cfg['app_alias'] + '.*'}, 'accept_result': '^UNIQUE_NAME_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查主库配置的备库的DB_UNIQUE_NAME是否和备库的unique name一致', 'key': 'DB_UNIQUE_NAME',
'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'log_archive_dest_2\';\nEOF',
'result_regex_map': {'DB_UNIQUE_NAME_RIGHT': '.*(DB_UNIQUE_NAME|db_unique_name)=' + rltd_ora_cfg['app_alias'] + '(\s|$)'}, 'accept_result': '^DB_UNIQUE_NAME_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': True},
{'desc': '检查主库配置的备库service是否和备库定义的service_name一致', 'key': 'ORA_DG_SERVICE', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'log_archive_dest_2\';\nEOF',
         'result_regex_map': {'ORA_DG_SERVICE_RIGHT': '.*(SERVICE|service)='+ rltd_ora_cfg['service_name'] + '(\s|$)'}, 'accept_result': '^ORA_DG_SERVICE_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '确认listener.ora的HOST没有用ip表示', 'key': 'LISTENER_CFG',
'command': 'grep -v "^\s*#" ' + listener_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -E "^\s*(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\s*$"',
'result_regex_map': {'LISTENER_CFG_HOST_NOT_USE_IP': '^$'}, 'accept_result': '^LISTENER_CFG_HOST_NOT_USE_IP$',
'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {'LISTENER_EXIST': 'LISTENER_EXIST'},
'not_succ_to_break': False},
{'desc': '检查tns服务状态', 'key': 'TNS_SERVICE', 'command': 'lsnrctl status',
'result_regex_map': {'TNS_START': '.*The command completed successfully.*', 'TNS_NOT_START': '.*TNS-12541.*'},
'accept_result': '^TNS_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell',
'not_succ_to_break': False},
{'desc': '检查数据库实例运行状态', 'key': 'DATABASE_INSTANCE_STATUS',
'command': 'sqlplus -s / as sysdba <<EOF\nselect instance_name,status,database_status from v\\$instance;\nEOF',
'result_regex_map': {'MOUNTED': '.*' + ora_cfg['app_name'] + '.*MOUNTED.*ACTIVE.*', 'OPEN': '.*OPEN.*ACTIVE.*', 'SHUTDOWN': '.*ORA-01034.*'},
'accept_result': '^OPEN$', 'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql',
'not_succ_to_break': True},
{'desc': '检查oracle的database role', 'key': 'DATABASE_ROLE',
'command': 'sqlplus -s / as sysdba <<EOF\nselect database_role from v\\$database;\nEOF',
'result_regex_map': {'PRIMARY': '.*PRIMARY.*', 'STANDBY': '.*PHYSICAL STANDBY.*'},
'accept_result': '^PRIMARY$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的switchover status', 'key': 'SWITCHOVER_STATUS',
'command': 'sqlplus -s / as sysdba <<EOF\nselect switchover_status from v\\$database;\nEOF',
         'result_regex_map': {'TO_PRIMARY': '.*TO PRIMARY.*', 'TO_STANDBY': '.*TO STANDBY.*', 'SESSIONS_ACTIVE': '.*SESSIONS ACTIVE.*', 'ORA_ERROR': '.*ORA-\\d{5,5}(\\D|$)'},
'accept_result': '^(TO_STANDBY|SESSIONS_ACTIVE)$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的archive_gap', 'key': 'ARCHIVE_GAP',
'command': 'sqlplus -s / as sysdba <<EOF\nselect * from v\\$archive_gap;\nEOF',
'result_regex_map': {'ARCHIVE_GAP_EMPTY': '.*no rows selected.*', 'ARCHIVE_GAP_NOT_EMPTY': '^((?!no rows selected).)*$'},
'accept_result': '^ARCHIVE_GAP_EMPTY$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的archive log', 'key': 'ARCHIVE_LOG',
'command': 'sqlplus -s / as sysdba <<EOF\nselect (select max(SEQUENCE#) from V\\$archived_log where applied=\'NO\')-(select max(SEQUENCE#) from V\\$archived_log where applied=\'YES\') as archived_log from dual;\nEOF',
'result_regex_map': {'ARCHIVE_LOG_OK': '.*\D(0|1)$'}, 'accept_result': '^ARCHIVE_LOG_OK$',
'command_type': 'oracle/sql', 'timeout': False, 'depend_cond': {},
'not_succ_to_break': False},
{'desc': '检查oracle的表空间', 'key': 'TABLE_SPACE', 'command': 'sqlplus -s / as sysdba <<EOF\nselect TABLESPACE_NAME,FILE_NAME,STATUS from dba_temp_files;\nEOF',
'result_regex_map': {'TABLE_SPACE_OK': '.*AVAILABLE.*', 'TABLE_SPACE_ERROR': '.*ORA-01110: data file 201.*'},
'accept_result': '^TABLE_SPACE_OK$', 'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql',
'not_succ_to_break': False}
]
check_result, conn = __create_ssh_connect(host_ip, ssh_port, ora_cfg['app_user'], ora_cfg['app_user_password'])
item_list.append(check_result)
if not check_result['exec_success'] :
logger.error('为' + ora_cfg['app_user'] + '创建ssh客户端失败,停止检查')
dg_status_ret['comment'] = '为' + ora_cfg['app_user'] + '创建ssh客户端失败,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
for check_item in ora_check_items:
can_exec_check = True
for k, v in check_item['depend_cond'].iteritems():
if not check_k_v.has_key(k) or check_k_v[k] != v:
logger.info(check_item['desc'] + '依赖的的条件:' + k + '=' + v + '不满足,该步骤将不执行')
can_exec_check = False
break
if can_exec_check:
item_result = __exec_check(conn, check_item)
item_list.append(item_result)
if not item_result['exec_success']:
err_msg = err_msg + check_item['desc'] + '检查失败;'
if check_item['not_succ_to_break']:
logger.error(check_item['desc'] + '执行失败且not_succ_to_break=True,退出检查流程,当前流程执行结果:' + err_msg)
dg_status_ret['comment'] = err_msg
conn.close()
return dg_status_ret
if err_msg == '':
dg_status_ret['status'] = 'SUCCESS'
dg_status_ret['comment'] = '主oracle状态检查通过'
else:
dg_status_ret['comment'] = err_msg
conn.close()
logger.info('主oracle状态检查结果' + dg_status_ret['status'] + ':' + dg_status_ret['comment'])
return dg_status_ret
def check_dg_stby_oracle_status(pri_ora_cfg, stby_ora_cfg):
dg_status_ret = dict()
dg_status_ret['status'] = 'FALSE'
dg_status_ret['key'] = 'DG_STANDBY_ORACLE_STATUS_CHECK'
dg_status_ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
dg_status_ret['desc'] = '检查dg oracle中primary oracle的运行状态以及相关配置是否同standby oracle配置匹配'
dg_status_ret['comment'] = ''
item_list = list()
dg_status_ret['item_list'] = item_list
ora_cfg = stby_ora_cfg
rltd_ora_cfg = pri_ora_cfg
host_ip = ora_cfg['host_ip']
check_result = __ping_test(host_ip)
item_list.append(check_result)
if check_result[check_result['key']] != 'PING_SUCC':
logger.error(host_ip + '服务器不在线,停止检查')
dg_status_ret['comment'] = '服务器不在线,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
root_usr = ora_cfg['root_user']
root_pwd = ora_cfg['<PASSWORD>']
ssh_port = ora_cfg['ssh_port']
check_result, conn = __create_ssh_connect(host_ip, ssh_port, root_usr, root_pwd)
item_list.append(check_result)
if not check_result['exec_success']:
logger.error('为' + root_usr + '创建ssh客户端失败,停止检查')
dg_status_ret['comment'] = '为' + root_usr + '创建ssh客户端失败,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
server_check_items = [
{'desc': '检查服务器终端是否能够正确返回执行结果', 'key': 'CONSOLE', 'command': 'echo 1234567890', 'result_regex_map': {'CONSOLE_RIGHT': '^1234567890$'}, 'accept_result': '^CONSOLE_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查服务器主机名是否配置正确', 'key': 'HOST_NAME', 'command': 'hostname', 'result_regex_map': {'HOST_NAME_RIGHT': '^' + ora_cfg['host_name'] + '$'}, 'accept_result': '^HOST_NAME_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查服务器python版本', 'key': 'PYTHON_VERSION', 'command': 'python -V| grep -oP "(?<=^Python )\S+$"', 'result_regex_map': {'V24': script_24_support_version, 'V26': script_26_support_version}, 'accept_result' : '^(V24|V26)$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '确认hosts文件存在', 'key': 'HOSTS_FILE', 'command': 'ls /etc/hosts', 'result_regex_map': {'HOSTS_EXIST': '^/etc/hosts$'}, 'accept_result': '^HOSTS_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': 'hosts没有配置备库ip', 'key': 'HOSTS_CFG', 'command': 'grep "' + rltd_ora_cfg['host_ip'] + '" /etc/hosts | grep -v "^\s*#"', 'result_regex_map': {'NOT_CONTENT_STANDBY_IP': '^\s*$'}, 'accept_result': '^NOT_CONTENT_STANDBY_IP$', 'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {}, 'not_succ_to_break': False},
{'desc': '确认网卡是bond0模式', 'key': 'BOND0', 'command': 'ls /etc/sysconfig/network-scripts/ifcfg-bond0', 'result_regex_map': {'BOND0_EXIST': '^/etc/sysconfig/network-scripts/ifcfg-bond0$'}, 'accept_result': '^BOND0_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认NetworkManager服务未被启动', 'key': 'NETWORKMANAGER_SERVICE', 'command': 'service NetworkManager status|grep -oP "(?<=pid ).*(?=\))" | grep -oP "\d+"', 'result_regex_map': {'SERVICE_NOT_START': '^$', 'SERVICE_START': '^\d+$'}, 'accept_result': '^SERVICE_NOT_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
]
check_k_v = dict()
err_msg = ''
for check_item in server_check_items:
can_exec_check = True
for k, v in check_item['depend_cond'].iteritems():
if not check_k_v.has_key(k) or check_k_v[k] != v:
logger.info(check_item['desc'] + '依赖的的条件:' + k + '=' + v + '不满足,该步骤将不执行')
can_exec_check = False
break
if can_exec_check:
item_result = __exec_check(conn, check_item)
item_list.append(item_result)
check_k_v[check_item['key']] = item_result[check_item['key']]
if not item_result['exec_success']:
err_msg = err_msg + check_item['desc'] + '检查失败;'
if check_item['not_succ_to_break']:
logger.error(check_item['desc'] + '执行失败且not_succ_to_break=True,退出检查流程,当前流程执行结果:' + err_msg)
dg_status_ret['comment'] = err_msg
conn.close()
return dg_status_ret
conn.close()
base_path = ora_cfg['base_path']
bin_path = (base_path + "/bin/lsnrctl").replace("//", "/")
tnsnames_path = (base_path + '/network/admin/tnsnames.ora').replace("//", "/")
listener_path = (base_path + '/network/admin/listener.ora').replace("//", "/")
ora_check_items = [
{'desc': '检查base_path配置是否配置正确', 'key': 'BASE_PATH', 'command': 'ls ' + bin_path, 'result_regex_map': {'BASE_PATH_RIGHT': '^' + bin_path + '$'}, 'accept_result': '^BASE_PATH_RIGHT$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查tnsnames.ora文件存在', 'key': 'TNSNAMES_FILE', 'command': 'ls ' + tnsnames_path, 'result_regex_map': {'TNSNAMES_EXIST': '^' + tnsnames_path + '$'}, 'accept_result': '^TNSNAMES_EXIST$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认tnsnames.ora中的HOST只能是ip地址', 'key': 'TNSNAMES_CFG', 'command': 'grep -v "\s*#" ' + tnsnames_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -vP "^\s*\d+\.\d+\.\d+\.\d+\s*"', 'result_regex_map': {'TNSNAMES_CFG_HOST_USE_IP': '^$'}, 'accept_result' : '^TNSNAMES_CFG_HOST_USE_IP$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '确认listener.ora文件存在', 'key': 'LISTENER_FILE', 'command': 'ls ' + listener_path, 'result_regex_map': {'LISTENER_EXIST': '^' + listener_path + '$', 'LISTENER_NOT_EXIST': '^ls:.*'}, 'accept_result': '^(LISTENER_EXIST|LISTENER_NOT_EXIST)$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '确认listener.ora的HOST没有用ip表示', 'key': 'LISTENER_CFG', 'command': 'grep -v "^\s*#" ' + listener_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -E "^\s*(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\s*$"', 'result_regex_map': {'LISTENER_CFG_HOST_NOT_USE_IP': '^$'}, 'accept_result': '^LISTENER_CFG_HOST_NOT_USE_IP$', 'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {'LISTENER_EXIST' : 'LISTENER_EXIST'}, 'not_succ_to_break': False},
{'desc': '检查tns服务状态', 'key': 'TNS_SERVICE', 'command': 'lsnrctl status', 'result_regex_map': {'TNS_START': '.*The command completed successfully.*', 'TNS_NOT_START':'.*TNS-12541.*'}, 'accept_result': '^TNS_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查数据库启动状态', 'key': 'DATABASE_STATUS', 'command': 'sqlplus -s / as sysdba <<EOF\nselect status,database_status from v\\$instance;\nEOF', 'result_regex_map': {'MOUNTED': '.*MOUNTED.*ACTIVE.*', 'OPEN': '.*OPEN.*ACTIVE.*', 'SHUTDOWN' : '.*ORA-01034.*'}, 'accept_result': '^OPEN$', 'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的实例名', 'key': 'INSTANCE', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'instance_name\';\nEOF',
'result_regex_map': {'INSTANCE_RIGHT': '.*' + ora_cfg['app_name'] + '$'}, 'accept_result': '^INSTANCE_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': True},
{'desc': '检查oracle的unique name(别名)', 'key': 'UNIQUE_NAME', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'db_unique_name\';\nEOF',
'result_regex_map': {'UNIQUE_NAME_RIGHT': '.*' + ora_cfg['app_alias'] + '.*'}, 'accept_result': '^UNIQUE_NAME_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell', 'not_succ_to_break': False},
{'desc': '检查备库配置的备库的DB_UNIQUE_NAME是否和主库的unique name一致', 'key': 'DB_UNIQUE_NAME',
'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'log_archive_dest_2\';\nEOF',
'result_regex_map': {'DB_UNIQUE_NAME_RIGHT': '.*(DB_UNIQUE_NAME|db_unique_name)=' + rltd_ora_cfg['app_alias'] + '(\s|$)'}, 'accept_result': '^DB_UNIQUE_NAME_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': True},
{'desc': '检查备库配置的主库service是否和主库定义的service_name一致', 'key': 'ORA_DG_SERVICE', 'command': 'sqlplus -s / as sysdba <<EOF\nselect value from v\\$parameter where name=\'log_archive_dest_2\';\nEOF',
         'result_regex_map': {'ORA_DG_SERVICE_RIGHT': '.*(SERVICE|service)='+ rltd_ora_cfg['service_name'] + '(\s|$)'}, 'accept_result': '^ORA_DG_SERVICE_RIGHT$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '确认listener.ora的HOST没有用ip表示', 'key': 'LISTENER_CFG',
'command': 'grep -v "^\s*#" ' + listener_path + '|grep -oP "(?<=HOST)\s*=\s*\S+\s*(?=\))"|grep -oP "(?<==).*"|grep -E "^\s*(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\s*$"',
'result_regex_map': {'LISTENER_CFG_HOST_NOT_USE_IP': '^$'}, 'accept_result': '^LISTENER_CFG_HOST_NOT_USE_IP$',
'command_type': 'bash/shell', 'timeout': False, 'depend_cond': {'LISTENER_EXIST': 'LISTENER_EXIST'},
'not_succ_to_break': False},
{'desc': '检查tns服务状态', 'key': 'TNS_SERVICE', 'command': 'lsnrctl status',
'result_regex_map': {'TNS_START': '.*The command completed successfully.*', 'TNS_NOT_START': '.*TNS-12541.*'},
'accept_result': '^TNS_START$', 'timeout': False, 'depend_cond': {}, 'command_type': 'bash/shell',
'not_succ_to_break': False},
{'desc': '检查数据库实例运行状态', 'key': 'DATABASE_INSTANCE_STATUS',
'command': 'sqlplus -s / as sysdba <<EOF\nselect instance_name,status,database_status from v\\$instance;\nEOF',
'result_regex_map': {'MOUNTED': '.*' + ora_cfg['app_name'] + '.*MOUNTED.*ACTIVE.*', 'OPEN': '.*OPEN.*ACTIVE.*', 'SHUTDOWN': '.*ORA-01034.*'},
'accept_result': '^(OPEN|MOUNTED)$', 'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql',
'not_succ_to_break': True},
{'desc': '检查oracle的database role', 'key': 'DATABASE_ROLE',
'command': 'sqlplus -s / as sysdba <<EOF\nselect database_role from v\\$database;\nEOF',
'result_regex_map': {'PRIMARY': '.*PRIMARY.*', 'STANDBY': '.*PHYSICAL STANDBY.*'},
'accept_result': '^STANDBY$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的switchover status', 'key': 'SWITCHOVER_STATUS',
'command': 'sqlplus -s / as sysdba <<EOF\nselect switchover_status from v\\$database;\nEOF',
         'result_regex_map': {'TO_PRIMARY': '.*TO PRIMARY.*', 'TO_STANDBY': '.*TO STANDBY.*', 'SESSIONS_ACTIVE': '.*SESSIONS ACTIVE.*', 'ORA_ERROR': '.*ORA-\\d{5,5}(\\D|$)'},
'accept_result': '^(TO_PRIMARY|SESSIONS_ACTIVE)$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False},
{'desc': '检查oracle的archive_gap', 'key': 'ARCHIVE_GAP',
'command': 'sqlplus -s / as sysdba <<EOF\nselect * from v\\$archive_gap;\nEOF',
'result_regex_map': {'ARCHIVE_GAP_EMPTY': '.*no rows selected.*', 'ARCHIVE_GAP_NOT_EMPTY': '^((?!no rows selected).)*$'},
'accept_result': '^ARCHIVE_GAP_EMPTY$',
'timeout': False, 'depend_cond': {}, 'command_type': 'oracle/sql', 'not_succ_to_break': False}
]
check_result, conn = __create_ssh_connect(host_ip, ssh_port, ora_cfg['app_user'], ora_cfg['app_user_password'])
item_list.append(check_result)
if not check_result['exec_success'] :
logger.error('为' + ora_cfg['app_user'] + '创建ssh客户端失败,停止检查')
dg_status_ret['comment'] = '为' + ora_cfg['app_user'] + '创建ssh客户端失败,停止检查'
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return dg_status_ret
for check_item in ora_check_items:
can_exec_check = True
for k, v in check_item['depend_cond'].iteritems():
if not check_k_v.has_key(k) or check_k_v[k] != v:
logger.info(check_item['desc'] + '依赖的的条件:' + k + '=' + v + '不满足,该步骤将不执行')
can_exec_check = False
break
if can_exec_check:
item_result = __exec_check(conn, check_item)
item_list.append(item_result)
if not item_result['exec_success']:
err_msg = err_msg + check_item['desc'] + '检查失败;'
if check_item['not_succ_to_break']:
logger.error(check_item['desc'] + '执行失败且not_succ_to_break=True,退出检查流程,当前流程执行结果:' + err_msg)
dg_status_ret['comment'] = err_msg
conn.close()
return dg_status_ret
if err_msg == '':
dg_status_ret['status'] = 'SUCCESS'
dg_status_ret['comment'] = '主oracle状态检查通过'
else:
dg_status_ret['comment'] = err_msg
conn.close()
logger.info('主oracle状态检查结果' + dg_status_ret['status'] + ':' + dg_status_ret['comment'])
return dg_status_ret
def check_dg_oracle_status(pri_ora_cfg, stby_ora_cfg):
dg_status_ret = dict()
dg_status_ret['status'] = 'SUCCESS'
dg_status_ret['key'] = 'DG_STANDBY_ORACLE_STATUS_CHECK'
dg_status_ret['startTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
dg_status_ret['desc'] = '检查dg oracle运行装态以及配置信息'
dg_status_ret['comment'] = ''
logger.info('开始检查primary oracle的dg状态:host_ip=' + pri_ora_cfg['host_ip'] + ',host_name=' + pri_ora_cfg['host_name'])
pri_status = check_dg_primary_oracle_status(pri_ora_cfg, stby_ora_cfg)
logger.info('开始检查standby oracle的dg状态:host_ip=' + stby_ora_cfg['host_ip'] + ',host_name=' + stby_ora_cfg['host_name'])
stby_status = check_dg_stby_oracle_status(pri_ora_cfg, stby_ora_cfg)
dg_status_ret['endTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
dg_status_ret['pri_ora_dg_status'] = pri_status
dg_status_ret['stby_ora_dg_status'] = stby_status
return dg_status_ret
def start_pri_stby_switch(pri_ora_cfg, stby_ora_cfg, switch_method, avaible_ip):
return None
"""
ora_cfg = AppCfg.objects.get(id=1)
cfg = dict()
cfg['app_template_id'] = ora_cfg.app_template.id
cfg['app_type'] = ora_cfg.app_type
cfg['app_name'] = ora_cfg.app_name
cfg['app_alias'] = ora_cfg.app_alias
cfg['base_path'] = ora_cfg.base_path
cfg['host_ip'] = ora_cfg.server.host_ip
cfg['host_name'] = ora_cfg.server.host_name
cfg['root_user'] = ora_cfg.root_user.login_name
cfg['root_password'] = <PASSWORD>.root_user.<PASSWORD>
cfg['app_user'] = ora_cfg.app_user.login_name
cfg['app_user_password'] = ora_cfg.app_user.pass_word
cfg['app_cfg_id'] = ora_cfg.id
cfg['server_id'] = ora_cfg.server.id
cfg['root_user_id'] = ora_cfg.root_user.id
cfg['app_user_id'] = ora_cfg.app_user.id
cfg['service_name'] = ora_cfg.server_name
{'app_user_password': u'<PASSWORD>', 'app_type': u'oracle', 'app_name': u'ccod', 'service_name': u'db_wending', 'server_id': 1, 'host_ip': u'10.130.41.159', 'base_path
': u'/home/oracle/oracle10g/product/10.2.0/db_1', 'app_template_id': 1, 'app_alias': u'WENDING', 'root_user_id': 1, 'app_user_id': 2, 'host_name': u'ccod-oracle5',
'app_cfg_id': 1, 'app_user': u'oracle', 'root_password': u'<PASSWORD>', 'root_user': u'root'}
[{'timeout': True, 'depend_cond': {'TERMINAL_CONSOLE': 'SUCC'}, 'result_regex_map': {'SUCC': '.*The command completed successfully.*', 'TNS_NOT_START': '.*TNS-12541.*'}, 'command': 'lsnrctl status', 'key': 'TNS_START', 'command_type': 'bash/shell', 'not_succ_to_break': True, 'desc': '检查oracle的tns服务是否启动'}]
"""
logger.info('$$$$$$$$$$$$开始测试$$$$$$$$$$$')
test_ret = check_dg_oracle_status(pri_cfg, stby_cfg)
print(test_ret)
``` |
{
"source": "3288103265/pytorch-lightning-template",
"score": 2
} |
#### File: pytorch-lightning-template/model/loss.py
```python
import numpy as np
import torch
import torch.nn as nn
from torchvision import models
class Vgg19(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss(nn.Module):
def __init__(self):
super(VGGLoss, self).__init__()
self.vgg = Vgg19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * \
self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
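# Usage sketch for VGGLoss (illustrative; it assumes a CUDA device is available because the
# constructor moves VGG19 to .cuda(), and that both inputs are (N, 3, H, W) image tensors):
#
#     vgg_loss = VGGLoss()
#     loss = vgg_loss(fake_images.cuda(), real_images.cuda())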
def make_mask(labels, n_cls, mask_negatives):
    # Returns a one-hot-like mask; could use scatter instead.
device = labels.device
labels = labels.detach().cpu().numpy()
n_samples = labels.shape[0]
if mask_negatives:
mask_multi, target = np.zeros([n_cls, n_samples]), 1.0
else:
mask_multi, target = np.ones([n_cls, n_samples]), 0.0
for c in range(n_cls):
c_indices = np.where(labels == c)
mask_multi[c, c_indices] = target
return torch.tensor(mask_multi).type(torch.long).to(device)
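# A roughly equivalent vectorised sketch of make_mask (illustrative; it assumes the same
# convention as above: rows index classes, columns index samples):
#
#     import torch.nn.functional as F
#     one_hot = F.one_hot(labels, n_cls).T.long()          # (n_cls, n_samples)
#     mask = one_hot if mask_negatives else 1 - one_hot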
def set_temperature(conditional_strategy, tempering_type, start_temperature, end_temperature, step_count, tempering_step, total_step):
if conditional_strategy == 'ContraGAN':
if tempering_type == 'continuous':
t = start_temperature + step_count * \
(end_temperature - start_temperature)/total_step
elif tempering_type == 'discrete':
tempering_interval = total_step//(tempering_step + 1)
t = start_temperature + \
(step_count//tempering_interval) * \
(end_temperature-start_temperature)/tempering_step
else:
t = start_temperature
else:
t = 'no'
return t
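# Example (illustrative numbers): with conditional_strategy='ContraGAN', tempering_type='continuous',
# start_temperature=1.0, end_temperature=0.5, total_step=100 and step_count=50,
# t = 1.0 + 50 * (0.5 - 1.0) / 100 = 0.75.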
class Conditional_Contrastive_loss(torch.nn.Module):
def __init__(self, batch_size, pos_collected_numerator):
super(Conditional_Contrastive_loss, self).__init__()
self.batch_size = batch_size
self.pos_collected_numerator = pos_collected_numerator
self.calculate_similarity_matrix = self._calculate_similarity_matrix()
self.cosine_similarity = torch.nn.CosineSimilarity(dim=-1)
# self.device = device
def _calculate_similarity_matrix(self):
return self._cosine_simililarity_matrix
def remove_diag(self, M):
h, w = M.shape
assert h == w, "h and w should be same"
# mask = np.ones((h, w)) - np.eye(h)
mask = torch.ones(h, w)-torch.eye(h)
        mask = (mask).type(torch.bool).to(M.device)
return M[mask].view(h, -1)
def _cosine_simililarity_matrix(self, x, y):
v = self.cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))
return v
def forward(self, inst_embed, proxy, negative_mask, labels, temperature, margin):
# inst_embed: instance feature: l(x) in paper Eq.8
        # proxy: label embedding -> e(y) in Eq.8
# negative_mask: shape?
negative_mask = negative_mask.T # batch first.
similarity_matrix = self.calculate_similarity_matrix(
inst_embed, inst_embed)
instance_zone = torch.exp(
            (self.remove_diag(similarity_matrix) - margin)/temperature)  # second term of the denominator
inst2proxy_positive = torch.exp((self.cosine_similarity(
            inst_embed, proxy) - margin)/temperature)  # first term of the numerator and of the denominator
if self.pos_collected_numerator:
mask_4_remove_negatives = negative_mask[labels]
mask_4_remove_negatives = self.remove_diag(mask_4_remove_negatives)
            inst2inst_positives = instance_zone*mask_4_remove_negatives  # second term of the numerator
numerator = inst2proxy_positive + inst2inst_positives.sum(dim=1)
else:
            numerator = inst2proxy_positive  # no data-to-data term, paper Eq.7
denominator = torch.cat(
[torch.unsqueeze(inst2proxy_positive, dim=1), instance_zone], dim=1).sum(dim=1)
criterion = -torch.log(temperature*(numerator/denominator)).mean()
return criterion
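# Example of how this loss is typically wired up (illustrative only; the embedding size,
# class count and batch size below are assumptions, not values from this repository):
#
#     n_cls, batch, dim = 10, 8, 128
#     criterion = Conditional_Contrastive_loss(batch, pos_collected_numerator=True)
#     labels = torch.randint(0, n_cls, (batch,))
#     inst_embed = torch.randn(batch, dim)                        # l(x)
#     proxy = torch.randn(batch, dim)                             # e(y), per-sample label embedding
#     neg_mask = make_mask(labels, n_cls, mask_negatives=True).T  # batch-first, as forward() expects
#     loss = criterion(inst_embed, proxy, neg_mask, labels, temperature=1.0, margin=0.0)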
class LossManager(object):
def __init__(self):
self.total_loss = None
self.all_losses = {}
def add_loss(self, loss, name, weight=1.0, use_loss=True):
cur_loss = loss * weight
if use_loss:
if self.total_loss is not None:
self.total_loss += cur_loss
else:
self.total_loss = cur_loss
self.all_losses[name] = cur_loss.data.item()
def items(self):
return self.all_losses.items()
```
#### File: 3288103265/pytorch-lightning-template/utils.py
```python
import os
from pathlib2 import Path
def load_model_path(root=None, version=None, v_num=None, best=False):
""" When best = True, return the best model's path in a directory
by selecting the best model with largest epoch. If not, return
the last model saved. You must provide at least one of the
first three args.
Args:
root: The root directory of checkpoints. It can also be a
model ckpt file. Then the function will return it.
version: The name of the version you are going to load.
v_num: The version's number that you are going to load.
best: Whether return the best model.
"""
def sort_by_epoch(path):
name = path.stem
epoch=int(name.split('-')[1].split('=')[1])
return epoch
def generate_root():
if root is not None:
return root
elif version is not None:
return str(Path('lightning_logs', version, 'checkpoints'))
else:
return str(Path('lightning_logs', f'version_{v_num}', 'checkpoints'))
    if root is None and version is None and v_num is None:
return None
root = generate_root()
if Path(root).is_file():
return root
if best:
files=[i for i in list(Path(root).iterdir()) if i.stem.startswith('best')]
files.sort(key=sort_by_epoch, reverse=True)
res = str(files[0])
else:
res = str(Path(root) / 'last.ckpt')
return res
def load_model_path_by_args(args):
return load_model_path(root=args.load_dir, version=args.load_ver, v_num=args.load_v_num)
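# Example (illustrative; the file name below is an assumption about the checkpoint naming scheme):
#   load_model_path(version='my_run', best=True) searches lightning_logs/my_run/checkpoints/
#   for files whose stem starts with 'best' (e.g. 'best-epoch=12-val_acc=0.93.ckpt') and returns
#   the one with the largest epoch; with best=False it returns .../checkpoints/last.ckpt instead.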
``` |
{
"source": "329124/PyCE",
"score": 3
} |
#### File: 329124/PyCE/engine.py
```python
from tkinter import Tk
from tkinter import Canvas
from PIL import Image
from PIL import ImageTk
class Window:
def __init__(self, width, height, title):
self.width = width
self.height = height
self.root = Tk()
self.root.title(title)
self.root.resizable(False, False)
self.canvas = Canvas(self.root, width = width, height = height, highlightthickness = 0)
self.canvas.pack()
self.entityList = []
def addEntity(self, entity):
self.entityList.append(entity)
def update(self):
for entity in self.entityList:
entity.updateComponents()
self.root.update()
class InputManager:
def __init__(self, window):
self.window = window
window.canvas.bind_all("<KeyPress>", self.cbKeyPressEvent)
window.canvas.bind_all("<KeyRelease>", self.cbKeyReleaseEvent)
window.canvas.bind("<Motion>", self.cbMotionEvent)
window.canvas.bind("<Button-1>", self.cbLeftButtonPressEvent)
window.canvas.bind("<ButtonRelease-1>", self.cbLeftButtonReleaseEvent)
window.canvas.bind("<Button-3>", self.cbRightButtonPressEvent)
window.canvas.bind("<ButtonRelease-3>", self.cbRightButtonReleaseEvent)
self.newlyActiveKeys = []
self.activeKeys = []
self.mouseData = self.MouseData()
class MouseData:
def __init__(self):
self.x = 0
self.y = 0
self.leftActive = False
self.leftNewlyActive = False
self.rightActive = False
self.rightNewlyActive = False
def cbKeyPressEvent(self, event):
if event.char not in self.newlyActiveKeys and event.char not in self.activeKeys:
self.newlyActiveKeys.append(event.char)
def cbKeyReleaseEvent(self, event):
if event.char in self.newlyActiveKeys:
self.newlyActiveKeys.remove(event.char)
if event.char in self.activeKeys:
self.activeKeys.remove(event.char)
def cbMotionEvent(self, event):
self.mouseData.x = event.x
self.mouseData.y = self.window.height - event.y - 1
def cbLeftButtonPressEvent(self, event):
if not self.mouseData.leftActive:
self.mouseData.leftNewlyActive = True
self.mouseData.leftActive = True
def cbLeftButtonReleaseEvent(self, event):
self.mouseData.leftActive = False
self.mouseData.leftNewlyActive = False
def cbRightButtonPressEvent(self, event):
if not self.mouseData.rightActive:
self.mouseData.rightNewlyActive = True
self.mouseData.rightActive = True
def cbRightButtonReleaseEvent(self, event):
self.mouseData.rightActive = False
self.mouseData.rightNewlyActive = False
def getMousePosition(self):
"""Returns tuple of the mouse's x and y position."""
return (self.mouseData.x, self.mouseData.y)
def getMouseLeftDown(self):
"""Will only return true once per left mouse button press."""
if self.mouseData.leftNewlyActive:
self.mouseData.leftNewlyActive = False
return True
else:
return False
def getMouseLeft(self):
"""Will always return true if left mouse button is held down."""
return self.mouseData.leftActive
def getMouseRightDown(self):
"""Will only return true once per right mouse button press."""
if self.mouseData.rightNewlyActive:
self.mouseData.rightNewlyActive = False
return True
else:
return False
def getMouseRight(self):
"""Will always return true if right mouse button is held down."""
return self.mouseData.rightActive
def getKeyDown(self, key):
"""Will only return true once per key press."""
if key in self.newlyActiveKeys:
self.newlyActiveKeys.remove(key)
self.activeKeys.append(key)
return True
else:
return False
def getKey(self, key):
"""Will always return true if key is held down."""
if key in self.newlyActiveKeys:
self.newlyActiveKeys.remove(key)
self.activeKeys.append(key)
return True
elif key in self.activeKeys:
return True
else:
return False
class Sprite:
def __init__(self, path, scale):
baseImage = Image.open(path)
resizedImage = baseImage.resize((baseImage.width * scale, baseImage.height * scale), Image.NEAREST)
self.width = resizedImage.width
self.height = resizedImage.height
self.photoImage = ImageTk.PhotoImage(resizedImage)
``` |
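A minimal sketch of wiring these classes into a main loop; the quit key and the loop structure are assumptions, since the engine leaves that to the caller:
```python
window = Window(320, 240, "PyCE demo")
inputs = InputManager(window)

running = True
while running:
    if inputs.getKeyDown("q"):          # quit once 'q' is pressed
        running = False
    mouse_x, mouse_y = inputs.getMousePosition()
    window.update()                      # update entities, then the Tk root
```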
{
"source": "3299/2016",
"score": 4
} |
#### File: 2016/components/beltAxis.py
```python
class BeltAxis(object):
def __init__(self, output):
self.output = output
self.direction = 1
self.running = False
def run(self, on, value, stop1, stop2):
# stop1 is top switch, stop2 is bottom stop1 = True?
"""
if (stop1 == False or stop2 == False):
self.running = False
self.direction = self.direction * -1
if (onV == True and self.running == False):
self.running = True
if (self.running == True):
self.output.set(self.direction)
"""
if (on == True):
if (stop2 == False and value < 0):
self.output.set(0)
elif (stop1 == False and value > 0):
self.output.set(0)
else:
self.output.set(value)
else:
self.output.set(0)
def set(self, value):
self.output.set(value)
```
#### File: 2016/components/belt.py
```python
class Belt(object):
def __init__(self, output):
self.output = output
def run(self, forward, backward):
if (forward == True):
self.output.set(-1)
elif (backward == True):
self.output.set(1)
else:
self.output.set(0)
def set(self, value):
self.output.set(value)
``` |
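A quick sketch of how `Belt.run()` maps two buttons onto a motor output; `FakeMotor` is a stand-in for the wpilib speed controller the real robot would pass in:
```python
class FakeMotor:
    def set(self, value):
        print("motor set to", value)

belt = Belt(FakeMotor())
belt.run(forward=True, backward=False)    # -> motor set to -1
belt.run(forward=False, backward=True)    # -> motor set to 1
belt.run(forward=False, backward=False)   # -> motor set to 0
```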
{
"source": "3299/2017",
"score": 3
} |
#### File: 2017/components/gear.py
```python
class GearSol(object):
def __init__(self, sol):
self.sol = sol
def run(self, trigger):
if (trigger == True):
self.sol.set(self.sol.Value.kForward)
else:
self.sol.set(self.sol.Value.kReverse)
```
#### File: 2017/components/sonic.py
```python
import statistics
import wpilib
class Sonic(object):
def __init__(self, sensor):
self.sensor = sensor
self.lastMeasurement = {'left': 0, 'right': 0}
self.lastSensor = 'left'
self.sensorOn = False
self.timer = wpilib.Timer()
self.lastTime = 0
self.period = 200
def run(self):
if ((self.timer.getMsClock() - self.lastTime) > self.period):
if (self.sensorOn == False):
if (self.lastSensor == 'left'):
self.sensor['rightR'].pulse(255)
if (self.lastSensor == 'right'):
self.sensor['leftR'].pulse(255)
self.sensorOn = True
else:
if (self.lastSensor == 'left'):
averageVoltage = self.sensor['rightS'].getAverageVoltage()
distance = (averageVoltage * 1000)/4.9
self.sensor['rightR'].pulse(0)
self.lastMeasurement['right'] = distance
self.lastSensor = 'right'
if (self.lastSensor == 'right'):
averageVoltage = self.sensor['leftS'].getAverageVoltage()
distance = (averageVoltage * 1000)/4.9
self.sensor['leftR'].pulse(0)
self.lastMeasurement['left'] = distance
self.lastSensor = 'left'
self.sensorOn = False
self.lastTime = self.timer.getMsClock()
def getCm(self, side):
return self.lastMeasurement[side]
``` |
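The voltage-to-distance conversion in `Sonic.run()` is a simple linear scaling; a worked example is below (the 4.9 factor is taken as-is from the component and depends on the specific ultrasonic sensor's mV-per-distance rating):
```python
average_voltage = 1.47                      # volts, hypothetical ADC reading
distance = (average_voltage * 1000) / 4.9   # same formula as in Sonic.run()
print(round(distance, 1))                   # -> 300.0
```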
{
"source": "3299/2018",
"score": 3
} |
#### File: 2018/components/lights.py
```python
import json
import wpilib
import hal
"""
Facilitates communication between roboRIO and Arduino (for lights).
Handled entirely in the main loop. Do not pass in an instance to
components.
"""
class Lights(object):
def __init__(self):
# Init I2C for communication with Arduino
if (hal.isSimulation()):
# import stub for simulation
from components.i2cstub import I2CStub
self.arduinoC = wpilib.I2C(wpilib.I2C.Port.kOnboard, 4, I2CStub())
else:
self.arduinoC = wpilib.I2C(wpilib.I2C.Port.kOnboard, 4)
self.allianceColor = 'red'
def setColor(self, color):
self.allianceColor = color
def run(self, options):
if (options['effect'] != 'rainbow'):
# default to alliance color
options.setdefault('color', self.allianceColor)
if (options['color'] == 'blue' or options['color'] == wpilib.DriverStation.Alliance.Blue):
commandC = 'b'
else:
commandC = 'r'
options.setdefault('fade', False)
if (options['fade'] == True):
commandF = 't'
else:
commandF = 'f'
options.setdefault('speed', '')
commandS = str(options['speed'])
if (options['effect'] == 'stagger'):
commandByte = 's'
elif (options['effect'] == 'flash'):
commandByte = 'f'
value = commandByte + commandC + commandF + commandS
elif (options['effect'] == 'rainbow'):
value = 'r'
self.send(value)
def send(self, data):
try:
self.arduinoC.writeBulk(bytes(data + '\n', encoding="ASCII"))
except:
pass
```
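The command sent over I2C is just a few concatenated characters; a pure-Python sketch of how `Lights.run()` assembles it for an illustrative options dict (no wpilib required):
```python
options = {'effect': 'flash', 'color': 'blue', 'fade': True, 'speed': 200}

commandC = 'b' if options['color'] == 'blue' else 'r'
commandF = 't' if options.get('fade') else 'f'
commandS = str(options.get('speed', ''))
commandByte = 's' if options['effect'] == 'stagger' else 'f'

value = commandByte + commandC + commandF + commandS
print(value)  # -> 'fbt200', which send() terminates with '\n' and writes over I2C
```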
#### File: 3299/2018/robot.py
```python
import wpilib
from inits import Component
import helpers
from components.chassis import Chassis
from autonomous import Autonomous
from components.lights import Lights
from components.metabox import MetaBox
from components.winch import Winch
from components.pdb import Power
from networktables import NetworkTables
class Randy(wpilib.TimedRobot):
def robotInit(self):
self.C = Component() # Components inits all connected motors, sensors, and joysticks. See inits.py.
# Setup subsystems
self.driverStation = wpilib.DriverStation.getInstance()
self.drive = Chassis(self.C.driveTrain, self.C.gyroS, self.C.driveYEncoderS)
self.lights = Lights()
self.metabox = MetaBox(self.C.elevatorEncoderS,
self.C.elevatorLimitS,
self.C.jawsLimitS,
self.C.metaboxLimitS,
self.C.jawsM,
self.C.elevatorM,
self.C.intakeM,
self.C.jawsSol)
self.winch = Winch(self.C.winchM)
self.power = Power()
# Joysticks
self.joystick = wpilib.XboxController(0)
self.leftJ = wpilib.Joystick(1)
# default to rainbow effect
self.lights.run({'effect': 'rainbow'})
self.sd = NetworkTables.getTable('SmartDashboard')
self.sd.putNumber('station', 2)
def teleopPeriodic(self):
"""This function is called periodically during operator control."""
'''Components'''
# Rumble
averageDriveCurrent = self.power.getAverageCurrent([0, 1, 14, 15])
if (averageDriveCurrent > 8):
self.joystick.setRumble(0, 1)
else:
self.joystick.setRumble(0, 0)
print(self.metabox.getEncoder())
'''
TODO: calibrate sparks
'''
# Drive
self.drive.run(self.joystick.getRawAxis(0), self.joystick.getRawAxis(1), self.joystick.getRawAxis(4))
# MetaBox
self.metabox.run(self.leftJ.getY(), # elevator rate of change
self.leftJ.getRawButton(1), # run intake wheels in
self.leftJ.getRawButton(3), # open jaws
self.leftJ.getRawButton(2), # run intake wheels out
self.leftJ.getRawButton(4), # go to bottom
self.leftJ.getRawAxis(2), # set angle of jaws
self.leftJ.getRawButton(8)) # calibrate elevator
# Lights
self.lights.setColor(self.driverStation.getAlliance())
if (self.driverStation.getMatchTime() < 30 and self.driverStation.getMatchTime() != -1):
self.lights.run({'effect': 'flash', 'fade': True, 'speed': 200})
elif (helpers.deadband(self.leftJ.getY(), 0.1) != 0):
self.lights.run({'effect': 'stagger'})
elif (self.leftJ.getRawButton(1) or self.leftJ.getRawButton(2)):
self.lights.run({'effect': 'flash', 'fade': False, 'speed': 20})
else:
self.lights.run({'effect': 'rainbow'})
def teleopInit(self):
"""This function is run once each time the robot enters teleop mode."""
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
def autonomousInit(self):
"""This function is run once each time the robot enters autonomous mode."""
self.lights.run({'effect': 'flash', 'fade': True, 'speed': 400})
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
# Init autonomous
self.autonomousRoutine = Autonomous(self.drive, self.C.driveYEncoderS, self.C.gyroS, self.metabox, self.driverStation)
# reset state
self.autonomousRoutine.state = 0
def autonomousPeriodic(self):
self.autonomousRoutine.run() # see autonomous.py
def test(self):
# reset gyro
self.C.gyroS.reset()
# reset encoder
self.C.driveYEncoderS.reset()
if __name__ == "__main__":
wpilib.run(Randy)
``` |
{
"source": "3299/2019",
"score": 2
} |
#### File: 3299/2019/cameras.py
```python
from cscore import CameraServer
def main():
cs = CameraServer.getInstance()
#cs.enableLogging()
usb1 = cs.startAutomaticCapture(dev=0)
#usb2 = cs.startAutomaticCapture(dev=1)
#usb1.setConnectionStrategy(kKeepOpen)
#usb2.setConnectionStrategy(kKeepOpen)
usb1.setResolution(160, 120)
#usb2.setResolution(160, 120)
#print("before forever")
#cs.waitForever()
#print("yay! it works kinda ish maybe")
```
#### File: 2019/components/chassis.py
```python
import helpers
import wpilib
import math
from networktables import NetworkTables
class Chassis(object):
def __init__(self, drive, gyro, encoderY):
self.drive = drive
self.gyro = gyro
self.encoderY = encoderY
self.jDeadband = 0.06
self.sd = NetworkTables.getTable('SmartDashboard')
# PID loop for angle
self.pidAngleDefault = {'p': 0.01, 'i': 0, 'd': 0.004}
self.sd.putNumber('pidAngleP', self.pidAngleDefault['p'])
self.sd.putNumber('pidAngleI', self.pidAngleDefault['i'])
self.sd.putNumber('pidAngleD', self.pidAngleDefault['d'])
self.pidAngle = wpilib.PIDController(self.pidAngleDefault['p'], self.pidAngleDefault['i'], self.pidAngleDefault['d'], self.gyro, self.updateAnglePID)
self.pidAngle.setAbsoluteTolerance(2)
self.pidRotateRate = 0
self.wasRotating = False
# PID loop for Cartesian Y direction
self.pidYDefault = {'p': 0.15, 'i': 0, 'd': 0.05}
self.sd.putNumber('pidYP', self.pidYDefault['p'])
self.sd.putNumber('pidYI', self.pidYDefault['i'])
self.sd.putNumber('pidYD', self.pidYDefault['d'])
self.pidY = wpilib.PIDController(self.pidYDefault['p'], self.pidYDefault['i'], self.pidYDefault['d'], self.encoderY.getDistance, self.updateYPID)
self.pidYRate = 0
self.toDistanceFirstCall = True
self.toAngleFirstCall = True
self.toTimeFirstCall = True
self.lastAngle = 0
self.timer = wpilib.Timer()
def run(self, x, y, rotation):
'''Intended for use in telelop. Use .cartesian() for auto.'''
# Map joystick values to curve
x = self.curve(helpers.deadband(x, 0.1))
y = self.curve(helpers.deadband(y, 0.1))
rotation = helpers.deadband(-rotation * 0.5, 0.1)
# write manipulated values to motors
self.cartesian(-x, y, rotation)
def cartesian(self, x, y, rotation):
# assign speeds
speeds = [0] * 4
speeds[0] = x + y + rotation # front left
speeds[1] = -x + y - rotation # front right
speeds[2] = -x + y + rotation # back left
speeds[3] = x + y - rotation # back right
# scales all speeds if one is in range
# (-inf, -1) U (1, inf)
maxSpeed = max(abs(x) for x in speeds)
if maxSpeed > 1.0:
for i in range(0, 4):
speeds[i] = speeds[i] / maxSpeed
# write speeds to controllers
for i in range(0, 4):
self.drive[i].set(speeds[i])
def updateAnglePID(self, value):
self.pidAngle.setP(self.sd.getNumber('pidAngleP', self.pidAngleDefault['p']))
self.pidAngle.setI(self.sd.getNumber('pidAngleI', self.pidAngleDefault['i']))
self.pidAngle.setD(self.sd.getNumber('pidAngleD', self.pidAngleDefault['d']))
self.pidRotateRate = value
def updateYPID(self, value):
self.pidY.setP(self.sd.getNumber('pidYP', self.pidYDefault['p']))
self.pidY.setI(self.sd.getNumber('pidYI', self.pidYDefault['i']))
self.pidY.setD(self.sd.getNumber('pidYD', self.pidYDefault['d']))
self.pidYRate = value
def curve(self, value):
"""Because this divides by sin(1), an input
in range [-1, 1] will always have an output
range of [-1, 1]. """
value = helpers.deadband(helpers.raiseKeepSign(value, 1), self.jDeadband)
        return math.sin(value) / math.sin(1)
def toAngle(self, angle, reset=False):
"""Intended for use in auto."""
if (self.toAngleFirstCall and reset == True):
self.gyro.reset()
self.toAngleFirstCall = False
self.pidAngle.setSetpoint(angle)
self.pidAngle.enable()
#print(self.pidAngle.getError())
if (self.pidAngle.getError() < 0.5):
self.pidAngle.disable()
self.toAngleFirstCall = True
self.lastAngle = angle
return True
else:
self.cartesian(0, 0, -self.pidRotateRate)
return False
def toDistance(self, distance):
"""Intended for use in auto."""
if (self.toDistanceFirstCall):
self.encoderY.reset()
self.toDistanceFirstCall = False
self.pidY.setContinuous(False)
self.pidY.setSetpoint(distance)
self.pidY.enable()
# simple P for rotation
rotation = helpers.remap((self.lastAngle - self.gyro.getAngle()), -180, 180, -1, 1)
rotation = rotation * 1
#print(self.pidY.getError())
rotation = 0
if (self.pidY.getError() < 0.05):
self.pidY.disable()
self.cartesian(0, 0, 0)
self.toDistanceFirstCall = True
return True
else:
self.cartesian(0, -self.pidYRate, -rotation)
return False
def toTime(self, time, power):
if (self.toTimeFirstCall):
self.timer.start()
self.toTimeFirstCall = False
if (self.timer.hasPeriodPassed(time)):
self.cartesian(0, 0, 0)
return True
else:
self.cartesian(0, -power, 0)
return False
``` |
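The `helpers` module is not included in this excerpt; the functions below are plausible stand-ins for the three calls `Chassis` relies on, written only so the joystick `curve()` mapping can be run in isolation:
```python
import math

def deadband(value, band):
    # Zero out small joystick values (assumed behaviour of helpers.deadband).
    return 0 if abs(value) < band else value

def raiseKeepSign(value, power):
    # Raise the magnitude to a power while keeping the sign (assumed behaviour).
    return math.copysign(abs(value) ** power, value)

def remap(value, in_min, in_max, out_min, out_max):
    # Linear range remap (assumed behaviour of helpers.remap).
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

def curve(value, j_deadband=0.06):
    # Same shape as Chassis.curve(): input in [-1, 1] maps back into [-1, 1].
    value = deadband(raiseKeepSign(value, 1), j_deadband)
    return math.sin(value) / math.sin(1)

print(round(curve(0.5), 3))   # -> 0.57
```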
{
"source": "32av32/yatube",
"score": 2
} |
#### File: yatube/posts/models.py
```python
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
text = models.TextField(verbose_name='Text', )
pub_date = models.DateTimeField(verbose_name='Publish date', auto_now_add=True)
author = models.ForeignKey(User, verbose_name='Author', on_delete=models.CASCADE, related_name='posts')
group = models.ForeignKey('Group', blank=True, null=True, verbose_name='Group', on_delete=models.CASCADE,
related_name='posts')
image = models.ImageField(verbose_name='Image', upload_to='posts/', blank=True, null=True)
class Group(models.Model):
title = models.CharField(max_length=200, verbose_name='Title')
slug = models.SlugField(verbose_name='Slug', unique=True)
description = models.TextField(verbose_name='Description')
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey('Post', verbose_name='Post', on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, verbose_name='Author', on_delete=models.CASCADE, related_name='comments')
text = models.TextField(verbose_name='Text', )
created_date = models.DateTimeField(verbose_name='Created_date', auto_now_add=True)
class Follow(models.Model):
user = models.ForeignKey(User, verbose_name='Follower', on_delete=models.CASCADE, related_name='follower')
author = models.ForeignKey(User, verbose_name='Following', on_delete=models.CASCADE, related_name='following')
class Meta:
unique_together = ['user', 'author']
```
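A quick ORM sketch against these models, assuming a configured Django project and shell session (all usernames and content are made up):
```python
author = User.objects.create_user(username='author')
reader = User.objects.create_user(username='reader')

group = Group.objects.create(title='News', slug='news', description='Site news')
post = Post.objects.create(text='First post', author=author, group=group)
Comment.objects.create(post=post, author=author, text='Nice!')

# unique_together on Follow prevents duplicate follow rows.
Follow.objects.get_or_create(user=reader, author=author)
```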
#### File: yatube/posts/views.py
```python
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin, AccessMixin
from django.urls import reverse
from .models import Post, Group, Comment, Follow
from .forms import PostForm, CommentForm
from django.views.generic import (ListView, DetailView, CreateView, UpdateView, DeleteView)
User = get_user_model()
class IndexView(ListView):
model = Post
queryset = Post.objects.all().select_related('author', 'group').prefetch_related('comments')
template_name = 'index.html'
paginate_by = 10
ordering = '-pub_date'
class GroupView(ListView):
model = Post
template_name = 'group.html'
paginate_by = 10
ordering = '-pub_date'
def get_queryset(self):
return Post.objects.filter(group__slug=self.kwargs['slug'])\
.select_related('author', 'group').prefetch_related('comments')
def get_context_data(self, *, object_list=None, **kwargs):
group = get_object_or_404(Group, slug=self.kwargs['slug'])
context_data = super().get_context_data(object_list=object_list, **kwargs)
context_data['group'] = group
return context_data
class ProfileView(ListView):
model = Post
template_name = 'profile.html'
paginate_by = 10
ordering = '-pub_date'
def get_queryset(self):
return Post.objects.filter(author__username=self.kwargs['username']) \
.select_related('author', 'group')
@property
def extra_context(self):
if self.request.user.is_authenticated:
return {
'author': get_object_or_404(User, username=self.kwargs['username']),
'following': Follow.objects.filter(user=self.request.user).filter(author__username=self.kwargs['username'])
}
return {
'author': get_object_or_404(User, username=self.kwargs['username']),
}
class PostView(DetailView):
model = Post
queryset = Post.objects.all().select_related('author', 'group').prefetch_related('comments')
template_name = 'post.html'
pk_url_kwarg = 'post_id'
context_object_name = 'post'
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
context_data['form'] = CommentForm()
return context_data
class NewPostView(LoginRequiredMixin, CreateView):
model = Post
form_class = PostForm
template_name = 'new.html'
success_url = '/'
def form_valid(self, form):
form.instance.author = self.request.user
form.save()
return super().form_valid(form)
class PostEditView(LoginRequiredMixin, AccessMixin, UpdateView):
model = Post
pk_url_kwarg = 'post_id'
form_class = PostForm
template_name = 'new.html'
context_object_name = 'post'
def get_success_url(self):
return reverse('post',
kwargs={
'username': self.kwargs['username'],
'post_id': self.kwargs['post_id'],
})
@property
def extra_context(self):
return {'username': self.kwargs['username']}
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
if request.user.username != self.kwargs['username']:
                return render(request, 'alert.html', {'message': 'You do not have permission to edit this post!'})
return super().dispatch(request, *args, **kwargs)
return redirect('login')
class PostDeleteView(DeleteView):
model = Post
pk_url_kwarg = 'post_id'
template_name = 'delete_post.html'
def get_success_url(self):
return reverse_lazy('profile', kwargs={'username': self.kwargs['username']})
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
if request.user.username != self.kwargs['username']:
                return render(request, 'alert.html', {'message': 'You do not have permission to delete this post!'})
return super().dispatch(request, *args, **kwargs)
return redirect('login')
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
form_class = CommentForm
template_name = 'comments.html'
def form_valid(self, form):
form.instance.post = Post.objects.select_related('author', 'group').get(id=self.kwargs['post_id'])
form.instance.author = self.request.user
form.save()
return super().form_valid(form)
def get_success_url(self):
return reverse('post',
kwargs={
'username': self.kwargs['username'],
'post_id': self.kwargs['post_id'],
})
@property
def extra_context(self):
return {
'username': get_object_or_404(User, username=self.kwargs['username']),
'post_id': self.kwargs['post_id']
}
class SubscriptionPostsView(LoginRequiredMixin, ListView):
model = Post
template_name = 'follow.html'
paginate_by = 10
def get_queryset(self):
posts = Post.objects.filter(author__following__user=self.request.user)
return posts
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.get_or_create(user=request.user, author=author)
return redirect('profile', username=username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
Follow.objects.get(user=request.user, author=author).delete()
return redirect('profile', username=username)
def page_not_found(request, exception):
return render(request, 'misc/404.html', {'path': request.path}, status=404)
def server_error(request):
return render(request, 'misc/500.html', status=500)
``` |
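The views resolve URLs by name (`post`, `profile`, `login`, and so on); a hypothetical `posts/urls.py` consistent with those `reverse()`/`redirect()` calls might look like this (the real project's URL configuration is not shown in this excerpt):
```python
from django.urls import path
from . import views

urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('group/<slug:slug>/', views.GroupView.as_view(), name='group'),
    path('new/', views.NewPostView.as_view(), name='new_post'),
    path('follow/', views.SubscriptionPostsView.as_view(), name='follow_index'),
    path('<str:username>/', views.ProfileView.as_view(), name='profile'),
    path('<str:username>/follow/', views.profile_follow, name='profile_follow'),
    path('<str:username>/unfollow/', views.profile_unfollow, name='profile_unfollow'),
    path('<str:username>/<int:post_id>/', views.PostView.as_view(), name='post'),
    path('<str:username>/<int:post_id>/edit/', views.PostEditView.as_view(), name='post_edit'),
    path('<str:username>/<int:post_id>/delete/', views.PostDeleteView.as_view(), name='post_delete'),
    path('<str:username>/<int:post_id>/comment/', views.AddCommentView.as_view(), name='add_comment'),
]
```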
{
"source": "32bitbradley/Taskr-agent",
"score": 2
} |
#### File: 32bitbradley/Taskr-agent/task.py
```python
import logging
import yaml
import hashlib
import os
import subprocess
import yaml
import json
import json_log_formatter
import requests
# Load configuration files
with open("config/config.yaml", mode="r") as f:
config = yaml.safe_load(f.read())
if config == None:
print("[Error] No config file could be loaded.")
exit(1)
with open("config/internal_settings.yaml", mode="r") as f:
internal_settings = yaml.safe_load(f.read())
if internal_settings == None:
    print("[Error] No internal settings file could be loaded.")
    exit(1)
logger = logging.getLogger()
try:
logger.setLevel(getattr(logging,str(config['logging']['level']).upper()))
except:
logging.NOTSET
class Query:
def __init__(self, manager_address, manager_port, agent_uuid):
self.manager_address = manager_address
self.manager_port = manager_port
self.agent_uuid = agent_uuid
self.host_base = f"http://{manager_address}:{manager_port}/api"
self.request_headers = {"Accept": "application/json"}
def query_for_tasks(self):
"""Will query the manager for outstanding tasks for this agent
Params:
Returns:
            A list of task dictionaries or an empty list
"""
# Request tasks from manager via task GET endpoint using query parameters
self.request_host = self.host_base + f"/task"
logger.debug('Sending GET query request to the manager', extra={'request_host': self.request_host, 'request_headers': self.request_headers})
self.request_params = {"target_agent": self.agent_uuid, "status_status": "pending,stopped", "expiration_expired": 'false'}
request_response = requests.get(self.request_host, headers=self.request_headers, params=self.request_params)
logger.debug('Received response from the manager', extra={'body': request_response.text, 'status_code': request_response.status_code})
request_response_json = request_response.json()
if request_response.status_code == 200:
# If the request did contain tasks, add it to a list, else return an empty list.
if len(request_response_json['data']['results']) > 0:
logger.debug('Outstanding tasks have been received from the manager', extra={'tasks': request_response_json['data']['results']})
data = []
for task in request_response_json['data']['results']:
data.append(task)
return data
else:
logger.debug('No outstanding tasks received from the manager', extra={'tasks': request_response_json['data']['results']})
data = []
return data
else:
            logger.error('The manager did not respond with 200 when querying for tasks', extra={'response_text': request_response.text, 'status_code': request_response.status_code})
data = []
return data
def get_task(self, task_id):
"""Get a specific task from the manager
Params:
task_id: The ID of the task to update
Returns:
* A single dictionary of 1 task's details
"""
# Request tasks from manager via task query endpoint
self.task_id = task_id
self.task_details = {}
self.request_host = self.host_base + f"/task/{self.task_id}"
logger.debug('Sending GET query request to the manager', extra={'request_host': self.request_host, 'request_headers': self.request_headers})
request_response = requests.get(self.request_host, headers=self.request_headers)
if request_response.status_code == 200:
logger.debug('Received response from the manager', extra={'body': request_response.text, 'status_code': request_response.status_code})
request_response_json = request_response.json()
# If the request did contain a single task, add it to a list, else return an empty list.
if len(request_response_json['data']['results']) == 1:
logger.debug('Received 1 task from the manager', extra={'results': request_response_json['data']['results']})
self.task_details = request_response_json['data']['results'][0]
return self.task_details
elif len(request_response_json['data']['results']) == 0:
logger.debug('The manager did not respond with a task for that ID', extra={'tasks': request_response_json['data']['results'], 'task_id':self.task_id, 'request_host':self.request_host})
return {}
else:
logger.error('The manager responded with more than 1 task for a specific task ID', extra={'tasks': request_response_json['data']['results']})
return {}
        else:
            logger.error('The manager did not respond with 200 when querying for a task', extra={'response_text': request_response.text, 'status_code': request_response.status_code})
            return {}
def get_type(self, type_id):
"""Will query the manager for a specific task type.
Params:
type_id: The task type id to query for
Returns:
A dictionary of task information
"""
self.type_id = type_id
self.type_details = {}
self.request_host = self.host_base + f"/type/{self.type_id}"
logger.debug('Sending GET query request to the manager', extra={'request_host': self.request_host, 'request_headers': self.request_headers})
request_response = requests.get(self.request_host, headers=self.request_headers)
logger.debug('Received response from the manager', extra={'body': request_response.text, 'status_code': request_response.status_code})
if request_response.status_code == 200:
request_response_json = request_response.json()
# If the request did contain type information, add it to a list, else return an empty list.
if len(request_response_json['data']['results']) == 1:
logger.debug('The manager did return a type', extra={'type': request_response_json['data']['results'][0]})
self.type_details = request_response_json['data']['results'][0]
return self.type_details
            else:
                logger.error('The manager did not return exactly 1 type for a specific type ID', extra={'results': request_response_json['data']['results']})
                return {}
        else:
            logger.error('The manager did not respond with 200 when querying for a type', extra={'response_text': request_response.text, 'status_code': request_response.status_code})
            return {}
def update_task(self, task_id, task_status, task_output):
"""Will update a task status on the manager using the provided info
Params:
task_id: The ID of the task to update
            task_status: The exit status of the task, either [completed, failed]
            task_output: A dictionary to be passed as the task output
Returns:
True if successful
False if failed or error
"""
self.task_id = task_id
# Make sure have the latest task details stored
self.get_task(self.task_id)
if len(self.task_details) == 0:
return False
if task_status != None:
self.task_details['status']['status'] = task_status
if task_output != None:
self.task_details['response'] = task_output
self.request_host = self.host_base + f"/task"
logger.debug('Sending PATCH request to the manager', extra={'request_host': self.request_host, 'request_headers': self.request_headers, 'json':self.task_details})
request_response = requests.patch(self.request_host, headers=self.request_headers, json=self.task_details)
logger.debug('Received response from the manager', extra={'body': request_response.text, 'status_code': request_response.status_code})
if request_response.status_code == 200:
request_response_json = request_response.json()
logger.debug('Task updated successfully', extra={'request_response_json': request_response_json})
return True
else:
logger.error('The manager did not respond with 200 when updating task', extra={'request_response_text': request_response.text, 'request_host':self.request_host})
return False
class Task:
def __init__(self, manager_address, manager_port, task_id):
self.task_id = task_id
self.manager_address = manager_address
self.manager_port = manager_port
# Query the manager for task and type deails
query = Query(self.manager_address, self.manager_port, None)
task_details = query.get_task(self.task_id)
self.type_id = task_details['task']['type']
self.paramaters = task_details['parameters']
type_details = query.get_type(self.type_id)
self.bin_name = type_details['bin']['name']
self.bin_shasum = type_details['shasum']
self.input_type = type_details['bin']['input']
self.output_type = type_details['bin']['output']
if 'exec' in type_details['bin']:
self.bin_exec = type_details['bin']['exec']
else:
self.bin_exec = None
# Set the Task status to accepted once we have everything
self.status = "accepted"
self.output = None
query.update_task(self.task_id,self.status,self.output)
# Needed for Task class to download bins
self.host_base = f"http://{manager_address}:{manager_port}/api"
self.request_headers = {"Accept": "application/json"}
def __str__(self):
return f"Task {self.task_id} executing {self.bin_name}"
def __repr__(self):
return f"{self.__class__.__name__}(task_id={self.task_id}, bin_name={self.bin_name}, bin_shasum={self.bin_shasum}, input_type={self.input_type}, paramaters={self.paramaters}, output_type={self.output_type})"
def verify_bin(self):
"""Will check if a bin file exists on the syste and if the sha256 sum katches the one provided by the manager
Params:
type_bin_name: The name of the file to check
type_bin_hash: The hash of the file, as reported by the manager to verify the file.
Returns:
            * True if successful
            * False if unsuccessful
"""
if os.path.isfile(f"types/{self.bin_name}"):
logger.debug('Task type bin file exists on the system', extra={'type_bin_name': self.bin_name})
existing_file_sha256_hash = hashlib.sha256()
with open(f"types/{self.bin_name}","rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
existing_file_sha256_hash.update(byte_block)
if str(existing_file_sha256_hash.hexdigest()) == str(self.bin_shasum):
logger.debug('The existing bin file is at the latest version', extra={'type_bin_name': self.bin_name, 'hash':existing_file_sha256_hash})
return True
else:
logger.debug('The existing bin file is not latest version', extra={'type_bin_name': self.bin_name, 'file_hash':existing_file_sha256_hash, 'manager_hash':self.bin_shasum})
return False
else:
logger.debug('Task type bin file does not exist on the system', extra={'type_bin_name': self.bin_name})
return False
def download_bin(self):
"""Will download the task bin file from the manager, and the check it using the provided sha256
Params:
type_id: The ID of thr task type to download
task_bin_name: The name of the file to save as
            type_bin_hash: The hash of the file to verify against as provided by the manager
        Returns:
* True if successful
* False if unsuccessful
"""
        # Delete the file if it already exists
        if os.path.isfile(f"types/{self.bin_name}"):
            logger.debug('Task type bin file exists on the system, deleting', extra={'type_bin_name': self.bin_name})
            os.remove(f"types/{self.bin_name}")
        else:
            logger.debug('Task type bin file does not exist on the system, nothing to delete', extra={'type_bin_name': self.bin_name})
# Request task type bin from manager via task type download endpoint
self.request_host = self.host_base + f"/type/download/{self.type_id}"
self.request_headers = {"Accept": "*/*"}
logger.debug('Sending GET query request to the manager', extra={'request_host': self.request_host, 'request_headers': self.request_headers})
request_response = requests.get(self.request_host, headers=self.request_headers)
logger.debug('Received response from the manager', extra={'body': request_response.text, 'status_code': request_response.status_code, 'headers':request_response.headers})
# We might get a JSON response if something went wrong, so just check here, log and return false
if request_response.headers.get('content-type') == 'application/json':
request_response_json = request_response.json()
logger.debug('Received JSON response from manager rather than a file when downloading type bin', extra={'type_id': self.type_id, 'json':request_response_json})
return False
else:
if request_response.status_code == 200:
                # Save the downloaded file to the types dir then check the hash matches
with open(f'types/{self.bin_name}', 'wb') as target_file:
target_file.write(request_response.content)
# Generate SHA256 sum for the bin file
logger.debug('Generating SHA256 hash for bin', extra={'bin':self.bin_name})
target_file_sha256_hash = hashlib.sha256()
with open(f"types/{self.bin_name}","rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
target_file_sha256_hash.update(byte_block)
logger.debug('Generated SHA256 hash', extra={'hash':target_file_sha256_hash.hexdigest()})
if self.verify_bin():
logger.debug('Downloaded bin has been verified', extra={'file_hash':target_file_sha256_hash.hexdigest()})
return True
else:
                    logger.debug('Downloaded bin could not be verified. Download not successful', extra={'file_hash':target_file_sha256_hash.hexdigest()})
return False
elif request_response.status_code == 404:
logger.debug('Requesting a bin to download that did not exist, got 404', extra={'type_id':self.type_id})
return False
else:
                logger.debug('Error when downloading bin', extra={'type_id':self.type_id})
return False
def execute(self):
"""Will check that all required information is present, then execute the task.
Params:
Returns:
            True if successful, False if unsuccessful
"""
        if (self.task_id != None) and \
            (self.bin_name != None) and \
            (self.type_id != None) and \
            (self.bin_shasum != None) and \
            (self.input_type != None) and \
            (self.paramaters != None) and \
            (self.status == "accepted"):
            logger.debug("All parameters valid, executing task.", extra={'task_id':self.task_id, '__repr__':self.__repr__()})
        else:
            logger.error("Some parameters were not valid, unable to execute task.", extra={'task_id':self.task_id, '__repr__':self.__repr__()})
return False
query = Query(self.manager_address, self.manager_port, None)
if (self.bin_exec != None) and (self.bin_exec != ""):
            process_args = [self.bin_exec]
else:
process_args = []
process_args.append(os.path.join(os.path.abspath('types'), self.bin_name))
self.download_bin()
# Process input paramaters
if self.input_type == "cli":
            logger.debug('The input type is CLI, checking parameters dict', extra={'task_id':self.task_id,'paramaters':self.paramaters})
            # Prepare the task parameters
parsed_parameters = self.paramaters
if isinstance(self.paramaters, dict):
                # Make sure parameters is a single-depth dictionary; error if it is not a dict, and convert deeper values to JSON strings instead.
for key in self.paramaters.keys():
if isinstance(self.paramaters[key], dict):
                        logger.debug('The provided dictionary has embedded keys, converting second-depth value to a JSON string', extra={'task_id':self.task_id, 'key':key,'paramaters':self.paramaters})
parsed_parameters[key] = json.dumps(self.paramaters[key])
                logger.debug('The parameters dict has been compiled', extra={'task_id':self.task_id, 'parsed_parameters':parsed_parameters})
# Build rest of subprocess args list
for key in parsed_parameters:
process_args.append(str(key))
process_args.append(parsed_parameters[key])
logger.debug('Process argument list is', extra={'task_id':self.task_id,'process_args':process_args})
# Run basic subprocess
query.update_task(self.task_id, "running", None)
self.subprocess = subprocess.run(process_args, capture_output=True, text=True)
else:
                logger.debug('The provided task parameters are not a dictionary', extra={'task_id':self.task_id,'paramaters':self.paramaters})
return False
else:
logger.error('Invalid input type', extra={'task_id':self.task_id, 'paramaters':self.paramaters})
return False
# Once subprocess has completed, parse any output and build a status dict
self.process_output = {}
self.process_output['meta'] = {}
if self.subprocess.returncode == 0:
self.process_output['meta']['successful'] = True
self.process_output['meta']['retun_code'] = int(self.subprocess.returncode)
self.status = "completed"
else:
self.process_output['meta']['successful'] = False
self.process_output['meta']['retun_code'] = int(self.subprocess.returncode)
self.status = "failed"
self.process_output['stderr'] = str(self.subprocess.stderr)
self.process_output['stdout'] = str(self.subprocess.stdout)
if self.output_type == "stdout":
self.process_output['output'] = str(self.subprocess.stdout)
else:
            logger.error('Invalid output type when compiling results', extra={'task_id':self.task_id,'output_type':self.output_type})
# Update the task's status on the manager with the process_output
if query.update_task(self.task_id, self.status, json.dumps(self.process_output)):
logger.debug('Task execution process completed successfully', extra={'task_id':self.task_id})
return True
else:
logger.debug('Task execution process did not complete successfully', extra={'task_id':self.task_id})
return False
``` |
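A sketch of the agent's polling loop tying `Query` and `Task` together; the manager address, port, agent UUID, and the `id` key in the task dictionaries are assumptions, since the surrounding agent code is not part of this excerpt:
```python
manager_address = "127.0.0.1"                            # placeholder
manager_port = 5000                                      # placeholder
agent_uuid = "00000000-0000-0000-0000-000000000000"      # placeholder

query = Query(manager_address, manager_port, agent_uuid)
for pending in query.query_for_tasks():
    # 'id' is assumed to be the key holding the task ID in the manager's response.
    task = Task(manager_address, manager_port, pending['id'])
    task.execute()
```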
{
"source": "32blit/32blit-tools",
"score": 2
} |
#### File: ttblit/core/dfu.py
```python
import pathlib
import zlib
import construct
from construct import (Checksum, Const, CString, Flag, GreedyBytes,
GreedyRange, Hex, Int8ul, Int16ul, Int32ul, Padded,
Padding, Prefixed, RawCopy, Rebuild, Struct, len_, this)
DFU_SIGNATURE = b'DfuSe'
DFU_size = Rebuild(Int32ul, 0)
def DFU_file_length(ctx):
'''Compute the entire file size + 4 bytes for CRC
The total DFU file length is ostensibly the actual
length in bytes of the resulting file.
However DFU File Manager does not seem to agree,
    since its output size is 16 bytes short.
Since this is suspiciously the same as the suffix
length in bytes, we omit that number to match
DFU File Manager's output.
'''
size = 11 # DFU Header Length
# size += 16 # DFU Suffix Length
for target in ctx.targets:
# Each target has a 274 byte header consisting
# of the following fields:
size += Const(DFU_SIGNATURE).sizeof() # szSignature ('Target' in bytes)
size += Int8ul.sizeof() # bAlternateSetting
size += Int8ul.sizeof() # bTargetNamed
size += Padding(3).sizeof() # Padding
size += Padded(255, CString('utf8')).sizeof() # szTargetName
size += Int32ul.sizeof() # dwTargetSize
size += Int32ul.sizeof() # dwNbElements
size += DFU_target_size(target)
return size
def DFU_target_size(ctx):
'''Returns the size of the target binary data, plus the
dwElementAddress header, and dwElementSize byte count.
'''
size = 0
try:
images = ctx.images
except AttributeError:
images = ctx['images']
size += sum([DFU_image_size(image) for image in images])
return size
def DFU_image_size(image):
return len(image['data']) + Int32ul.sizeof() + Int32ul.sizeof()
DFU_image = Struct(
'dwElementAddress' / Hex(Int32ul), # Data offset address for image
'data' / Prefixed(Int32ul, GreedyBytes)
)
DFU_target = Struct(
'szSignature' / Const(b'Target'), # DFU target identifier
'bAlternateSetting' / Int8ul, # Gives device alternate setting for which this image can be used
'bTargetNamed' / Flag, # Boolean determining if the target is named
Padding(3), # Mystery bytes!
'szTargetName' / Padded(255, CString('utf8')), # Target name
# DFU File Manager does not initialise this
# memory, so our file will not exactly match
# its output.
'dwTargetSize' / Rebuild(Int32ul, DFU_target_size), # Total size of target images
'dwNbElements' / Rebuild(Int32ul, len_(this.images)), # Count the number of target images
'images' / GreedyRange(DFU_image)
)
DFU_body = Struct(
'szSignature' / Const(DFU_SIGNATURE), # DFU format identifier (changes on major revisions)
'bVersion' / Const(1, Int8ul), # DFU format revision (changes on minor revisions)
'DFUImageSize' / Rebuild(Int32ul, DFU_file_length), # Total DFU file length in bytes
'bTargets' / Rebuild(Int8ul, len_(this.targets)), # Number of targets in the file
'targets' / GreedyRange(DFU_target),
'bcdDevice' / Int16ul, # Firmware version, or 0xffff if ignored
'idProduct' / Hex(Int16ul), # USB product ID or 0xffff to ignore
'idVendor' / Hex(Int16ul), # USB vendor ID or 0xffff to ignore
'bcdDFU' / Const(0x011A, Int16ul), # DFU specification number
'ucDfuSignature' / Const(b'UFD'), # 0x44, 0x46 and 0x55 ie 'DFU' but reversed
'bLength' / Const(16, Int8ul) # Length of the DFU suffix in bytes
)
DFU = Struct(
'fields' / RawCopy(DFU_body),
'dwCRC' / Checksum(Int32ul, # CRC calculated over the whole file, except for itself
lambda data: 0xffffffff ^ zlib.crc32(data),
this.fields.data)
)
def display_dfu_info(parsed):
print(f'''
Device: {parsed.fields.value.bcdDevice}
Target: {parsed.fields.value.idProduct:04x}:{parsed.fields.value.idVendor:04x}
Size: {parsed.fields.value.DFUImageSize:,} bytes
Targets: {parsed.fields.value.bTargets}''')
for target in parsed.fields.value.targets:
print(f'''
Name: {target.szTargetName}
Alternate Setting: {target.bAlternateSetting}
Size: {target.dwTargetSize:,} bytes
Images: {target.dwNbElements}''')
for image in target.images:
print(f'''
Offset: {image.dwElementAddress}
Size: {len(image.data):,} bytes
''')
def build(input_file, output_file, address, force=False, id_product=0x0000, id_vendor=0x0483):
if not output_file.parent.is_dir():
raise RuntimeError(f'Output directory "{output_file.parent}" does not exist!')
elif output_file.is_file() and not force:
raise RuntimeError(f'Existing output file "{output_file}", use --force to overwrite!')
if not input_file.suffix == ".bin":
raise RuntimeError(f'Input file "{input_file}", is not a .bin file?')
output = DFU.build({'fields': {'value': {
'targets': [{
'bAlternateSetting': 0,
'bTargetNamed': True,
'szTargetName': 'ST...',
'images': [{
'dwElementAddress': address,
'data': open(input_file, 'rb').read()
}]
}],
'bcdDevice': 0,
'idProduct': id_product,
'idVendor': id_vendor
}}})
open(output_file, 'wb').write(output)
def read(input_file):
try:
return DFU.parse(open(input_file, 'rb').read())
except construct.core.ConstructError as error:
        raise RuntimeError(f'Invalid dfu file {input_file} ({error})')
def dump(input_file, force=False):
parsed = read(input_file)
for target in parsed.fields.value.targets:
target_id = target.bAlternateSetting
for image in target.images:
address = image.dwElementAddress
data = image.data
dest = str(input_file).replace('.dfu', '')
filename = f"{dest}-{target_id}-{address}.bin"
if pathlib.Path(filename).is_file() and not force:
raise RuntimeError(f'Existing output file "{filename}", use --force to overwrite!')
print(f"Dumping image at {address} to {filename} ({len(data)} bytes)")
open(filename, 'wb').write(data)
```
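A hypothetical round trip with these helpers: pack a raw firmware image into a `.dfu`, then parse and print it back (the file names and flash address are placeholders):
```python
import pathlib

build(pathlib.Path('firmware.bin'), pathlib.Path('firmware.dfu'),
      address=0x08000000, force=True)

display_dfu_info(read(pathlib.Path('firmware.dfu')))
```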
#### File: ttblit/tool/setup.py
```python
import logging
import os
import pathlib
import re
import shutil
import stat
import subprocess
import textwrap
import click
# check environment before prompting
class SetupCommand(click.Command):
def parse_args(self, ctx, args):
logging.info("Checking for prerequisites...")
# command/name/required version
prereqs = [
('git --version', 'Git', None),
('cmake --version', 'CMake', [3, 9]),
('arm-none-eabi-gcc --version', 'GCC Arm Toolchain', [7, 3])
]
        # adjust path to detect the VS Arm toolchain
path = os.getenv('PATH')
vs_dir = os.getenv('VSInstallDir')
if vs_dir:
path = ';'.join([path, vs_dir + 'Linux\\gcc_arm\\bin'])
print(path)
failed = False
for command, name, version in prereqs:
try:
                result = subprocess.run(command, stdout=subprocess.PIPE, text=True, shell=True, check=True, env={'PATH': path})
version_str = ".".join([str(x) for x in version]) if version else 'any'
found_version_str = re.search(r'[0-9]+\.[0-9\.]+', result.stdout).group(0)
if version:
found_version_list = [int(x) for x in found_version_str.split('.')[:len(version)]]
if found_version_list < version:
logging.critical(f'Found {name} version {found_version_str}, {version_str} is required!')
failed = True
logging.info(f'Found {name} version {found_version_str} (required {version_str})')
except subprocess.CalledProcessError:
logging.critical(f'Could not find {name}!')
failed = True
if failed:
click.echo('\nCheck the documentation for info on installing.\nhttps://github.com/32blit/32blit-sdk#you-will-need')
raise click.Abort()
super().parse_args(ctx, args)
def install_sdk(sdk_path):
click.echo('Installing SDK...')
subprocess.run(['git', 'clone', 'https://github.com/32blit/32blit-sdk', str(sdk_path)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# checkout the latest release
# TODO: could do something with the GitHub API and download the release?
result = subprocess.run(['git', 'describe', '--tags', '--abbrev=0'], cwd=sdk_path, text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
latest_tag = result.stdout.strip()
result = subprocess.run(['git', 'checkout', latest_tag], cwd=sdk_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def vscode_config(project_path, sdk_path):
(project_path / '.vscode').mkdir()
open(project_path / '.vscode' / 'settings.json', 'w').write(textwrap.dedent(
'''
{
"cmake.configureSettings": {
"32BLIT_DIR": "{sdk_path}"
},
"C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools"
}
'''.replace('{sdk_path}', str(sdk_path).replace('\\', '\\\\'))))
open(project_path / '.vscode' / 'cmake-kits.json', 'w').write(textwrap.dedent(
'''
[
{
"name": "32blit",
"toolchainFile": "{sdk_path}/32blit.toolchain"
}
]
'''.replace('{sdk_path}', str(sdk_path).replace('\\', '\\\\'))))
def visualstudio_config(project_path, sdk_path):
open(project_path / 'CMakeSettings.json', 'w').write(textwrap.dedent(
'''
{
"configurations": [
{
"name": "x64-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"inheritEnvironments": [ "msvc_x64_x64" ],
"buildRoot": "${projectDir}\\\\out\\\\build\\\\${name}",
"installRoot": "${projectDir}\\\\out\\\\install\\\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"variables": [
{
"name": "32BLIT_DIR",
"value": "{sdk_path}",
"type": "PATH"
}
]
},
{
"name": "x64-Release",
"generator": "Ninja",
"configurationType": "Release",
"buildRoot": "${projectDir}\\\\out\\\\build\\\\${name}",
"installRoot": "${projectDir}\\\\out\\\\install\\\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "msvc_x64_x64" ],
"variables": [
{
"name": "32BLIT_DIR",
"value": "{sdk_path}",
"type": "PATH"
}
]
},
{
"name": "32Blit-Debug",
"generator": "Ninja",
"configurationType": "Debug",
"buildRoot": "${projectDir}\\\\out\\\\build\\\\${name}",
"installRoot": "${projectDir}\\\\out\\\\install\\\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"inheritEnvironments": [ "gcc-arm" ],
"variables": [],
"cmakeToolchain": "{sdk_path}\\\\32blit.toolchain",
"intelliSenseMode": "linux-gcc-arm"
},
{
"name": "32Blit-Release",
"generator": "Ninja",
"configurationType": "Release",
"buildRoot": "${projectDir}\\\\out\\\\build\\\\${name}",
"installRoot": "${projectDir}\\\\out\\\\install\\\\${name}",
"cmakeCommandArgs": "",
"buildCommandArgs": "",
"ctestCommandArgs": "",
"cmakeToolchain": "{sdk_path}\\\\32blit.toolchain",
"inheritEnvironments": [ "gcc-arm" ],
"variables": [],
"intelliSenseMode": "linux-gcc-arm"
}
]
}
'''.replace('{sdk_path}', str(sdk_path).replace('\\', '\\\\'))))
@click.command('setup', help='Setup a project', cls=SetupCommand)
@click.option('--project-name', prompt=True)
@click.option('--author-name', prompt=True)
@click.option('--sdk-path', type=pathlib.Path, default=lambda: os.path.expanduser('~/32blit-sdk'), prompt='32Blit SDK path')
@click.option('--git/--no-git', prompt='Initialise a Git repository?', default=True)
@click.option('--vscode/--no-vscode', prompt='Create VS Code configuration?', default=True)
@click.option('--visualstudio/--no-visualstudio', prompt='Create Visual Studio configuration?')
def setup_cli(project_name, author_name, sdk_path, git, vscode, visualstudio):
if not (sdk_path / '32blit.toolchain').exists():
click.confirm(f'32Blit SDK not found at "{sdk_path}", would you like to install it?', abort=True)
install_sdk(sdk_path)
project_name_clean = re.sub(r'[^a-z0-9]+', '-', project_name.lower()).strip('-')
project_path = pathlib.Path.cwd() / project_name_clean
if project_path.exists():
logging.critical(f'A project already exists at {project_path}!')
raise click.Abort()
# get the boilerplate
click.echo('Downloading boilerplate...')
subprocess.run(['git', 'clone', '--depth', '1', 'https://github.com/32blit/32blit-boilerplate', str(project_path)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# de-git it (using the template on GitHub also removes the history)
def remove_readonly(func, path, _):
os.chmod(path, stat.S_IWRITE)
func(path)
shutil.rmtree(pathlib.Path(project_name_clean) / '.git', onerror=remove_readonly)
# do some editing
cmakelists = open(project_path / 'CMakeLists.txt').read()
cmakelists = cmakelists.replace('project(game)', f'project({project_name_clean})')
open(project_path / 'CMakeLists.txt', 'w').write(cmakelists)
metadata = open(project_path / 'metadata.yml').read()
metadata = metadata.replace('game title', project_name).replace('you', author_name)
open(project_path / 'metadata.yml', 'w').write(metadata)
licence = open(project_path / 'LICENSE').read()
licence = licence.replace('<insert your name>', author_name)
open(project_path / 'LICENSE', 'w').write(licence)
# re-git it if we want a git repo
if git:
subprocess.run(['git', 'init'], cwd=project_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.run(['git', 'add', '.'], cwd=project_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.run(['git', 'commit', '-m', 'Initial commit'], cwd=project_path, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if vscode:
vscode_config(project_path, sdk_path)
if visualstudio:
visualstudio_config(project_path, sdk_path)
click.echo(f'\nYour new project has been created in: {project_path}!')
click.echo(f'If using CMake directly, make sure to pass -DCMAKE_TOOLCHAIN_FILE={sdk_path / "32blit.toolchain"} (building for the device)\nor -D32BLIT_DIR={sdk_path} when calling cmake.')
``` |
{
"source": "32leaves/pyshlist",
"score": 3
} |
#### File: pyshlist/pyshlist/backend.py
```python
import tinydb
class Purchases(object):
"""Manages all purches in the database"""
def __init__(self, db):
self.db = db.table('purchases')
@property
def all(self):
return self.db.all()
def new(self, name, price, due_date, desirability = 0, category = None, description = None):
"""Creates a new purchase"""
doc = {
"name" : name,
"price" : price,
"desirability" : desirability,
"due_date" : due_date,
"category" : category,
"description" : description
}
self.db.insert(doc)
def remove(self, name):
"""Removes a purchase with a given name"""
Purchase = tinydb.Query()
removed_items = self.db.remove(Purchase.name == name)
return any(removed_items)
def set_desirability(self, name, desirability):
"""Sets the desirability of a purchase"""
Purchase = tinydb.Query()
updated_items = self.db.update({ 'desirability' : desirability }, Purchase.name == name)
return any(updated_items)
@property
def categories(self):
return set([ x['category'] for x in self.db.all() if not x['category'] is None ])
class Comparisons(object):
def __init__(self, db):
self._db = db
self._table = db.table('comparisons')
def vote(self, a, b, a_more_important):
"""Marks the first purchase name as more important than the other"""
self._table.insert({
"a" : a,
"b" : b,
"a_more_important": a_more_important
})
def get_score(self, purchase_name):
"""Returns the score (0: not important to 1: very important) of a purchase based on prior comparisons"""
Comparison = tinydb.Query()
comparisons_won = 0
comparisons_won += self._table.count((Comparison.a == purchase_name) & (Comparison.a_more_important == True))
comparisons_won += self._table.count((Comparison.b == purchase_name) & (Comparison.a_more_important == False))
comparisons_involved = (self._table.count(Comparison.a == purchase_name) + self._table.count(Comparison.b == purchase_name))
result = None
if comparisons_involved > 0:
result = float(comparisons_won) / float(comparisons_involved)
return result
def prune(self, valid_purchase_names = None):
"""Removes all comparisons which contain non-valid purchase names"""
if valid_purchase_names is None:
purchases = Purchases(self._db)
valid_purchase_names = [ p['name'] for p in purchases.all ]
Comparison = tinydb.Query()
self._table.remove(Comparison.a.test(lambda x: not x in valid_purchase_names))
self._table.remove(Comparison.b.test(lambda x: not x in valid_purchase_names))
@property
def missing_comparisons(self):
"""Returns all missing purchase comparisons"""
purchases = [ p['name'] for p in Purchases(self._db).all ]
all_comparisons = [ (purchases[i], purchases[j]) for i in range(len(purchases)) for j in range(i) ]
Comparison = tinydb.Query()
return [ x for x in all_comparisons if self._table.count(((Comparison.a == x[0]) & (Comparison.b == x[1])) | ((Comparison.a == x[1]) & (Comparison.b == x[0]))) == 0 ]
@property
def rated_purchases(self):
"""Returns a list of all purchases with their score added"""
#self.prune()
purchases = Purchases(self._db).all
for p in purchases:
p['desirability'] = self.get_score(p['name'])
return purchases
``` |
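A self-contained sketch of the two classes working together, using an in-memory TinyDB so nothing touches disk (item names, prices, and dates are made up):
```python
import tinydb

db = tinydb.TinyDB(storage=tinydb.storages.MemoryStorage)

purchases = Purchases(db)
purchases.new('Laptop', 1200, '2024-01-01', category='tech')
purchases.new('Desk', 300, '2024-02-01', category='furniture')

comparisons = Comparisons(db)
for a, b in comparisons.missing_comparisons:
    comparisons.vote(a, b, a_more_important=True)   # pretend the first item always wins

print(comparisons.rated_purchases)   # purchases with 'desirability' filled from votes
```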
{
"source": "32th-System/LuaSTG-EX-Plus_Archive",
"score": 3
} |
#### File: LuaSTGExPlus/LuaSTG/string2enum.py
```python
import sys
import json
import math
import random
def maxlen(str_list):
ret = 0
for s in str_list:
if len(s) > ret:
ret = len(s)
return ret
def combination(seq, length):
if not length:
yield []
else:
for i in range(len(seq)):
for result in combination(seq[i+1:], length-1):
yield [seq[i]] + result
def charat(key, idx):
if idx >= len(key):
return u'\0'
else:
return key[idx]
def is_prime(n):
if n <= 1:
return False
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
return False
return True
class union_set:
def __init__(self, count):
self._obj = [x for x in range(0, count)]
def find(self, x):
if x != self._obj[x]:
self._obj[x] = self.find(self._obj[x])
return self._obj[x]
else:
return x
def connect(self, a, b):
x = self.find(a)
y = self.find(b)
self._obj[x] = y
def is_connected(self, a, b):
return self.find(a) == self.find(b)
# find best indices
# !!NEED TO BE IMPROVED!!
def find_best_indices(key_list):
n = maxlen(key_list)
seq = [x for x in range(0, n)]
for cnt in range(1, n + 1):
for comb in combination(seq, cnt):
test_set = set()
fail = False
for key in key_list:
comb_str = ""
for idx in comb:
comb_str += charat(key, idx)
if comb_str in test_set:
fail = True
break
test_set.add(comb_str)
if not fail:
print("*** Best indices found: " + str(comb))
return comb
return None
def keyhash(key, rand_table, idx_list, n):
ret = 0
for i in range(0, len(idx_list)):
ret += ord(charat(key, idx_list[i])) * rand_table[i]
return ret % n
def generate_graph(key_list, idx_list, factor):
n = int(len(key_list) * factor + 1)
failed = True
while not is_prime(n):
n += 1
print("*** 'n' selected: " + str(n))
print("*** Start iterating...")
iter_cnt = 0
while failed:
iter_cnt += 1
print("trying iterating, step %d..." % iter_cnt)
# generate random table
T1 = [random.randint(1, 255) for i in range(0, len(idx_list))]
T2 = [random.randint(1, 255) for i in range(0, len(idx_list))]
# generate empty graph
adj_matrix = [list() for i in range(0, n)]
uniset = union_set(n)
# calcu each key
index = 0
failed = False
for key in key_list:
hash1 = keyhash(key, T1, idx_list, n)
hash2 = keyhash(key, T2, idx_list, n)
# connect hash1 and hash2
if uniset.is_connected(hash1, hash2):
failed = True
break
uniset.connect(hash1, hash2)
# make edge
edge = { "hash1" : hash1, "hash2" : hash2, "key" : key, "value" : index }
adj_matrix[hash1].append(edge)
adj_matrix[hash2].append(edge)
index += 1
print("*** Graph generated")
return T1, T2, adj_matrix
def find_func_g(adj_matrix, m):
g = [0 for i in range(0, len(adj_matrix))]
visited = [False for i in range(0, len(adj_matrix))]
def visit_graph(v):
visited[v] = True
for adj in adj_matrix[v]:
if v == adj["hash1"] and not visited[adj["hash2"]]:
g[adj["hash2"]] = (adj["value"] - g[adj["hash1"]]) % m
visit_graph(adj["hash2"])
elif v == adj["hash2"] and not visited[adj["hash1"]]:
g[adj["hash1"]] = (adj["value"] - g[adj["hash2"]]) % m
visit_graph(adj["hash1"])
print("*** Finding function 'g'...")
for vert in range(0, len(adj_matrix)):
if len(adj_matrix[vert]) != 0 and not visited[vert]:
g[vert] = 0
visit_graph(vert)
print("*** Function 'g' generated")
return g
def final_hash_func(key, idx_list, T1, T2, g_table, m):
n = len(g_table)
return (g_table[keyhash(key, T1, idx_list, n)] + g_table[keyhash(key, T2, idx_list, n)]) % m
def generated_mpf(filename):
with open(filename, "r") as f:
options = json.load(f)
keys = []
enums = []
keydict = {} # key->enum
enumdict = {} # enum->key
for item in options["keys"]:
keys.append(item[0])
enums.append(item[1])
keydict[item[0]] = item[1]
enumdict[item[1]] = item[0]
# step1: find best indices
best_indices = find_best_indices(keys)
# step2: generate random table and graph
hash_table1, hash_table2, graph = generate_graph(keys, best_indices, options["factor"] if "factor" in options else 2.0)
# step3: generate function g
hash_func_g = find_func_g(graph, len(keys))
# check step
for i in range(0, len(keys)):
hash_check = final_hash_func(keys[i], best_indices, hash_table1, hash_table2, hash_func_g, len(keys))
# print("key %s hash %d" % (keys[i], hash_check))
assert(i == hash_check)
# print results
print("*** Results:")
print("n = " + str(len(graph)))
print("m = " + str(len(keys)))
print("best_indices = " + str(best_indices))
print("hash_table1 = " + str(hash_table1))
print("hash_table2 = " + str(hash_table2))
print("hashfunc_g = " + str(hash_func_g))
# generate C++ source file
print("*** generating C++ source file...")
char_type = "wchar_t" if options["wide_char"] else "char"
with open(options["output"], "w") as out_file:
out_file.write(u"#pragma once\n")
out_file.write(u"#include <cstring>\n")
#out_file.write(u"#include <cstdint>\n")
out_file.write(u"\n")
out_file.write(u"namespace %s\n" % options["namespace"])
out_file.write(u"{\n")
out_file.write(u"\tenum class %s\n" % options["enum_name"])
out_file.write(u"\t{\n")
for i in range(0, len(enums)):
out_file.write(u"\t\t%s = %d,\n" % (enums[i], i))
out_file.write(u"\t\t_KEY_NOT_FOUND = -1\n")
out_file.write(u"\t};\n")
out_file.write(u"\n")
out_file.write(u"\tinline %s %s(const %s* key)\n" % (options["enum_name"], options["hashfunc_name"], char_type))
out_file.write(u"\t{\n")
out_file.write(u"\t\tstatic const %s* s_orgKeyList[] =\n" % char_type)
out_file.write(u"\t\t{\n")
for i in range(0, len(keys)):
out_file.write(u'\t\t\t%s"%s",\n' % (u'L' if options["wide_char"] else u'', keys[i]))
out_file.write(u"\t\t};\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tstatic const unsigned int s_bestIndices[] =\n")
out_file.write(u"\t\t{")
for i in range(0, len(best_indices)):
if i % 10 == 0:
out_file.write(u'\n')
out_file.write(u'\t\t\t')
out_file.write(u'%d, ' % (best_indices[i]))
out_file.write(u"\n\t\t};\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tstatic const unsigned int s_hashTable1[] =\n")
out_file.write(u"\t\t{")
for i in range(0, len(best_indices)):
if i % 10 == 0:
out_file.write(u'\n')
out_file.write(u'\t\t\t')
out_file.write(u'%d, ' % (hash_table1[i]))
out_file.write(u"\n\t\t};\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tstatic const unsigned int s_hashTable2[] =\n")
out_file.write(u"\t\t{")
for i in range(0, len(best_indices)):
if i % 10 == 0:
out_file.write(u'\n')
out_file.write(u'\t\t\t')
out_file.write(u'%d, ' % (hash_table2[i]))
out_file.write(u"\n\t\t};\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tstatic const unsigned int s_hashTableG[] =\n")
out_file.write(u"\t\t{")
for i in range(0, len(graph)):
if i % 10 == 0:
out_file.write(u'\n')
out_file.write(u'\t\t\t')
out_file.write(u'%d, ' % (hash_func_g[i]))
out_file.write(u"\n\t\t};\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tunsigned int f1 = 0, f2 = 0, len = %s(key);\n" % options["strlen"])
out_file.write(u"\t\tfor (unsigned int i = 0; i < %d; ++i)\n" % len(best_indices))
out_file.write(u"\t\t{\n")
out_file.write(u"\t\t\tunsigned int idx = s_bestIndices[i];\n")
out_file.write(u"\t\t\tif (idx < len)\n")
out_file.write(u"\t\t\t{\n")
out_file.write(u"\t\t\t\tf1 = (f1 + s_hashTable1[i] * (unsigned int)key[idx]) %% %d;\n" % len(graph))
out_file.write(u"\t\t\t\tf2 = (f2 + s_hashTable2[i] * (unsigned int)key[idx]) %% %d;\n" % len(graph))
out_file.write(u"\t\t\t}\n")
out_file.write(u"\t\t\telse\n")
out_file.write(u"\t\t\t\tbreak;\n")
out_file.write(u"\t\t}\n")
out_file.write(u"\t\t\n")
out_file.write(u"\t\tunsigned int hash = (s_hashTableG[f1] + s_hashTableG[f2]) %% %d;\n" % len(keys))
out_file.write(u"\t\tif (%s(s_orgKeyList[hash], key) == 0)\n" % options["strcmp"])
out_file.write(u"\t\t\treturn static_cast<%s>(hash);\n" % options["enum_name"])
out_file.write(u"\t\treturn %s::_KEY_NOT_FOUND;\n" % options["enum_name"])
out_file.write(u"\t}\n")
out_file.write(u"}\n")
print("*** finished")
if len(sys.argv) != 2:
print("Invalid command argument.")
exit(-1)
generated_mpf(sys.argv[1])
``` |
{
"source": "32xnabin/DjangoRestHeroku",
"score": 2
} |
#### File: DjangoRestHeroku/restaurants/models.py
```python
from django.db import models
class Restaurant(models.Model):
"""
Restaurants' Model.
"""
id = models.CharField(primary_key = True, editable = True, max_length = 255, verbose_name = u'Id')
rating = models.IntegerField(verbose_name = u'Rating')
name = models.CharField(max_length = 255, verbose_name = u'Nombre')
site = models.CharField(max_length = 255, verbose_name = u'Sitio')
email = models.CharField(max_length = 255, verbose_name = u'Email')
phone = models.CharField(max_length = 255, verbose_name = u'Telefono')
street = models.CharField(max_length = 255, verbose_name = u'Calle')
city = models.CharField(max_length = 255, verbose_name = u'Ciudad')
state = models.CharField(max_length = 255, verbose_name = u'Estado')
lat = models.FloatField(verbose_name = u'Latitud')
long = models.FloatField(verbose_name = u'Longitud')
def __unicode__(self):
return '%s' % (self.name)
class Meta:
ordering = ('id', 'rating')
```
#### File: DjangoRestHeroku/restaurants/views.py
```python
from rest_framework import viewsets, permissions, response, views, status
from .utils import RestaurantSerializer
from .models import Restaurant
class StatisticView(views.APIView):
"""
    Return a specific restaurant.
"""
    def get_object(self, pk):
        try:
            return Restaurant.objects.get(pk=pk)
        except Restaurant.DoesNotExist:
            return response.Response(status=status.HTTP_404_NOT_FOUND)
def get(self, request, pk, format = None):
restaurants = Restaurant.objects.get(pk=pk)
serializer_context = {
'request': request,
}
serializer = RestaurantSerializer(restaurants,context=serializer_context)
return response.Response(serializer.data)
class RestaurantViewSet(viewsets.ModelViewSet):
"""
retrieve:
Return a specific restaurant.
list:
Return all the restaurants in the API.
create:
Create a new restaurant instance.
"""
queryset = Restaurant.objects.all()
serializer_class = RestaurantSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
``` |
{
"source": "330040431/GsSeeyonDataDict",
"score": 2
} |
#### File: 330040431/GsSeeyonDataDict/GsSeeyonDataDict.py
```python
__author__ = 'Liam0611'
import pymssql
import xml.etree.ElementTree as Etree
from getCode import getCode
class GsSeeyonDataDict:
def getModuleDataDict(self, moduleName):
if not moduleName:
return None
sqlStr = r"SELECT FIELD_INFO FROM FORM_DEFINITION " \
r"WHERE NAME = '{moduleName}'".format(moduleName=moduleName)
self.__dbCursor.execute(sqlStr)
resXmlStr = self.__dbCursor.fetchall()
xmlRoot = Etree.fromstring(resXmlStr[0][0])
resDataDict = {}
# Reading by xml.
for tables in xmlRoot:
tableInfo = tables.attrib
tableInfo['display'] = getCode(tableInfo['display'], 'gb2312')
filedDataDict = {}
filedDataDict['TableName'] = tableInfo['name']
for fields in tables.find('FieldList'):
fieldInfo = fields.attrib
filedDataDict[getCode(fieldInfo['display'], 'gb2312')] = fieldInfo['name']
if r'master' == tableInfo['tabletype']:
resDataDict[moduleName] = filedDataDict
else:
resDataDict[tableInfo['display']] = filedDataDict
return resDataDict
def getAllModuleDataDict(self):
sqlStr = r"SELECT NAME FROM FORM_DEFINITION"
self.__dbCursor.execute(sqlStr)
resDataDicts = {}
for moduleName in self.__dbCursor.fetchall():
resDataDicts[moduleName[0]] = self.getModuleDataDict(moduleName[0])
return resDataDicts
def __init__(self, host, user, password, database):
self.__dbConn = pymssql.connect(host=host, user=user, password=password, database=database, charset='utf8')
self.__dbCursor = self.__dbConn.cursor()
def __del__(self):
try:
self.__dbConn.close()
except:
pass
if __name__ == '__main__':
gsDataDict = GsSeeyonDataDict(host='', user='', password='', database='')
allDataDict = gsDataDict.getAllModuleDataDict()
print(allDataDict)
``` |
{
"source": "331leo/HyundaiLogin",
"score": 3
} |
#### File: HyundaiLogin/models/user.py
```python
from pydantic import BaseModel
from pydantic.networks import EmailStr
# {
# "id": "110010986932044647324",
# "email": "<EMAIL>",
# "verified_email": True,
# "name": "11002김동현",
# "given_name": "김동현",
# "family_name": "11002",
# "picture": "https://lh3.googleusercontent.com/a/AATXAJzusswNrgMRPLv_SRxTBQ2kxJtpB_ZCcTjKHk-z=s96-c",
# "locale": "ko",
# "hd": "hyundai.hs.kr"
# }
class User(BaseModel):
id: str
email: EmailStr
full_name: str
name: str
student_id: str
is_student: bool
grade: int
classnum: int
number: int
def parse_google_response(data: dict) -> dict:
user: dict = dict()
user["id"] = data.get("id")
user["email"] = data.get("email")
user["full_name"] = data.get("name")
user["name"] = data.get("given_name")
user["student_id"] = data.get("family_name")
user["is_student"] = True if user["student_id"].isnumeric() else False
user["grade"] = int(user["student_id"][0]) if user["is_student"] else 0
user["classnum"] = int(user["student_id"][1:3]) if user["is_student"] else 0
user["number"] = int(user["student_id"][3:5]) if user["is_student"] else 0
return user
```
#### File: routes/v1/callback.py
```python
import json as jsonlib
from os import getenv
from typing import Optional
from fastapi import APIRouter
from fastapi.params import Cookie
from starlette.responses import JSONResponse, RedirectResponse
from models.user import User, parse_google_response
from utils.auth import gen_oauth_code
from utils.db import user_db
from utils.etc import md5hash
from utils.google import get_token, get_user_info
callback_router = APIRouter()
@callback_router.get("/google", response_class=RedirectResponse)
async def callback_google(code: str, hyundai_id_callback: Optional[str] = Cookie(None)):
# print(hyundai_id_callback)
try:
user = User.parse_obj(
parse_google_response(
await get_user_info((await get_token(code)).get("access_token"))
)
)
except Exception as e:
print(e)
return JSONResponse(
{
"code": "PARSE_ERROR",
"message": f"Failed Parsing user informaing from Google. Please retry and contact to: {getenv('CONTACT')}",
},
status_code=500,
)
user_db.set(md5hash(user.id), jsonlib.dumps(user.dict(), ensure_ascii=False))
return RedirectResponse(
hyundai_id_callback + f"?code={gen_oauth_code(user.id).get('code')}"
)
```
#### File: routes/v1/test.py
```python
from base64 import b64encode
from os import getenv
from fastapi import APIRouter
from utils.etc import REQ_TYPE, request
def get_basic_auth_token(username, password):
return "Basic " + b64encode(f"{username}:{password}".encode()).decode()
test_router = APIRouter()
@test_router.get("/callback", include_in_schema=False)
async def test(code: str):
token = (
await request(
REQ_TYPE.POST,
url=f"http://localhost:{getenv('PORT')}/v1/oauth2/token",
data={"code": code},
headers={
"Authorization": get_basic_auth_token(
getenv("TEST_CLIENT_ID"), getenv("TEST_CLIENT_SECRET")
)
},
)
).get("access_token")
return await request(
REQ_TYPE.GET,
url=f"http://localhost:{getenv('PORT')}/v1/users/@me",
headers={"Authorization": f"Bearer {token}"},
)
``` |
{
"source": "331leo/HyundaiOC_Backend",
"score": 4
} |
#### File: HyundaiOC_Backend/utils/time.py
```python
from datetime import date, datetime
weekdays_en = {0:"mon", 1:"tue", 2:"wen", 3:"thu", 4:"fri"}
weekdays_kr = {0:"월", 1:"화", 2:"수", 3:"목", 4:"금"}
def now_weekday(kr=False):
return weekdays_en[datetime.now().weekday()] if not kr else weekdays_kr[datetime.now().weekday()]
def get_date_string():
now = datetime.now()
return f"{now.year}년 {now.month}월 {now.day}일 ({now_weekday(kr=True)})"
def current_school_time():
now = datetime.now().strftime("%H:%M")
period = -1
if "07:50"<=now<="08:55":
period=0
elif "08:56"<=now<="09:55":
period=1
elif "09:56"<=now<="10:55":
period=2
elif "10:56"<=now<="11:55":
period=3
elif "11:56"<=now<="13:05":
period=-2 # 점심
elif "13:06"<=now<="13:55":
period=4
elif "13:56"<=now<="14:55":
period=5
elif "14:56"<=now<="15:55":
period=6
return period
# 7:50 ~ 8:55
# 8:56 ~ 9:55
# 9:56 ~ 10:55
# 10:56 ~ 11:55
# 11:56 ~ 13:05 (점심)
# 13:06 ~ 13:55
# 13:56 ~ 14:55
# 14:56 ~ 15:55
``` |
{
"source": "3333qwe/spark",
"score": 2
} |
#### File: python/pyspark/util.py
```python
import functools
import itertools
import os
import platform
import re
import sys
import threading
import traceback
from types import TracebackType
from typing import Any, Callable, Iterator, List, Optional, TextIO, Tuple
from py4j.clientserver import ClientServer # type: ignore[import]
__all__: List[str] = []
from py4j.java_gateway import JavaObject
def print_exec(stream: TextIO) -> None:
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
class VersionUtils(object):
"""
Provides utility method to determine Spark versions with given input string.
"""
@staticmethod
def majorMinorVersion(sparkVersion: str) -> Tuple[int, int]:
"""
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
Examples
--------
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
"""
m = re.search(r"^(\d+)\.(\d+)(\..*)?$", sparkVersion)
if m is not None:
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError(
"Spark tried to parse '%s' as a Spark" % sparkVersion
+ " version string, but it could not find the major and minor"
+ " version numbers."
)
def fail_on_stopiteration(f: Callable) -> Callable:
"""
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError'
prevents silent loss of data when 'f' is used in a for loop in Spark code
"""
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return f(*args, **kwargs)
except StopIteration as exc:
raise RuntimeError(
"Caught StopIteration thrown from user's code; failing the task", exc
)
return wrapper
def walk_tb(tb: Optional[TracebackType]) -> Iterator[TracebackType]:
while tb is not None:
yield tb
tb = tb.tb_next
def try_simplify_traceback(tb: TracebackType) -> Optional[TracebackType]:
"""
Simplify the traceback. It removes the tracebacks in the current package, and only
shows the traceback that is related to the thirdparty and user-specified codes.
Returns
-------
TracebackType or None
Simplified traceback instance. It returns None if it fails to simplify.
Notes
-----
This keeps the tracebacks once it sees they are from a different file even
though the following tracebacks are from the current package.
Examples
--------
>>> import importlib
>>> import sys
>>> import traceback
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... with open("%s/dummy_module.py" % tmp_dir, "w") as f:
... _ = f.write(
... 'def raise_stop_iteration():\\n'
... ' raise StopIteration()\\n\\n'
... 'def simple_wrapper(f):\\n'
... ' def wrapper(*a, **k):\\n'
... ' return f(*a, **k)\\n'
... ' return wrapper\\n')
... f.flush()
... spec = importlib.util.spec_from_file_location(
... "dummy_module", "%s/dummy_module.py" % tmp_dir)
... dummy_module = importlib.util.module_from_spec(spec)
... spec.loader.exec_module(dummy_module)
>>> def skip_doctest_traceback(tb):
... import pyspark
... root = os.path.dirname(pyspark.__file__)
... pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
... for cur_tb, cur_frame in pairs:
... if cur_frame.filename.startswith(root):
... return cur_tb
Regular exceptions should show the file name of the current package as below.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(type(e), e, tb))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Traceback (most recent call last):
File ...
...
File "/.../pyspark/util.py", line ...
...
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
True
If the traceback is simplified with this method, it hides the current package file name:
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = try_simplify_traceback(sys.exc_info()[-1])
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
False
In the case below, the traceback contains the current package in the middle.
In this case, it just hides the top occurrence only.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.simple_wrapper(
... fail_on_stopiteration(dummy_module.raise_stop_iteration)))()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info_a = "".join(
... traceback.format_exception(type(e), e, tb))
... exc_info_b = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> exc_info_a.count("pyspark/util.py")
2
>>> exc_info_b.count("pyspark/util.py")
1
"""
if "pypy" in platform.python_implementation().lower():
# Traceback modification is not supported with PyPy in PySpark.
return None
if sys.version_info[:2] < (3, 7):
# Traceback creation is not supported Python < 3.7.
# See https://bugs.python.org/issue30579.
return None
import pyspark
root = os.path.dirname(pyspark.__file__)
tb_next = None
new_tb = None
pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
last_seen = []
for cur_tb, cur_frame in pairs:
if not cur_frame.filename.startswith(root):
# Filter the stacktrace from the PySpark source itself.
last_seen = [(cur_tb, cur_frame)]
break
for cur_tb, cur_frame in reversed(list(itertools.chain(last_seen, pairs))):
# Once we have seen the file names outside, don't skip.
new_tb = TracebackType(
tb_next=tb_next,
tb_frame=cur_tb.tb_frame,
tb_lasti=cur_tb.tb_frame.f_lasti,
tb_lineno=cur_tb.tb_frame.f_lineno if cur_tb.tb_frame.f_lineno is not None else -1,
)
tb_next = new_tb
return new_tb
def _print_missing_jar(lib_name: str, pkg_name: str, jar_name: str, spark_version: str) -> None:
print(
"""
________________________________________________________________________________________________
Spark %(lib_name)s libraries not found in class path. Try one of the following.
  1. Include the %(lib_name)s library and its dependencies within the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-%(pkg_name)s:%(spark_version)s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-%(jar_name)s, Version = %(spark_version)s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-%(jar_name)s.jar> ...
________________________________________________________________________________________________
"""
% {
"lib_name": lib_name,
"pkg_name": pkg_name,
"jar_name": jar_name,
"spark_version": spark_version,
}
)
def _parse_memory(s: str) -> int:
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
Examples
--------
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {"g": 1024, "m": 1, "t": 1 << 20, "k": 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def inheritable_thread_target(f: Callable) -> Callable:
"""
Return thread target wrapper which is recommended to be used in PySpark when the
pinned thread mode is enabled. The wrapper function, before calling original
thread target, it inherits the inheritable properties specific
to JVM thread such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to JVM when the thread is finished in the Python side. With this wrapper, Python
garbage-collects the Python thread instance and also closes the connection
which finishes JVM thread correctly.
    When the pinned thread mode is off, it returns the original ``f``.
.. versionadded:: 3.2.0
Parameters
----------
f : function
the original thread target.
Notes
-----
This API is experimental.
It is important to know that it captures the local properties when you decorate it
whereas :class:`InheritableThread` captures when the thread is started.
Therefore, it is encouraged to decorate it when you want to capture the local
properties.
For example, the local properties from the current Spark context is captured
when you define a function here instead of the invocation:
>>> @inheritable_thread_target
... def target_func():
... pass # your codes.
If you have any updates on local properties afterwards, it would not be reflected to
the Spark context in ``target_func()``.
The example below mimics the behavior of JVM threads as close as possible:
>>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP
"""
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined]
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
# copies local properties when the thread starts but `inheritable_thread_target`
# copies when the function is wrapped.
properties = (
SparkContext._active_spark_context._jsc.sc() # type: ignore[attr-defined]
.getLocalProperties()
.clone()
)
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> Any:
try:
# Set local properties in child thread.
SparkContext._active_spark_context._jsc.sc().setLocalProperties( # type: ignore[attr-defined]
properties
)
return f(*args, **kwargs)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
return wrapped
else:
return f
class InheritableThread(threading.Thread):
"""
Thread that is recommended to be used in PySpark instead of :class:`threading.Thread`
when the pinned thread mode is enabled. The usage of this class is exactly same as
:class:`threading.Thread` but correctly inherits the inheritable properties specific
to JVM thread such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to JVM when the thread is finished in the Python side. With this class, Python
garbage-collects the Python thread instance and also closes the connection
which finishes JVM thread correctly.
When the pinned thread mode is off, this works as :class:`threading.Thread`.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental.
"""
_props: JavaObject
def __init__(self, target: Callable, *args: Any, **kwargs: Any):
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined]
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
def copy_local_properties(*a: Any, **k: Any) -> Any:
# self._props is set before starting the thread to match the behavior with JVM.
assert hasattr(self, "_props")
SparkContext._active_spark_context._jsc.sc().setLocalProperties( # type: ignore[attr-defined]
self._props
)
try:
return target(*a, **k)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
super(InheritableThread, self).__init__(
target=copy_local_properties, *args, **kwargs # type: ignore[misc]
)
else:
super(InheritableThread, self).__init__(
target=target, *args, **kwargs # type: ignore[misc]
)
def start(self) -> None:
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined]
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# Local property copy should happen in Thread.start to mimic JVM's behavior.
self._props = (
SparkContext._active_spark_context._jsc.sc() # type: ignore[attr-defined]
.getLocalProperties()
.clone()
)
return super(InheritableThread, self).start()
@staticmethod
def _clean_py4j_conn_for_current_thread() -> None:
from pyspark import SparkContext
jvm = SparkContext._jvm # type: ignore[attr-defined]
thread_connection = jvm._gateway_client.get_thread_connection()
if thread_connection is not None:
try:
# Dequeue is shared across other threads but it's thread-safe.
                # If this function has to be invoked one more time in the same thread
# Py4J will create a new connection automatically.
jvm._gateway_client.deque.remove(thread_connection)
except ValueError:
# Should never reach this point
return
finally:
thread_connection.close()
if __name__ == "__main__":
if "pypy" not in platform.python_implementation().lower() and sys.version_info[:2] >= (3, 7):
import doctest
import pyspark.util
from pyspark.context import SparkContext
globs = pyspark.util.__dict__.copy()
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(pyspark.util, globs=globs)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
``` |
{
"source": "333mhz/GPiCase2-Script",
"score": 3
} |
#### File: 333mhz/GPiCase2-Script/lakka_SafeShutdown_gpi2.py
```python
import RPi.GPIO as GPIO
import os
import time
from multiprocessing import Process
#initialize pins
#powerPin = 3 #pin 5
#ledPin = 14 #TXD
#resetPin = 2 #pin 13
#powerenPin = 4 #pin 5
powerPin = 26
powerenPin = 27
#initialize GPIO settings
def init():
GPIO.setmode(GPIO.BCM)
GPIO.setup(powerPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(powerenPin, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(powerenPin, GPIO.HIGH)
GPIO.setwarnings(False)
#waits for user to hold button up to 1 second before issuing poweroff command
def poweroff():
while True:
#self.assertEqual(GPIO.input(powerPin), GPIO.LOW)
GPIO.wait_for_edge(powerPin, GPIO.FALLING)
#start = time.time()
#while GPIO.input(powerPin) == GPIO.HIGH:
# time.sleep(0.5)
os.system("systemctl stop retroarch")
time.sleep(1)
os.system("systemctl poweroff")
def lcdrun():
while True:
os.system("sh /storage/.RetroFlag/lcdnext.sh")
time.sleep(1)
def audiofix():
while True:
time.sleep(0.5)
os.system("systemctl restart retroarch")
break
if __name__ == "__main__":
#initialize GPIO settings
init()
#create a multiprocessing.Process instance for each function to enable parallelism
powerProcess = Process(target = poweroff)
powerProcess.start()
lcdrunProcess = Process(target = lcdrun)
lcdrunProcess.start()
audiofixProcess = Process(target = audiofix)
audiofixProcess.start()
powerProcess.join()
lcdrunProcess.join()
audiofixProcess.join()
GPIO.cleanup()
``` |
{
"source": "3346zsai98/toutiao",
"score": 3
} |
#### File: common/utils/middlewares.py
```python
from flask import request,g
from .jwt_util import verify_jwt
# Requirement: before every request, try to obtain the user's information
# Manually built authentication mechanism (pseudocode):
# - Build the authentication mechanism
# - For specific views, require the user to be logged in before they can access them
# - For all views, whether or not login is enforced, the view can still try to obtain
#   the authenticated user's identity information
# Try to obtain the user's information before every request
# @app.before_request
def jwt_authentication():
    # Extract the token from the request headers and read the user info stored in the token payload
token = request.headers.get('Authorization')
g.user_id = None
g.refresh = None
if token and token.startswith('Bearer '):
token = token[7:]
        # Validate the token
payload = verify_jwt(token)
if payload:
g.user_id = payload.get('user_id')
g.refresh = payload.get('refresh')
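# Registration sketch (hedged: assumes an application factory named create_app,
# which is not shown in this module):
#   def create_app():
#       app = Flask(__name__)
#       app.before_request(jwt_authentication)
#       return app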
``` |
{
"source": "3370sohail/virtual-mystery",
"score": 2
} |
#### File: vm-django/authentication/models.py
```python
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
"""
Automatically creates a user token.
Notes:
- uses the post_save signal
"""
if created:
Token.objects.create(user=instance)
```
#### File: vm-django/system/views.py
```python
from django.shortcuts import render
from django.db import models
from rest_framework import viewsets
from rest_framework.views import APIView, Response
from rest_framework.response import Response
from rest_framework import status, permissions
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.permissions import IsAuthenticated
from . import models
from system.models import Practical, Group, User
from comments.models import Comment
from .serializers import PracticalSerializer, GroupSerializer, ProfileSerializer
from comments.serializers import CommentSerializer
from release import get_current_release
class ListPracticals(APIView):
"""
Return list of all practical objects
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, format=None):
# TO DO: need to send all names for all practicals
try:
if request.user.is_ta:
practical_list = Practical.objects.all()
#
serializer = PracticalSerializer(practical_list, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
except AttributeError:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ListGroups(APIView):
"""
Take a Practical name as input and return all groups
that are in that practical
"""
def get(self,request,praName):
# TO DO: need to send all names for all groups
practical = Practical.objects.filter(name= praName).first()
group = Group.objects.filter(practical = practical)
serializer = GroupSerializer(group,many=True)
return Response(serializer.data,status=status.HTTP_200_OK)
class ListUsers(APIView):
"""
    Take a group id as input and return all users in that group
"""
def get(self,request, groupId):
# TO DO: need to send all names for all groups
group = Group.objects.filter(id = groupId).first()
users = User.objects.filter(group=group)
serializer = ProfileSerializer(users, many=True)
return Response(serializer.data,status=status.HTTP_200_OK)
class UserCheck(APIView):
"""
Checks if the user is a TA or not
"""
    def get(self, request, format=None):
        user = request.user.is_ta
        return Response({'is_ta': user}, status=status.HTTP_200_OK)
class UserComment(APIView):
"""
    Take a username as input and return all comments made by that user
"""
def get(self,request, userName):
#Blog.objects.filter(entry__headline__contains='Lennon', entry__pub_date__year=2008)
selectedUser = User.objects.get(username=userName)
selectedComment = Comment.objects.filter(owner= selectedUser)
serializer = CommentSerializer(selectedComment,many=True)
return Response(serializer.data,status=status.HTTP_200_OK)
``` |
{
"source": "33abrar/Bit-Plane-Slicing-and-Quantization-Based-Color-Image-Watermarking-in-Spatial-Domain",
"score": 2
} |
#### File: 33abrar/Bit-Plane-Slicing-and-Quantization-Based-Color-Image-Watermarking-in-Spatial-Domain/proposed_extract.py
```python
import PIL.Image as pimg
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np, copy, os
import arnold, yCbCr, single_channel, benchmark
from pytictoc import TicToc
#Gauss_0.001p0.5
#S&P_2%
#BLF_n=10&D0=100
#median_filter_3x3
#JPEG_50
#avionw
#baboonw
#lenaw
#peppersw
#sailboatw
def myFunc(e):
return e[0]
def extract(Media_dir, img_path):
t = TicToc() #create instance of class
t.tic() #Start timer
h=mpimg.imread(img_path);
M=512;N=32;m=4;kT=1;
h=yCbCr.rgb2ycbcr(h);
Yh=h[:,:,0];Cbh=h[:,:,1];Crh=h[:,:,2];
kb=33;kT=1;
bit_seq=single_channel.single_channel_extract(np.double(Yh.copy()),N,m,kb,kT);
xRw=bit_seq[0:N*N*4];xGw=bit_seq[N*N*4:2*N*N*4];xBw=bit_seq[2*N*N*4:3*N*N*4];
z_pad=np.zeros((N*N,4),np.uint8);
xRw=xRw.reshape((N*N,4));xRw=np.concatenate((xRw,z_pad),axis=1);xRw=np.packbits(xRw,axis=1);xRw=xRw.reshape((N,N))+8;
xGw=xGw.reshape((N*N,4));xGw=np.concatenate((xGw,z_pad),axis=1);xGw=np.packbits(xGw,axis=1);xGw=xGw.reshape((N,N))+8;
xBw=xBw.reshape((N*N,4));xBw=np.concatenate((xBw,z_pad),axis=1);xBw=np.packbits(xBw,axis=1);xBw=xBw.reshape((N,N))+8;
iterR=13;iterG=23;iterB=33;
xRw=arnold.iarnold(xRw,iterR);xGw=arnold.iarnold(xGw,iterG);xBw=arnold.iarnold(xBw,iterB);
xw=np.dstack((xRw,xGw,xBw));
xw[xw>255]=255;xw[xw<0]=0;
t.toc() #Time elapsed since t.tic()
files = os.listdir(Media_dir)
lst=[]
for index, file in enumerate(files):
w=pimg.open(os.path.join(Media_dir, file));
w=w.resize((N,N),0);w=copy.deepcopy(np.asarray(w));w=w[:,:,0:3];#plt.imshow(w/255);plt.show();
ben = benchmark.NC(w/255,xw/255);
lst.append([ben, file])
plt.figure(figsize=(3,3));
plt.subplot(1,2,1);plt.imshow(w),plt.title("Logo");
plt.subplot(1,2,2);plt.imshow(xw/255),plt.title("Extracted");
plt.show()
lst.sort(key=myFunc)
lst.reverse()
print(lst)
if (lst[0][0]>=.9):
return lst
return None
#plt.imshow(xw/255);plt.show();
#plt.imsave('D:/Computer/MIST/Level-04/Thesis/Code/paper/Attack/mf_lena.bmp',(xw/255));
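# Invocation sketch (the paths below are placeholders, not part of the original script):
#   matches = extract('watermark_logos/', 'attacked/mf_lena.bmp')
#   if matches:
#       print('Best match:', matches[0][1], 'NC =', matches[0][0])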
``` |
{
"source": "33cn/libbft-go",
"score": 3
} |
#### File: python/async/err.py
```python
class AsyncException (Exception):
"""Base class for async exceptions."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AsyncXDRException (AsyncException):
"""XDR encode/decode error within async."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AsyncRPCException (AsyncException):
"""RPC program exception within async."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AsyncUnionException (AsyncException,UnboundLocalError):
"""Accessing union member that was not switched to."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
```
#### File: python/ex1/ex1srv.py
```python
import async.rpctypes
import async
import ex1
import socket
import sys
import signal
active_srvs = [];
def dispatch (srv, sbp):
print "in dispatch"
if sbp.eof ():
print "EOF !!"
# note: need to do this explicitly, since we have a circular
# data structure: srv -> dispatch (C) -> dispatch (py) -> srv
srv.clearcb ();
active_srvs.remove (srv)
return
print "procno=", sbp.proc ()
#sbp.getarg ().warn ()
if sbp.proc () == ex1.FOO_NULL:
sbp.reply (None)
elif sbp.proc () == ex1.FOO_BAR:
bar = sbp.getarg ()
s = 0
for i in bar.y:
print i
s += i
f = ex1.foo_t ()
f.x = 'the sum is ' + `s`
f.xx = s
sbp.reply (f)
elif sbp.proc () == ex1.FOO_BB:
bb = sbp.getarg ()
r = 0
if bb.aa == ex1.A2:
s = 0
for f in bb.b.foos:
s += f.xx
for y in bb.b.bar.y:
s += y
r = s
elif bb.aa == ex1.A1:
r = bb.f.xx
else:
r = bb.i
res = ex1.foo_opq_t ();
sbp.reply (r)
elif sbp.proc () == ex1.FOO_FOOZ:
arg = sbp.getarg ()
bytes = arg.b;
f = ex1.foo_t ()
f.str2xdr (bytes)
f.warn ()
sbp.reply (sbp.getarg ())
elif sbp.proc () == ex1.FOO_OPQ:
x = ex1.foo_opq_t ();
x.c = '4432we00rwersfdqwer';
sbp.reply (x)
else:
sbp.reject (async.arpc.PROC_UNAVAIL)
def newcon(sock):
print "calling newcon"
nsock, addr = sock.accept ()
print "accept returned; fd=", `nsock.fileno ()`
print "new connection accepted", addr
x = async.arpc.axprt_stream (nsock.fileno (), nsock)
print "returned from axprt_stream init"
srv = async.arpc.asrv (x, ex1.foo_prog_1 (),
lambda y : dispatch (srv, y))
print "returned from asrv init"
active_srvs.append (srv)
port = 3000
if len (sys.argv) > 1:
port = int (sys.argv[1])
sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
sock.bind (('127.0.0.1', port))
sock.listen (10);
async.core.fdcb (sock.fileno (), async.core.selread, lambda : newcon (sock))
async.util.fixsignals ()
async.core.amain ()
``` |
{
"source": "33du/blogging-with-django",
"score": 2
} |
#### File: posts/templatetags/posts_custom.py
```python
from django import template
from django.utils.html import strip_spaces_between_tags, strip_tags
from django.utils.text import Truncator
register = template.Library()
@register.filter(name='excerpt')
def excerpt_with_ptag_spacing(value):
# remove spaces between tags
#value = strip_spaces_between_tags(value)
# add space before each P end tag (</p>)
value = value.replace("</p>"," </p>")
# strip HTML tags
value = strip_tags(value)
# other usage: return Truncator(value).words(length, html=True, truncate=' see more')
return value
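# Template usage sketch (assumes this templatetags module is discoverable via an
# installed app; the variable name `post` is illustrative):
#   {% load posts_custom %}
#   {{ post.text|excerpt|truncatewords:50 }}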
```
#### File: blogging-with-django/posts/views.py
```python
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, JsonResponse, HttpResponse, HttpResponseBadRequest
from django.urls import reverse
from django.core.paginator import Paginator
from django.core import serializers
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
import markdown
from .models import Post, Comment, Tag, Image
from .forms import CommentForm
from users.forms import LoginForm, RegistrationForm
@csrf_exempt
def home(request):
login_form = LoginForm()
registration_form = RegistrationForm()
image_posts = Post.objects.all().filter(pub_time__lte=timezone.now()).exclude(image__isnull=True).order_by('-pub_time')[:5]
image_list = []
for post in image_posts:
image_list.append(post.image_set.all()[:1].get())
recent_posts = Post.objects.all().filter(pub_time__lte=timezone.now()).order_by('-pub_time')[:5]
recent_comments = Comment.objects.all().order_by('-pub_time')[:5]
context_objects = {
'login_form': login_form,
'registration_form': registration_form,
'image_list': image_list,
'recent_posts': recent_posts,
'recent_comments': recent_comments
}
return render(request, 'home.html', context_objects)
@csrf_exempt
def index(request, tag_name=''):
tag_list = Tag.objects.all().order_by('id')
login_form = LoginForm()
registration_form = RegistrationForm()
if tag_name != '':
tag_chosen = Tag.objects.get(name=tag_name)
post_list = tag_chosen.post_set.all().filter(pub_time__lte=timezone.now()).order_by('-pub_time')
else:
tag_chosen = None
post_list = Post.objects.all().filter(pub_time__lte=timezone.now()).order_by('-pub_time')
for post in post_list:
post.text = markdown.markdown(post.text)
paginator = Paginator(post_list, 10)
if request.is_ajax():
if request.GET.get('page_number'):
# Paginate based on the page number in the GET request
page_number = request.GET.get('page_number')
response = []
try:
post_list = paginator.page(page_number).object_list
for post in post_list:
post_dict = {}
post_dict['pk'] = post.pk
post_dict['title'] = post.title
post_dict['pub_time'] = post.pub_time
post_dict['text'] = post.text
if post.image_set.all():
post_dict['image_url'] = post.image_set.all()[:1].get().url
else:
post_dict['image_url'] = None
response.append(post_dict)
except Exception as e:
print(e)
return HttpResponseBadRequest(content_type="text/json")
return JsonResponse(response, safe=False)
else:
post_list = paginator.page(1).object_list
context_objects = {
'login_form': login_form,
'registration_form': registration_form,
'post_list': post_list,
'tag_list': tag_list,
'tag_chosen': tag_chosen,
}
return render(request, 'posts/index.html', context_objects)
@csrf_exempt
def post_detail(request, post_id):
post = get_object_or_404(Post, pk=post_id)
post.text = markdown.markdown(post.text)
comment_list = post.comment_set.filter(parent_id=None).order_by('-pub_time')
login_form = LoginForm()
registration_form = RegistrationForm()
try:
next = Post.objects.all().filter(pub_time__lte=post.pub_time).exclude(id=post_id).order_by('-pub_time')[0:1].get()
except Post.DoesNotExist:
next = None
try:
prev = Post.objects.all().filter(pub_time__lte=timezone.now(), pub_time__gte=post.pub_time).exclude(id=post_id).order_by('pub_time')[0:1].get()
except Post.DoesNotExist:
prev = None
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
text = form.cleaned_data['text']
alias = form.cleaned_data['alias']
post = Post.objects.get(id=post_id)
if request.user.is_authenticated:
user = request.user
else:
user = None
if form.cleaned_data['parent_id'] != None:
parent_id = Comment.objects.get(id=form.cleaned_data['parent_id'])
else:
parent_id = None
try:
Comment.objects.create(post=post, user=user, text=text, alias=alias, parent_id=parent_id)
except Exception:
response = {
'has_error': True,
'error_msg': "Request failed, please retry."
}
else:
response = {
'has_error': False
}
return JsonResponse(response)
else:
form = CommentForm()
context_objects = {
'login_form': login_form,
'registration_form': registration_form,
'post': post,
'form': form,
'comment_list': comment_list,
'prev': prev,
'next': next,
}
return render(request, 'posts/detail.html', context_objects)
@csrf_exempt
def delete_comment(request):
if request.method == 'POST':
comment_to_delete = Comment.objects.get(id=request.POST['comment_id'])
if request.user.is_authenticated and request.user == comment_to_delete.user:
comment_to_delete.delete()
response = {
'has_error': False
}
elif request.user.is_superuser:
comment_to_delete.delete()
response = {
'has_error': False
}
else:
response = {
'has_error': True
}
return JsonResponse(response)
``` |
{
"source": "33N-Ltd/aws-lambda-log-collector",
"score": 3
} |
#### File: aws-lambda-log-collector/functions/log_collector.py
```python
import boto3, json, time, logging, os, gzip
from datetime import date, datetime, timedelta
from botocore.exceptions import ClientError
s3_bucket_name = os.environ['S3_BUCKET_NAME']
start_number_of_days= 1
end_number_of_days= 0
#############################
timestring = datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%Hh%Mm%Ss')
ts=(int((datetime.today() - timedelta(hours=24 * start_number_of_days)).timestamp())) * 1000
te=(int((datetime.today() - timedelta(hours=24 * end_number_of_days)).timestamp())) * 1000
# logging init
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event,context):
data = json.dumps(event)
logger.info('Starting log collection based on this event:')
logger.info(data)
passnumber = 1
# grab region parameter from our event
aws_region = event['region']
# initialize a logstream object array - this is a list of all log groups (objects) in the account
log_group_object_array = []
# initialize a log group list array - this is the list of log groups that will eventually get processed
log_group_array = []
# init boto3
client = boto3.client('logs', region_name=aws_region)
# get the output of LogGroups api call, "stream" it into an array of objects and then loop through the array to create a list array of logGroupNames
log_group_stream = client.describe_log_groups() #logGroupName=log_group_name, descending=True, limit=50, orderBy='LastEventTime')
log_group_object_array += log_group_stream['logGroups']
# LogGroups API call will only return max 50 results so we need to handle situations where the number of logGroups is greater than 50
while 'nextToken' in log_group_stream:
log_group_stream = client.describe_log_groups(nextToken=log_group_stream['nextToken'])
log_group_object_array += log_group_stream['logGroups']
log_group_name_dict = [stream_lg['logGroupName'] for stream_lg in log_group_object_array]
# If there are logs from many log groups we want to ensure that we stay within lambda execution time limits.
# That is why we will first check that we don't waste time processing log groups which have no new entries.
# Log group will be processed further if it has at least one new event.
print('------------- Lambda log-collector --------------')
    print('Preprocessing log groups:')
h = 1
for i in log_group_name_dict:
one_log_stream_object = []
print(str(h) + ' ' + i)
# get the output of DescribeLogStreams API. Get only the logStream with latest entry, "stream" it into an object
one_log_stream = client.describe_log_streams(logGroupName=i, descending=True, limit=1, orderBy='LastEventTime')
one_log_stream_object += one_log_stream['logStreams']
# a log group may exist without any log streams. Make sure a log stream exists. After that loop through the object to get the logStreamName
if one_log_stream_object != []:
one_log_stream_name = [stream_ls['logStreamName'] for stream_ls in one_log_stream_object]
# With the logGroupName and the logStreamName verify that there are log entries for the period. If there are then add the logGroup to the log_group_array
log_entries = client.get_log_events(logGroupName=str(i), logStreamName=one_log_stream_name[0], startTime=ts, endTime=te)
if log_entries['events'] != []:
log_group_array.append(i)
h = h + 1
# Preprocessing finished
print('\n' + 'Log groups which have new entries are: ')
for n in log_group_array:
print(n)
# print(log_group_array)
print('Total ' + str(len(log_group_array)))
# With the final list array (log_group_array) we start the process of gathering log events
for e in log_group_array:
log_collector(str(e), str(aws_region), str(s3_bucket_name), int(passnumber))
passnumber = passnumber + 1
time.sleep(2)
print('Finished processing')
def log_collector(logGroupName, awsRegion, s3BucketName, passNumber):
log_group_name = logGroupName
aws_region = awsRegion
s3_bucket_name = s3BucketName
lgnumber = passNumber
# the name of the s3 object will be transformed to a string not containing forward slashes and not starting with a dash
folder_name = logGroupName.replace("/","-")
if folder_name.startswith('-'):
folder_name = folder_name[1:]
file_name = logGroupName.replace("/","-") + '-' + timestring + '.gz'
if file_name.startswith('-'):
file_name = file_name[1:]
# init boto3 for s3
s3 = boto3.resource('s3')
client = boto3.client('logs', region_name=aws_region)
print('\nFor LogGroup ' + str(lgnumber) + ' ' + logGroupName)
print('Events between: ' + str(ts) + ' and ' + str(te))
print('------------- LogStreamName -------------- : # events')
all_streams = []
stream_batch = client.describe_log_streams(logGroupName=log_group_name, descending=True, limit=50, orderBy='LastEventTime')
all_streams += stream_batch['logStreams']
# LogStreams API call will only return max 50 results at a time so we need to handle situations where the number is greater.
# But since a single log group can, over the years, accumulate tens of thousands of log streams and since we don't want to
    # fetch all these old log streams we cap the loop with (k). Counting the initial batch, this amounts to at most 50*(k+1)=300 streams.
k = 0
while 'nextToken' in stream_batch and k < 5:
stream_batch = client.describe_log_streams(logGroupName=log_group_name, descending=True, limit=50, orderBy='LastEventTime', nextToken=stream_batch['nextToken'])
all_streams += stream_batch['logStreams']
k = k + 1
stream_names = [stream['logStreamName'] for stream in all_streams]
out_file = []
for stream in stream_names:
logs_batch = client.get_log_events(logGroupName=log_group_name, logStreamName=stream, startTime=ts, endTime=te, startFromHead=True)
for event in logs_batch['events']:
event.update({'group': log_group_name, 'stream': stream})
out_file.append(json.dumps(event))
    # GetLogEvents API call will return max 10000 events per log stream. We need a loop if the logStream has more events, similar to the previous loops,
# but this time we need extra logic since GetLogEvents API will ALWAYS return a nextBackwardToken (response token equals request token at the end).
# For this same reason we always go inside the while loop and execute at least the first print statement, make the next logs_batch request and
# check for the length of the events array - if it is 0 then we are at the end and both the loop and the if-clause within the loop equal to false.
while 'nextBackwardToken' in logs_batch and len(logs_batch['events']) != 0:
print(stream, ":", len(logs_batch['events']), " Group total :", len(out_file))
logs_batch = client.get_log_events(logGroupName=log_group_name, logStreamName=stream, startTime=ts, endTime=te, startFromHead=True, nextToken=logs_batch['nextBackwardToken'])
if len(logs_batch['events']) != 0:
for event in logs_batch['events']:
event.update({'group': log_group_name, 'stream': stream})
out_file.append(json.dumps(event))
print(stream, ":", len(logs_batch['events']), " Group total :", len(out_file))
print('-------------------------------------------\nTotal number of events: ' + str(len(out_file)))
print(file_name)
json_str = json.dumps(out_file)
json_bytes = json_str.encode('utf-8')
gzip_object = gzip.compress(json_bytes)
s3object = s3.Object(s3_bucket_name, folder_name + '/' + file_name)
print('Starting the upload of file ' + file_name + ' to s3 bucket ' + s3_bucket_name)
    try:
        s3object.put(Body=gzip_object)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchUpload':
            print("Upload Failed")
    else:
        # try/else: the success message is only printed when the upload raised no exception
        print("Log file uploaded to s3\n")
### Local test event
# lambda_handler({'region': 'eu-west-2', 'account': '637085696726'}, {'context'})
``` |
{
"source": "3-3PO/olympus-sdk",
"score": 3
} |
#### File: olympus-sdk/olympus/fixtures.py
```python
import json
import os
THIS_DIR = os.path.join(
os.path.dirname(
os.path.abspath(__file__)
)
)
def load_abis():
abis = {}
abi_dir = os.path.join(THIS_DIR, 'abi')
for fname in os.listdir(abi_dir):
fpath = os.path.join(abi_dir, fname)
with open(fpath, 'r') as f:
abis[fname] = json.loads(f.read())
return abis
def load_addresses():
fname = 'addresses-mainnet.json'
with open(os.path.join(THIS_DIR, fname), 'r') as f:
addresses = json.loads(f.read())
return addresses
abis = load_abis()
addresses = load_addresses()
```
#### File: olympus-sdk/tests/test_account.py
```python
import os
import pytest
from olympus.account import Account
from olympus.consts import ZERO_ACCOUNT
from olympus.exceptions import AccountException
class DummyTransactable:
"""Dummy transactable.
Similar api to an on-chain func from web3 so we don't call the
live stuff when testing.
"""
def call(self):
return
def buildTransaction(self, *args, **kwargs):
return
class RemovePK:
def __enter__(self):
if 'WALLET_PK' in os.environ:
self.tmp_pk = os.getenv('WALLET_PK')
del os.environ['WALLET_PK']
def __exit__(self, type, value, traceback):
if hasattr(self, 'tmp_pk'):
os.environ['WALLET_PK'] = self.tmp_pk
def test_transact_without_pk_raises():
with RemovePK():
with pytest.raises(AccountException):
Account(ZERO_ACCOUNT).transact(DummyTransactable())
```
#### File: olympus-sdk/tests/test_subclasses.py
```python
import pytest
from olympus.router import UniswapV2Router02
from olympus.consts import ZERO_ACCOUNT
class AccountWithClashingNameSpace(UniswapV2Router02):
# Router has an on-chain function with this name
def WETH(self):
pass
def test_clashing_namespace_warns():
with pytest.warns(
UserWarning,
match="WETH already in namespace of <class 'tests.test_subclasses.AccountWithClashingNameSpace'> object."
):
AccountWithClashingNameSpace()
``` |
{
"source": "33r13k1d/jira-auto-transition-py",
"score": 2
} |
#### File: jira-auto-transition-py/tests/conftest.py
```python
from typing import Generator
import pytest
from aioresponses import aioresponses
from fastapi.testclient import TestClient
from app.main import app
@pytest.fixture(scope="module")
def client() -> Generator:
with TestClient(app) as c:
yield c
@pytest.fixture
def aioresps() -> Generator:
with aioresponses() as r:
yield r
``` |
{
"source": "33TU/fstream",
"score": 2
} |
#### File: fstream/protocol/base.py
```python
from asyncio import Transport, Future
from asyncio.exceptions import LimitOverrunError
from types import coroutine
from typing import Any, Awaitable, List, Optional, Union
# Awaitable with instant return
_completed = coroutine(lambda: None if True else (yield))()
class BaseStreamProtocol:
__slots__ = (
'_loop',
'_client_connected_cb',
'_transport',
'_closed',
'_exc',
'_writing_paused',
'_data_future',
'_drain_future',
'_close_future',
'data_buffer'
)
def __init__(self, loop, connected_cb) -> None:
self._loop = loop
self._client_connected_cb = connected_cb
self._transport = None
self._closed = False
self._exc = None
self._writing_paused = False
self._data_future: Optional[Future] = None
self._drain_future: Optional[Future] = None
self._close_future: Optional[Future] = None
self.data_buffer = bytearray()
@property
def transport(self) -> Transport:
        return self._transport
def connection_made(self, transport) -> None:
self._transport = transport
if self._client_connected_cb is not None:
self._loop.create_task(self._client_connected_cb(
StreamReader(self),
StreamWriter(self),
))
def connection_lost(self, exc) -> None:
if self._closed: return
self._exc = exc
self._closed = True
if exc is not None:
if self._data_future is not None and not self._data_future.done():
self._data_future.set_exception(exc)
if self._drain_future is not None and not self._drain_future.done():
self._drain_future.set_exception(exc)
if self._close_future is not None and not self._close_future.done():
self._close_future.set_exception(exc)
else:
if self._data_future is not None and not self._data_future.done():
self._data_future.set_result(None)
if self._drain_future is not None and not self._drain_future.done():
self._drain_future.set_result(None)
if self._close_future is not None and not self._close_future.done():
self._close_future.set_result(None)
def pause_writing(self) -> None:
self._writing_paused = True
def resume_writing(self) -> None:
self._writing_paused = False
if self._drain_future is not None:
self._drain_future.set_result(None)
self._drain_future = None
def wait_data_notify(self) -> Awaitable:
if self._closed:
raise self._exc or ConnectionResetError('Connection lost')
if self._data_future is None:
self._data_future = self._loop.create_future()
self._transport.resume_reading()
return self._data_future
def wait_drain_notify(self) -> Awaitable:
if self._closed:
raise self._exc or ConnectionResetError('Connection lost')
if not self._writing_paused:
return _completed
if self._drain_future is None:
self._drain_future = self._loop.create_future()
return self._drain_future
def wait_close_notify(self) -> Awaitable:
if self._closed:
if self._exc is not None:
raise self._exc
else:
return _completed
if self._close_future is None:
self._close_future = self._loop.create_future()
return self._close_future
def get_exception(self) -> Optional[Exception]:
return self._exc
class StreamReader:
__slots__ = ('protocol',)
def __init__(self, protocol: BaseStreamProtocol) -> None:
self.protocol = protocol
async def readuntil(self, separator=b'\n', include_delimiter=True, limit=1024*1024) -> bytearray:
"""
Read data from the stream until ``separator`` is found.
"""
if self.protocol._exc is not None:
raise self.protocol._exc
data_buffer = self.protocol.data_buffer
sep_len = len(separator)
if sep_len == 0:
raise ValueError('Separator should be at least one-byte string')
sep_index = data_buffer.find(separator)
while sep_index == -1:
data_len = len(data_buffer)
if data_len > limit:
raise LimitOverrunError(
'Separator is not found, and chunk exceed the limit', data_len)
await self.protocol.wait_data_notify()
sep_start = 0 if sep_len > data_len else data_len - sep_len
sep_index = data_buffer.find(separator, sep_start)
buffer_len = sep_index + sep_len
buffer = data_buffer[:buffer_len if include_delimiter else sep_index]
del data_buffer[:buffer_len]
return buffer
async def read(self, nbytes: int) -> Union[bytearray, bytes]:
"""
Read max ``nbytes`` about of bytes.
Returns bytearray if ``nbytes`` > 0 otherwise bytes
"""
if self.protocol._exc is not None:
raise self.protocol._exc
if nbytes < 0:
raise ValueError('read size has to be greater than zero')
elif nbytes == 0:
return b''
data_buffer = self.protocol.data_buffer
buffer_len = len(data_buffer)
if buffer_len == 0:
await self.protocol.wait_data_notify()
buffer_len = len(data_buffer)
read_len = nbytes if nbytes < buffer_len else buffer_len
buffer = data_buffer[:read_len]
del data_buffer[:read_len]
return buffer
async def readexactly(self, nbytes: int) -> Union[bytearray, bytes]:
"""
Read exactly ``nbytes`` about of bytes.
Returns bytearray if ``nbytes`` > 0 otherwise bytes
"""
if self.protocol._exc is not None:
raise self.protocol._exc
if nbytes < 0:
raise ValueError('readexactly size can not be less than zero')
elif nbytes == 0:
return b''
data_buffer = self.protocol.data_buffer
while len(data_buffer) < nbytes:
await self.protocol.wait_data_notify()
buffer = data_buffer[:nbytes]
del data_buffer[:nbytes]
return buffer
async def readlen(self, limit: int = 1024*1024, endian='little') -> Union[bytearray, bytes]:
"""
Reads length prefixed message from the stream.
[u32: length | payload bytes ]
"""
if self.protocol._exc is not None:
raise self.protocol._exc
if limit < 0:
raise ValueError('limit size has to be greater than zero')
data_buffer = self.protocol.data_buffer
while len(data_buffer) < 4:
await self.protocol.wait_data_notify()
buffer_len = int.from_bytes(data_buffer[:4], endian)
if buffer_len > limit:
raise LimitOverrunError('buffer length exceed the limit', buffer_len)
elif buffer_len == 0:
del data_buffer[:4]
return b''
read_len = buffer_len + 4
while len(data_buffer) < read_len:
await self.protocol.wait_data_notify()
buffer = data_buffer[4:read_len]
del data_buffer[:read_len]
return buffer
class StreamWriter:
__slots__ = ('protocol',)
def __init__(self, protocol: BaseStreamProtocol) -> None:
self.protocol = protocol
def close(self) -> None:
self.protocol._transport.close()
def is_closing(self) -> bool:
return self.protocol._transport.is_closing()
def can_write_eof(self) -> bool:
return self.protocol._transport.can_write_eof()
def get_extra_info(self, name, default=None) -> Any:
return self.protocol._transport.get_extra_info(name, default)
def write(self, buffer: Union[bytes, bytearray]) -> None:
self.protocol._transport.write(buffer)
def writelines(self, buffers: List[Any]) -> None:
self.protocol._transport.writelines(buffers)
def writelen(self, buffer: Union[bytes, bytearray], endian='little') -> None:
"""
Writes length prefixed message to stream.
[u32: length | payload bytes ]
"""
self.protocol._transport.write(len(buffer).to_bytes(4, endian))
self.protocol._transport.write(buffer)
def write_eof(self) -> None:
return self.protocol._transport.write_eof()
def drain(self) -> Awaitable:
return self.protocol.wait_drain_notify()
def wait_closed(self) -> Awaitable:
return self.protocol.wait_close_notify()
``` |
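The `readlen()`/`writelen()` pair above defines a small length-prefixed wire format. As a stand-alone illustration (not part of the original module), the framing they implement boils down to:
```python
# Minimal sketch of the [u32: length | payload] framing used by readlen()/writelen();
# the helper names below are made up for illustration only.
def frame(payload: bytes, endian: str = 'little') -> bytes:
    # writelen(): 4-byte length prefix, then the payload
    return len(payload).to_bytes(4, endian) + payload

def unframe(buffer: bytes, endian: str = 'little') -> bytes:
    # readlen(): read the 4-byte prefix, then slice out exactly that many bytes
    length = int.from_bytes(buffer[:4], endian)
    return bytes(buffer[4:4 + length])

assert unframe(frame(b'hello')) == b'hello'
```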
{
"source": "3401797899/Django-API-Utils",
"score": 3
} |
#### File: Django-API-Utils/utils/response.py
```python
import json
from django.http import HttpResponse
from utils.response_status import ResponseStatus
class Response(HttpResponse):
"""
    A wrapper class around HttpResponse
    Builds a JSON response body from the response status `status` and the response data `data`
    If `status` is missing or has the wrong type, the unexpected-error status is used instead
Example:
return Response(ResponseStatus.OK)
data = {'key': 'value'}
return Response(ResponseStatus.OK, data)
"""
def __init__(self, status: ResponseStatus, data=None):
"""
        :param status: the response status to return
        :param data: the data to return
"""
content = {}
if not status or not isinstance(status, ResponseStatus):
status = ResponseStatus.UNEXPECTED_ERROR
content['code'] = status.code
content['msg'] = status.msg
if status == ResponseStatus.OK and data is not None:
content['data'] = data
content = json.dumps(content)
super().__init__(content=content,
content_type='application/json',
status=200,
charset='utf-8')
``` |
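A hypothetical Django view using this wrapper might look as follows; the view name and payload are illustrative, only the `Response`/`ResponseStatus` usage mirrors the class above.
```python
# Illustrative view only; URL wiring and the payload are assumptions.
from utils.response import Response
from utils.response_status import ResponseStatus

def profile(request):
    data = {'id': 1, 'name': 'alice'}
    # Body becomes {"code": ..., "msg": ..., "data": {...}} per the wrapper above
    return Response(ResponseStatus.OK, data)
```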
{
"source": "34127chi/text_matching",
"score": 2
} |
#### File: text_matching/esim/graph.py
```python
import tensorflow as tf
from esim import args
class Graph:
def __init__(self):
self.p = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='p')
self.p_mask = tf.cast(tf.math.equal(self.p, 0), tf.float32)
self.h = tf.placeholder(dtype=tf.int32, shape=(None, args.seq_length), name='h')
self.h_mask = tf.cast(tf.math.equal(self.h, 0), tf.float32)
self.y = tf.placeholder(dtype=tf.int32, shape=None, name='y')
self.keep_prob = tf.placeholder(dtype=tf.float32, name='drop_rate')
self.embedding = tf.get_variable(dtype=tf.float32, shape=(args.vocab_size, args.char_embedding_size),
name='embedding')
self.forward()
def dropout(self, x):
return tf.nn.dropout(x, keep_prob=self.keep_prob)
def bilstm(self, x, hidden_size):
fw_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
bw_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
return tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, x, dtype=tf.float32)
def forward(self):
p_embedding = tf.nn.embedding_lookup(self.embedding, self.p)
h_embedding = tf.nn.embedding_lookup(self.embedding, self.h)
with tf.variable_scope("lstm_p", reuse=tf.AUTO_REUSE):
(p_f, p_b), _ = self.bilstm(p_embedding, args.embedding_hidden_size)
with tf.variable_scope("lstm_p", reuse=tf.AUTO_REUSE):
(h_f, h_b), _ = self.bilstm(h_embedding, args.embedding_hidden_size)
p = tf.concat([p_f, p_b], axis=2)
h = tf.concat([h_f, h_b], axis=2)
p = self.dropout(p)
h = self.dropout(h)
e = tf.matmul(p, tf.transpose(h, perm=[0, 2, 1]))
a_attention = tf.nn.softmax(e + tf.tile(tf.expand_dims(self.h_mask*(-2**32 + 1),1), [1, tf.shape(e)[1],1]))#batch_size seq_len seq_len
b_attention = tf.nn.softmax(tf.transpose(e, perm=[0, 2, 1]) + tf.tile(tf.expand_dims(self.p_mask*(-2**32 + 1),1), [1, tf.shape(tf.transpose(e, perm=[0, 2, 1]))[1],1]))#
a = tf.matmul(a_attention, h)
b = tf.matmul(b_attention, p)
m_a = tf.concat((a, p, a - p, tf.multiply(a, p)), axis=2)
m_b = tf.concat((b, h, b - h, tf.multiply(b, h)), axis=2)
with tf.variable_scope("lstm_a", reuse=tf.AUTO_REUSE):
(a_f, a_b), _ = self.bilstm(m_a, args.context_hidden_size)
with tf.variable_scope("lstm_a", reuse=tf.AUTO_REUSE):
(b_f, b_b), _ = self.bilstm(m_b, args.context_hidden_size)
a = tf.concat((a_f, a_b), axis=2)#batch_size seq_len 2*hidden_size
b = tf.concat((b_f, b_b), axis=2)#batch_size seq_len 2*hidden_size
a = self.dropout(a)
b = self.dropout(b)
a_avg = tf.reduce_mean(a, axis=1)#batch_size 2*hidden_size
b_avg = tf.reduce_mean(b, axis=1)#batch_size 2*hidden_size
a_max = tf.reduce_max(a, axis=1)#batch_size 2*hidden_size
b_max = tf.reduce_max(b, axis=1)#batch_size 2*hidden_size
v = tf.concat((a_avg, a_max, b_avg, b_max), axis=1)
        v = tf.layers.dense(v, 512, activation=tf.nn.tanh)
v = self.dropout(v)
        logits = tf.layers.dense(v, 2, activation=tf.nn.tanh)
self.prob = tf.nn.softmax(logits)
self.prediction = tf.argmax(logits, axis=1)
self.train(logits)
def train(self, logits):
y = tf.one_hot(self.y, args.class_size)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
self.loss = tf.reduce_mean(loss)
self.train_op = tf.train.AdamOptimizer(args.learning_rate).minimize(self.loss)
correct_prediction = tf.equal(tf.cast(self.prediction, tf.int32), self.y)
self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
``` |
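A rough TF1-style training step for this graph could look like the sketch below; the batch size and the concrete `args` values are assumptions, and the zero-filled arrays stand in for real padded id sequences.
```python
# Hypothetical driver for Graph (TensorFlow 1.x); shapes come from esim.args.
import numpy as np
import tensorflow as tf
from esim import args
from esim.graph import Graph

model = Graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    p = np.zeros((8, args.seq_length), dtype=np.int32)  # padded premise ids
    h = np.zeros((8, args.seq_length), dtype=np.int32)  # padded hypothesis ids
    y = np.zeros((8,), dtype=np.int32)                   # class labels
    _, loss, acc = sess.run(
        [model.train_op, model.loss, model.acc],
        feed_dict={model.p: p, model.h: h, model.y: y, model.keep_prob: 0.8})
```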
{
"source": "343695222/QQZoneMood",
"score": 3
} |
#### File: src/util/util.py
```python
import time
import os
import pandas as pd
import re
# %a Abbreviated weekday name, e.g. Wed
# %A Full weekday name, e.g. Wednesday
# %b Abbreviated month name, e.g. Apr
# %B Full month name, e.g. April
# %c: Date-and-time string representation (e.g. 04/07/10 10:43:39)
# %d: Day of the month
# %f: Microsecond (range [0,999999])
# %H: Hour (24-hour clock, [0, 23])
# %I: Hour (12-hour clock, [01, 12])
# %j: Day of the year [001,366]
# %m: Month ([01,12])
# %M: Minute ([00,59])
# %p: AM or PM
# %S: Second (range [00,61]; see the Python manual for why it is not [00, 59])
# %U: Week number of the year, with Sunday as the first day of the week
# %w: Day of the week, range [0, 6], 0 means Sunday
# %W: Week number of the year, with Monday as the first day of the week
# %x: Date string (e.g. 04/07/10)
# %X: Time string (e.g. 10:43:39)
# %y: Two-digit year
# %Y: Four-digit year
# %z: Offset from UTC (empty string for local time)
# %Z: Time zone name (empty string for local time)
# %%: %% => %
# Oct 19, 2017 12:00:00 AM
# May 27, 2015 12:00:00 AM
def get_short_date(date):
time_array = time.strptime(date, "%Y-%m-%d")
return time.strftime("%Y%m%d", time_array)
def get_standard_date(date):
time_array = time.strptime(date, "%b %d, %Y %X %p")
return time.strftime("%Y-%m-%d", time_array)
def get_standard_date2(date):
time_array = time.strptime(date, "%Y-%m-%d %X")
return time.strftime("%Y-%m-%d", time_array)
# Convert a date string to a timestamp
def get_mktime(date_string):
return time.mktime(time.strptime(date_string, '%Y-%m-%d'))
# Convert a date string to a timestamp
def get_mktime2(date_string):
return time.mktime(time.strptime(date_string, '%Y年%m月%d日'))
# Convert a timestamp back to a standard date string
def get_standard_time_from_mktime(mktime):
return time.strftime("%Y-%m-%d", time.localtime(mktime))
def get_standard_time_from_mktime2(mktime):
temp = time.strftime("%Y-%m-%d", time.localtime(mktime))
return get_mktime(temp)
def get_full_time_from_mktime(mktime):
return time.strftime("%Y-%m-%d %X", time.localtime(mktime))
def get_month(date):
time_array = time.strptime(str(date), "%Y-%m-%d")
return time.strftime("%Y-%m", time_array)
def check_dir_exist(dir):
    if not os.path.exists(dir):
os.makedirs(dir)
def open_file_list(path, open_data_frame = False):
path_dir = os.listdir(path)
if open_data_frame:
df = pd.DataFrame()
else:
page_list = []
for dir in path_dir:
print('open dir:', dir, '...')
file_name = path + dir
if open_data_frame:
data_df = do_read_csv(file_name)
df = pd.concat([df, data_df], axis=0)
else:
data = do_open_file(file_name=file_name)
page_list.append(data)
if open_data_frame:
return df
else:
return page_list
def do_open_file(file_name):
with open(file_name, 'r', encoding='utf-8') as r:
try:
data = r.read()
print(file_name)
return data
except BaseException as e:
format_error(e, file_name + "file error")
def get_file_full_path(path):
path_dir = os.listdir(path)
file_name_list = []
for dir in path_dir:
file_name = path + dir
file_name_list.append(file_name)
return file_name_list
def get_file_list(path):
return os.listdir(path)
def do_read_csv(file_name):
if file_name.find('.csv') != -1:
data = pd.read_csv(file_name)
return data
elif file_name.find('.xlsx') != -1:
data = pd.read_excel(file_name)
return data
else:
return pd.DataFrame()
def format_error(e, msg=""):
print('ERROR===================')
print(e)
print(msg)
print('ERROR===================')
def date_to_millis(d):
return int(time.mktime(d.timetuple())) * 1000
def remove_waste_emoji(text):
text = re.subn(re.compile('\[em\].*?\[\/em\]'), '', text)[0]
text = re.subn(re.compile('@\{.*?\}'), '', text)[0]
return text
if __name__ =='__main__':
print(get_mktime('2018-09-6'))
print(get_mktime('2018-9-06'))
print(get_full_time_from_mktime(1566545874))
``` |
{
"source": "343829084/flask_practice",
"score": 2
} |
#### File: 343829084/flask_practice/hello.py
```python
import os
from flask import Flask, render_template, request, flash, redirect, url_for, flash, session
from flask_script import Manager
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, BooleanField, PasswordField, IntegerField, TextField, FormField, SelectField, FieldList
from wtforms.validators import DataRequired, Length
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
import pymysql
#import config # import the configuration file defined above
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.secret_key = 'dev'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:root@localhost:3306/testflask'
# Track modifications dynamically; if left unset, only a warning is emitted
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =False
# Echo the raw SQL statements when querying
app.config['SQLALCHEMY_ECHO'] = True
#app.config.from_object(config)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
manager = Manager(app)
db.create_all()
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %r>' % self.username
@app.shell_context_processor
def make_shell_context():
return dict(db=db, User=User, Role=Role)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(1, 20)])
password = PasswordField('Password', validators=[DataRequired(), Length(5, 20)])
remember = BooleanField('Remember me')
submit = SubmitField()
# Home page
@app.route('/', methods=['POST', 'GET'])
def index():
    # Check whether the user is already logged in; if not, go back to the login page
    username = session.get('username')
    # If not logged in yet, return to the login page
    if username == None:
        return redirect(url_for('login'))
    # Fetch the data to display from the database
    #data = db.show()
    #return render_template('index.html', all_message=data, user=username)
return render_template('index.html', user=username)
@app.route('/login', methods=['GET', 'POST'])
def login():
app.logger.debug('A value for debugging')
form = LoginForm()
if form.validate_on_submit():
        app.logger.debug('username=%s, pwd=%s', form.username.data, form.password.data)
#code = form['username']
#api = form['password']
if form.username.data != 'admin' or form.password.data != '<PASSWORD>':
return render_template('login.html', form=form, error="username or pwd is error!")
else:
return redirect(url_for('index'))
return render_template('login.html', form=form, error='')
if __name__ == '__main__':
#app.run(debug=True, host='0.0.0.0')
manager.run()
``` |
{
"source": "343GuiltySpark-04/-cautious-tribble-",
"score": 4
} |
#### File: 343GuiltySpark-04/-cautious-tribble-/perc_finder.py
```python
import menu
import sub_menu
def perc_find():
print("Percentage finder.")
user_input_1 = float(input("Enter a percentage: "))
user_input_2 = float(input("Enter a number: "))
temp_input_1 = 0
if user_input_1 < 1:
temp_input_1 = user_input_1 * 100
else:
temp_input_1 = user_input_1
    if user_input_1 < 0:
        print("Invalid input!")
        return perc_find()
    elif user_input_2 < 0:
        print("Invalid input!")
        return perc_find()
if user_input_1 >= 1:
user_input_1 = user_input_1 / 100
perc = user_input_1 * user_input_2
print(temp_input_1, "% of", user_input_2, "is", perc)
sub_menu.end()
``` |
{
"source": "343max/led_lamp",
"score": 3
} |
#### File: 343max/led_lamp/settings.py
```python
import json
import os
from typing import Optional, Tuple
state_json = os.path.dirname(os.path.abspath(__file__)) + '/state.json'
def store_scene(scene_name: str, on: bool):
with open(state_json, 'w') as json_file:
json.dump({'scene_name': scene_name, 'on': on}, json_file)
def load_scene() -> Tuple[Optional[str], bool]:
try:
with open(state_json) as json_file:
state = json.load(json_file)
return (state['scene_name'], state['on'] if 'on' in state else False)
except FileNotFoundError:
return (None, False)
except json.decoder.JSONDecodeError:
return (None, False)
``` |
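A quick round trip with these helpers (the scene name is made up) would be:
```python
# Illustrative usage, assuming this file is importable as `settings`.
from settings import store_scene, load_scene

store_scene('reading', True)     # persists {'scene_name': 'reading', 'on': True}
scene_name, on = load_scene()    # -> ('reading', True), or (None, False) if missing
print(scene_name, on)
```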
{
"source": "344399889/ranblog",
"score": 2
} |
#### File: 344399889/ranblog/fabfile.py
```python
from datetime import datetime
from fabric.api import *
# Login user and hosts:
env.user = 'root'
env.hosts = ['192.168.127.12'] # with multiple hosts, fabric deploys to each in turn
def pack():
    ' Define a pack task '
    # Build a tar archive:
tar_files = ['*.py', 'static/*', 'templates/*']
#local('rm -f example.tar.gz')
local('tar -czvf example.tar.gz --exclude=\'*.tar.gz\' --exclude=\'fabfile.py\' %s' % ' '.join(tar_files))
def deploy():
    ' Define a deploy task '
    # Temporary file on the remote server:
    remote_tmp_tar = '/tmp/example.tar.gz'
    tag = datetime.now().strftime('%Y-%m-%d_%H.%M.%S')
    run('rm -f %s' % remote_tmp_tar)
    # Upload the tar file to the remote server:
    put('example.tar.gz', remote_tmp_tar)
    # Extract it:
    remote_dist_dir = '/srv/www.example.com-%s' % tag
    remote_dist_link = '/srv/www.example.com'
    run('mkdir %s' % remote_dist_dir)
    with cd(remote_dist_dir):
        run('tar -xzvf %s' % remote_tmp_tar)
    # Set www-data permissions on the new directory:
    # run('chown -R www-data:www-data %s' % remote_dist_dir)
    # Remove the old symlink:
    run('rm -f %s' % remote_dist_link)
    # Create a new symlink pointing at the newly deployed directory:
    run('ln -s %s %s' % (remote_dist_dir, remote_dist_link))
    # run('chown -R www-data:www-data %s' % remote_dist_link)
    # Restart fastcgi:
# fcgi = '/etc/init.d/py-fastcgi'
# with settings(warn_only=True):
# run('%s stop' % fcgi)
# run('%s start' % fcgi)
``` |
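The two tasks are normally chained from the command line as `fab pack deploy`; programmatically (Fabric 1.x) the equivalent would be roughly:
```python
# Hypothetical programmatic invocation; host/user values come from env above.
from fabric.api import execute
import fabfile

execute(fabfile.pack)    # build example.tar.gz locally
execute(fabfile.deploy)  # upload, unpack into a timestamped dir, flip the symlink
```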
{
"source": "345074893/text-sentiment-transfer-based-on-keywords",
"score": 2
} |
#### File: text-sentiment-transfer-based-on-keywords/Gan_architecture/trans_trans.py
```python
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
from utils.ops import *
import texar as tx
from Gan_architecture import trans_config
from texar.modules import TransformerEncoder, TransformerDecoder, MLPTransformConnector, GumbelSoftmaxEmbeddingHelper
from texar.utils import transformer_utils
import numpy as np
#The generator network based on the Relational Memory
def generator(text_ids, text_keyword_id, text_keyword_length, labels, text_length, temperature, vocab_size, batch_size,
seq_len, gen_emb_dim, mem_slots, head_size, num_heads,hidden_dim, start_token):
is_target = tf.to_float(tf.not_equal(text_ids[:, 1:], 0))
# Source word embedding
src_word_embedder = tx.modules.WordEmbedder(
vocab_size=vocab_size, hparams=trans_config.emb)
src_word_embeds = src_word_embedder(text_keyword_id)
src_word_embeds = src_word_embeds * trans_config.hidden_dim ** 0.5
# Position embedding (shared b/w source and target)
pos_embedder = tx.modules.SinusoidsPositionEmbedder(
position_size=seq_len,
hparams=trans_config.position_embedder_hparams)
# src_seq_len = batch_data['text_keyword_length']
src_pos_embeds = pos_embedder(sequence_length=seq_len)
src_input_embedding = src_word_embeds + src_pos_embeds
encoder = TransformerEncoder(hparams=trans_config.encoder)
encoder_output = encoder(inputs=src_input_embedding,
sequence_length=text_keyword_length)
# modify sentiment label
label_connector = MLPTransformConnector(output_size=trans_config.hidden_dim)
labels = tf.to_float(tf.reshape(labels, [-1, 1]))
c = tf.reshape(label_connector(labels), [batch_size, 1, 512])
c_ = tf.reshape(label_connector(1-labels), [batch_size, 1, 512])
encoder_output = tf.concat([c, encoder_output[:, 1:, :]], axis=1)
encoder_output_ = tf.concat([c_, encoder_output[:, 1:, :]], axis=1)
# The decoder ties the input word embedding with the output logit layer.
# As the decoder masks out <PAD>'s embedding, which in effect means
# <PAD> has all-zero embedding, so here we explicitly set <PAD>'s embedding
# to all-zero.
tgt_embedding = tf.concat(
[tf.zeros(shape=[1, src_word_embedder.dim]),
src_word_embedder.embedding[1:, :]],
axis=0)
tgt_embedder = tx.modules.WordEmbedder(tgt_embedding)
tgt_word_embeds = tgt_embedder(text_ids)
tgt_word_embeds = tgt_word_embeds * trans_config.hidden_dim ** 0.5
tgt_seq_len = text_length
tgt_pos_embeds = pos_embedder(sequence_length=tgt_seq_len)
tgt_input_embedding = tgt_word_embeds + tgt_pos_embeds
_output_w = tf.transpose(tgt_embedder.embedding, (1, 0))
decoder = TransformerDecoder(vocab_size=vocab_size,
output_layer=_output_w,
hparams=trans_config.decoder)
# For training
outputs = decoder(
memory=encoder_output,
memory_sequence_length=text_keyword_length,
inputs=tgt_input_embedding,
decoding_strategy='train_greedy',
mode=tf.estimator.ModeKeys.TRAIN
)
mle_loss = transformer_utils.smoothing_cross_entropy(
outputs.logits[:, :-1, :], text_ids[:, 1:], vocab_size, trans_config.loss_label_confidence)
pretrain_loss = tf.reduce_sum(mle_loss * is_target) / tf.reduce_sum(is_target)
# Gumbel-softmax decoding, used in training
start_tokens = np.ones(batch_size, int)
end_token = int(2)
gumbel_helper = GumbelSoftmaxEmbeddingHelper(
tgt_embedding, start_tokens, end_token, temperature)
gumbel_outputs, sequence_lengths = decoder(
memory=encoder_output_,
memory_sequence_length=text_keyword_length,
helper=gumbel_helper
)
# max_index = tf.argmax(gumbel_outputs.logits, axis=2)
    # gen_x_onehot_adv = tf.one_hot(max_index, vocab_size, 1.0, 0.0)
gen_o = tf.reduce_sum(tf.reduce_max(gumbel_outputs.logits, axis=2))
return gumbel_outputs.logits, gumbel_outputs.sample_id, pretrain_loss, gen_o
# The discriminator network based on the CNN classifier
def discriminator(x_onehot, load_wordvec, id2word, batch_size, seq_len, vocab_size, dis_emb_dim, num_rep, sn):
# get the embedding dimension for each presentation
emb_dim_single = int(dis_emb_dim / num_rep)
assert isinstance(emb_dim_single, int) and emb_dim_single > 0
filter_sizes = [2, 3, 4, 5]
num_filters = [300, 300, 300, 300]
dropout_keep_prob = 0.75
while load_wordvec:
embed = read_wordvec('data/glove.twitter.27B.100d.txt', id2word)
load_wordvec = False
d_embeddings = tf.get_variable('d_emb', shape=[vocab_size, dis_emb_dim],
initializer=tf.constant_initializer(embed))
input_x_re = tf.reshape(x_onehot, [-1, vocab_size])
emb_x_re = tf.matmul(input_x_re, d_embeddings)
emb_x = tf.reshape(emb_x_re, [batch_size, seq_len, dis_emb_dim]) # batch_size x seq_len x dis_emb_dim
    emb_x_expanded = tf.expand_dims(emb_x, -1) # batch_size x seq_len x dis_emb_dim x 1
print('shape of emb_x_expanded: {}'.format(emb_x_expanded.get_shape().as_list()))
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for filter_size, num_filter in zip(filter_sizes, num_filters):
conv = conv2d(emb_x_expanded, num_filter, k_h=filter_size, k_w=emb_dim_single,
d_h=1, d_w=emb_dim_single, sn=sn, stddev=None, padding='VALID',
                      scope="conv-%s" % filter_size) # batch_size x (seq_len-k_h+1) x num_rep x num_filter
out = tf.nn.relu(conv, name="relu")
pooled = tf.nn.max_pool(out, ksize=[1, seq_len - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1], padding='VALID',
                                name="pool") # batch_size x 1 x num_rep x num_filter
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = sum(num_filters)
    h_pool = tf.concat(pooled_outputs, 3) # batch_size x 1 x num_rep x num_filters_total
print('shape of h_pool: {}'.format(h_pool.get_shape().as_list()))
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
# Add highway
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0) # (batch_size*num_rep) x num_filters_total
# Add dropout
h_drop = tf.nn.dropout(h_highway, dropout_keep_prob, name='dropout')
# fc
fc_out = linear(h_drop, output_size=100, use_bias=True, sn=sn, scope='fc')
is_real_logits = linear(fc_out, output_size=1, use_bias=True, sn=sn, scope='logits')
# sentiment_logits = linear(fc_out, output_size=2, use_bias=True, sn=sn, scope='sentiment_logits')
#
# sentiment_prob = tf.nn.softmax(sentiment_logits, axis=2)
# sentimen_class = tf.arg_max(sentiment_prob)
is_real_logits = tf.squeeze(is_real_logits, -1) # batch_size*num_rep
# return is_real_logits, sentiment_prob, sentimen_class
return is_real_logits
``` |
{
"source": "3453-315h/Profil3r",
"score": 3
} |
#### File: modules/forum/hackernews.py
```python
from profil3r.app.search import search_get
from bs4 import BeautifulSoup
import time
class Hackernews:
def __init__(self, config, permutations_list):
# 1000 ms
self.delay = config['plateform']['hackernews']['rate_limit'] / 1000
# https://news.ycombinator.com/user?id={username}
self.format = config['plateform']['hackernews']['format']
self.permutations_list = permutations_list
# Forum
self.type = config['plateform']['hackernews']['type']
# Generate all potential hackernews usernames
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation = permutation,
))
return possible_usernames
def search(self):
hackernews_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
r = search_get(username)
if not r:
continue
# If the account exists
if r.text.find("No such user.") != 0:
# Account object
account = {}
# Get the username
account["value"] = username
# Parse HTML response content with beautiful soup
soup = BeautifulSoup(r.text, 'html.parser')
# Scrape the user informations
try:
user_creation_date = str(soup.find_all("table")[2].find_all("td")[3].get_text()) if soup.find_all("table") else None
user_karma = str(soup.find_all("table")[2].find_all("td")[5].get_text()) if soup.find_all("table") else None
account["creation_date"] = {"name": "Creation Date", "value": user_creation_date}
account["karma"] = {"name": "Karma", "value": user_karma}
except:
pass
# Append the account to the accounts table
hackernews_usernames["accounts"].append(account)
time.sleep(self.delay)
return hackernews_usernames
```
#### File: modules/gaming/leagueoflegends.py
```python
from profil3r.app.search import search_get
from bs4 import BeautifulSoup
import time
class Leagueoflegends:
def __init__(self, config, permutations_list):
# 1000 ms
self.delay = config['plateform']['leagueoflegends']['rate_limit'] / 1000
# op.gg/summoner/userName={permutation}
self.format = config['plateform']['leagueoflegends']['format']
# League of legends usernames are not case sensitive
self.permutations_list = permutations_list
# Gaming
self.type = config['plateform']['leagueoflegends']['type']
# Servers
self.servers = config['plateform']['leagueoflegends']['servers']
# Generate all potential league of legends usernames
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation=permutation,
))
return possible_usernames
def search(self):
leagueoflegends_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
for server in self.servers:
# {subdomain}{username}
url = server["url"].format(username)
r = search_get(url)
if not r:
continue
if r.status_code == 200:
# Account object
account = {}
# Get the URL
account["value"] = url
# Parse HTML response content with beautiful soup
soup = BeautifulSoup(r.text, 'html.parser')
# Scrape the user informations
try:
user_username = str(soup.find_all(class_="Name")[0].get_text()) if soup.find_all(class_="Name") else None
user_elo_score = str(soup.find_all(class_="TierRank")[0].get_text()) if soup.find_all(class_="TierRank") else None
user_last_connection = str(soup.find_all(class_="TimeStamp")[0].find_all(class_="_timeago")[0].get_text()) if soup.find_all(class_="TimeStamp") else None
# If the account exists
if user_username:
account["user_username"] = {"name": "Name", "value": user_username}
account["user_elo"] = {"name": "Elo", "value": user_elo_score}
account["user_last_connection"] = {"name": "Last Connection", "value": user_last_connection}
# Append the account to the accounts table
leagueoflegends_usernames["accounts"].append(account)
except:
pass
time.sleep(self.delay)
return leagueoflegends_usernames
``` |
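A minimal driver for the `Hackernews` module above might look like this; the config dict only mirrors the keys the class reads (`rate_limit`, `format`, `type`) and is not the real Profil3r configuration, the import path is inferred from the file layout, and running it performs live HTTP requests.
```python
# Hypothetical standalone usage; the format template uses {permutation} because
# that is the keyword the class passes to str.format().
from profil3r.app.modules.forum.hackernews import Hackernews  # path assumed

config = {
    "plateform": {
        "hackernews": {
            "rate_limit": 1000,  # milliseconds between requests
            "format": "https://news.ycombinator.com/user?id={permutation}",
            "type": "forum",
        }
    }
}
permutations = ["johndoe", "john.doe", "jdoe"]
results = Hackernews(config, permutations).search()
print(results["accounts"])
```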
{
"source": "3454394124/sayhello",
"score": 4
} |
#### File: sayhello/hellopy/minlcm.py
```python
def lcm(x, y):
    # get the larger of the two numbers
if x > y:
greater = x
else:
greater = y
while (True):
if(greater % x == 0) and (greater % y == 0 ):
lcm = greater
break
greater +=1
return lcm
num1 = int(input('Enter the first integer: '))
num2 = int(input('Enter the second integer: '))
print('The least common multiple of', num1, 'and', num2, 'is', lcm(num1, num2))
``` |
{
"source": "346pro/Twispy",
"score": 3
} |
#### File: Twispy/twispy/handler.py
```python
import json
import os
from collections import OrderedDict
from typing import List, Dict, Union
from twispy.request import Request
with open(os.path.abspath(os.path.dirname(__file__)) + "/api.json", "rb") as f:
api_dict = json.loads(f.read().decode())
class API:
__slots__ = list(api_dict.keys()) + ["_request", "_do", "streaming"]
def __init__(self, ck, cs, at, ats, uuid=None, deviceId=None):
self._request = Request(ck, cs, at, ats, uuid, deviceId)
self._do = self._request.do
self.streaming = self._request.streaming
def __getattr__(self, name):
def func(**kwargs) -> Union[List, Dict]:
if name in api_dict:
api = api_dict[name]
data = OrderedDict()
for array in api["data"]:
key, value = array[0:2]
data[key] = value
if key in kwargs:
data[key] = str(kwargs[key])
if data[key] == False:
# optional argument
del data[key]
continue
if data[key] == None:
# necessary argument
raise Exception("{} must have non-null parameter.".format(key))
result = self._do(api["method"], api["url"], data, headerType=api["headerType"], authorizationType=api["authorizationType"])
return result
raise AttributeError("No such a method found.")
return func
@staticmethod
def default_callback(stream) -> None:
print(json.dumps(stream, indent=4))
def create_poll(self, text: str, choices: List[str], minutes=1440) -> Dict:
"""
Create Twitter poll tweet. CK/CS must be Twitter Official Keys.
:param text: Tweet content
        :param choices: poll choices (2 to 4 entries)
        :param minutes: how long this poll lasts (minutes)
:return: status object
"""
if len(choices) not in [2, 3, 4]:
raise Exception("choices must has 2 to 4")
params = OrderedDict()
for i in range(len(choices)):
params["twitter:string:choice{}_label".format(i + 1)] = choices[i]
params["twitter:api:api:endpoint"] = "1"
params["twitter:card"] = "poll{}choice_text_only".format(len(choices))
params["twitter:long:duration_minutes"] = minutes
r = self.cards_create(
card_data=json.dumps(params)
)
if "card_uri" not in r:
raise Exception("API returned an error.\nAPI response: {}\n".format(repr(r)))
return self.statuses_update(
status=text,
card_uri=r["card_uri"]
)
``` |
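Typical usage of the dispatcher above might look like the following; the credential strings are placeholders and `statuses_update` is assumed to be one of the endpoints defined in api.json.
```python
# Hypothetical usage; nothing here is an official example from the project.
from twispy.handler import API

api = API("CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")

# __getattr__ resolves the name via api.json, fills in the data template from the
# keyword arguments, and issues the signed request.
status = api.statuses_update(status="hello from twispy")

# Poll creation goes through the card endpoint and needs official CK/CS keys.
poll = api.create_poll("Which one?", ["red", "blue"], minutes=60)
```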
{
"source": "346/pulumi-spotinst",
"score": 2
} |
#### File: python/pulumi_spotinst/subscription.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from . import utilities, tables
class Subscription(pulumi.CustomResource):
endpoint: pulumi.Output[str]
"""
The endpoint the notification will be sent to: url in case of `"http"`/`"https"`, email address in case of `"email"`/`"email-json"`, sns-topic-arn in case of `"aws-sns"`.
"""
event_type: pulumi.Output[str]
"""
The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`.
"""
format: pulumi.Output[dict]
"""
The format of the notification content (JSON Format - Key+Value). Valid values: `"%instance-id%"`, `"%event%"`, `"%resource-id%"`, `"%resource-name%"`.
"""
protocol: pulumi.Output[str]
"""
The protocol to send the notification. Valid values: `"http"`, `"https"`, `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
"""
resource_id: pulumi.Output[str]
"""
Spotinst Resource ID (Elastigroup ID).
"""
def __init__(__self__, resource_name, opts=None, endpoint=None, event_type=None, format=None, protocol=None, resource_id=None, __name__=None, __opts__=None):
"""
Provides a Spotinst subscription resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] endpoint: The endpoint the notification will be sent to: url in case of `"http"`/`"https"`, email address in case of `"email"`/`"email-json"`, sns-topic-arn in case of `"aws-sns"`.
:param pulumi.Input[str] event_type: The event to send the notification when triggered. Valid values: `"AWS_EC2_INSTANCE_TERMINATE"`, `"AWS_EC2_INSTANCE_TERMINATED"`, `"AWS_EC2_INSTANCE_LAUNCH"`, `"AWS_EC2_INSTANCE_UNHEALTHY_IN_ELB"`, `"GROUP_ROLL_FAILED"`, `"GROUP_ROLL_FINISHED"`, `"CANT_SCALE_UP_GROUP_MAX_CAPACITY"`, `"GROUP_UPDATED"`, `"AWS_EC2_CANT_SPIN_OD"`, `"AWS_EMR_PROVISION_TIMEOUT"`, `"AWS_EC2_INSTANCE_READY_SIGNAL_TIMEOUT"`.
:param pulumi.Input[dict] format: The format of the notification content (JSON Format - Key+Value). Valid values: `"%instance-id%"`, `"%event%"`, `"%resource-id%"`, `"%resource-name%"`.
:param pulumi.Input[str] protocol: The protocol to send the notification. Valid values: `"http"`, `"https"`, `"email"`, `"email-json"`, `"aws-sns"`, `"web"`.
:param pulumi.Input[str] resource_id: Spotinst Resource ID (Elastigroup ID).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if endpoint is None:
raise TypeError('Missing required property endpoint')
__props__['endpoint'] = endpoint
if event_type is None:
raise TypeError('Missing required property event_type')
__props__['event_type'] = event_type
__props__['format'] = format
if protocol is None:
raise TypeError('Missing required property protocol')
__props__['protocol'] = protocol
if resource_id is None:
raise TypeError('Missing required property resource_id')
__props__['resource_id'] = resource_id
super(Subscription, __self__).__init__(
'spotinst:index/subscription:Subscription',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
``` |
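An illustrative Pulumi program using this resource could be as follows; every argument value is a placeholder rather than a real Elastigroup or endpoint.
```python
# Hypothetical Pulumi program; all values are placeholders.
import pulumi
import pulumi_spotinst as spotinst

subscription = spotinst.Subscription(
    "default-subscription",
    resource_id="sig-12345678",                     # Elastigroup ID
    event_type="AWS_EC2_INSTANCE_LAUNCH",
    protocol="web",
    endpoint="https://example.com/spotinst-hook",
    format={"instance": "%instance-id%", "event": "%event%"},
)

pulumi.export("subscription_id", subscription.id)
```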
{
"source": "34700/OBS-Studio-Cursor-skin",
"score": 3
} |
#### File: 34700/OBS-Studio-Cursor-skin/mouse_skin_obs.py
```python
import obspython as obs
from pynput.mouse import Controller # python -m pip install pynput
c = Controller()
get_position = lambda: c.position
__version__ = "1.0.0"
def apply_scale(x, y, width, height):
width = round(width * x)
height = round(height * y)
return width, height
def lerp(minVal, maxVal, k):
val = minVal + ((maxVal - minVal)*k)
return val
class CursorAsSource:
source_name = None
lock = True
flag = True
refresh_rate = 15
def update_cursor(self):
source = obs.obs_get_source_by_name(self.source_name)
settings = obs.obs_data_create()
if source is not None:
scene_source = obs.obs_frontend_get_current_scene()
scene_width = obs.obs_source_get_width(source)
scene_height = obs.obs_source_get_height(source)
scene = obs.obs_scene_from_source(scene_source)
scene_item = obs.obs_scene_find_source(scene, self.source_name)
target_item = obs.obs_scene_find_source(scene, self.target_name)
if scene_item:
scale = obs.vec2()
obs.obs_sceneitem_get_scale(scene_item, scale)
scene_width, scene_height = apply_scale(
scale.x, scale.y, scene_width, scene_height
)
next_pos = obs.vec2()
next_pos.x, next_pos.y = get_position()
next_pos.x -= self.offset_x
next_pos.y -= self.offset_y
## base: 1920*1080, i should add something to make this automatically change based on the Desktop Capture used
## maybe make it able to use multiple monitors as well?
ratio_x = next_pos.x/1920
ratio_y = next_pos.y/1080
target_scale = obs.vec2()
target = obs.obs_get_source_by_name(self.target_name)
obs.obs_sceneitem_get_scale(target_item, target_scale)
target_x = obs.obs_source_get_width(target) * target_scale.x
target_y = obs.obs_source_get_height(target) * target_scale.y
next_pos.x = lerp(0, target_x, ratio_x)
next_pos.y = lerp(0, target_y, ratio_y)
obs.obs_sceneitem_set_pos(scene_item, next_pos)
obs.obs_data_release(settings)
obs.obs_scene_release(scene)
obs.obs_source_release(source)
def update_crop(self):
"""
Create 2 display captures.
Create crop filter with this name: cropXY.
Check relative.
Set Width and Height to relatively small numbers e.g : 64x64 .
Image mask blend + color correction might be an option too.
Run script,select this source as cursor source , check Update crop, click start.
"""
source = obs.obs_get_source_by_name(self.source_name)
crop = obs.obs_source_get_filter_by_name(source, "cropXY")
filter_settings = obs.obs_source_get_settings(crop)
_x, _y = get_position()
# https://github.com/obsproject/obs-studio/blob/79981889c6d87d6e371e9dc8fcaad36f06eb9c9e/plugins/obs-filters/crop-filter.c#L87-L93
w = obs.obs_data_get_int(filter_settings, "cx")
h = obs.obs_data_get_int(filter_settings, "cy")
h, w = int(h / 2), int(w / 2)
obs.obs_data_set_int(filter_settings, "left", _x - h)
obs.obs_data_set_int(filter_settings, "top", _y - w)
obs.obs_source_update(crop, filter_settings)
obs.obs_data_release(filter_settings)
obs.obs_source_release(source)
obs.obs_source_release(crop)
def ticker(self):
""" how fast update.One callback at time with lock"""
if self.lock:
if self.update_xy:
self.update_crop()
self.update_cursor()
else:
self.update_cursor()
if not self.lock:
obs.remove_current_callback()
py_cursor = CursorAsSource()
def stop_pressed(props, prop):
py_cursor.flag = True
py_cursor.lock = False
def start_pressed(props, prop):
if py_cursor.source_name != "" and py_cursor.flag:
obs.timer_add(py_cursor.ticker, py_cursor.refresh_rate)
py_cursor.lock = True
py_cursor.flag = False # to keep only one timer callback
def script_defaults(settings):
obs.obs_data_set_default_int(settings, "_refresh_rate", py_cursor.refresh_rate)
def script_update(settings):
py_cursor.update_xy = obs.obs_data_get_bool(settings, "bool_yn")
py_cursor.source_name = obs.obs_data_get_string(settings, "source")
py_cursor.target_name = obs.obs_data_get_string(settings, "target")
py_cursor.refresh_rate = obs.obs_data_get_int(settings, "_refresh_rate")
py_cursor.offset_x = obs.obs_data_get_int(settings, "_offset_x")
py_cursor.offset_y = obs.obs_data_get_int(settings, "_offset_y")
def script_properties():
props = obs.obs_properties_create()
number = obs.obs_properties_add_int(
props, "_refresh_rate", "Refresh rate (ms)", 15, 300, 5
)
## i am only winging this so please forgive me
offsetx = obs.obs_properties_add_int(
props, "_offset_x", "Offset X", -5000, 5000, 1
)
offsety = obs.obs_properties_add_int(
props, "_offset_y", "Offset Y", -5000, 5000, 1
)
p1 = obs.obs_properties_add_list(
props,
"source",
"Select cursor source",
obs.OBS_COMBO_TYPE_EDITABLE,
obs.OBS_COMBO_FORMAT_STRING,
)
p2 = obs.obs_properties_add_list(
props,
"target",
"Select target window",
obs.OBS_COMBO_TYPE_EDITABLE,
obs.OBS_COMBO_FORMAT_STRING,
)
sources = obs.obs_enum_sources()
if sources is not None:
## property 1 for image source
for source in sources:
source_id = obs.obs_source_get_unversioned_id(source)
name = obs.obs_source_get_name(source)
obs.obs_property_list_add_string(p1, name, name)
## property 2 for target window
for target in sources:
source_id = obs.obs_source_get_unversioned_id(target)
name = obs.obs_source_get_name(target)
obs.obs_property_list_add_string(p2, name, name)
obs.source_list_release(sources)
obs.obs_properties_add_button(props, "button", "Stop", stop_pressed)
obs.obs_properties_add_button(props, "button2", "Start", start_pressed)
obs.obs_properties_add_bool(props, "bool_yn", "Update crop")
return props
``` |
{
"source": "34-Matt/Latex-Math",
"score": 3
} |
#### File: 34-Matt/Latex-Math/equationClass.py
```python
from labeldata import loadDict_BA
class equation:
"""A class to store the output of a CNN determining an equation of variable that can be printed to LaTeX code
"""
def __init__(self, vals, states):
"""Class initializer
Args:
vals (int list): list of ints that appear represent sequentially appearing characters in the equation
states (int list): list of states corresponding to each character that denote superscript, subscript, or normal character
"""
assert len(vals) == len(states),'vals and states should be the same size'
self.terms = vals
self.states = states
self.characters = loadDict_BA("LabelDict.csv")
def appendTerm(self, val, state):
"""Append another term onto the end of an existing equation
Args:
val (int): Value representing character to append on
state (int): State of value, 1 = superscript, 0 = normal, -1 = subscript
"""
self.terms.append(val)
self.states.append(state)
def printLatex(self):
"""Output a string of LaTeX code that would generate the eqution represented by this object
"""
global specialCharacters
latex = "$"
prevState = 0
for index,state in zip(self.terms, self.states):
# Requires loading of precreated characters array from labeldata
term = self.characters[index]
# Handle sub/superscript with state
if prevState != state:
# Only change states if current state is different than previous
if prevState == 0:
if state == 1:
# Start a superscript from normal
latex = latex + "^{"
elif state == -1:
# Start a supscript from normal
latex = latex + "_{"
else: # previous state was not normal
if state == 0:
# End previous special state and enter normal state
latex = latex + "}"
elif state == 1:
# End previous special state and enter superscirpt state
latex = latex + "}^{"
elif state == -1:
# End previous special state and enter supscirpt state
latex = latex + "}_{"
# Print out term
if len(term) > 1 or term == "{" or term == "}":
# If a special character add the corresponding LaTeX shortcut
latex = latex + "\\"
latex = latex + term
prevState = state
return latex + "$"
```
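A small usage sketch of the class above; the character table is stubbed directly so the printed output does not depend on the contents of LabelDict.csv (which the constructor still loads, so that file must exist).
```python
# Hypothetical example; the index-to-character mapping below is made up.
from equationClass import equation

eq = equation([], [])
eq.characters = {0: "x", 1: "2", 2: "+", 3: "3", 4: "y"}
for val, state in [(0, 0), (1, 1), (2, 0), (3, 0), (4, 0)]:  # 1 = superscript
    eq.appendTerm(val, state)
print(eq.printLatex())   # -> $x^{2}+3y$
```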
#### File: 34-Matt/Latex-Math/FlaskWebpage.py
```python
from flask import Flask, jsonify, request, render_template
import numpy as np
import joblib
import cv2
import traceback
from Box_Character import Box_Character
from equationClass import equation
from TrainCNN import loadModel
app = Flask(__name__)
model = None
@app.route('/')
def main():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('index.html')
@app.route('/run',methods=["POST"])
def run():
global model
try:
# Initialize equation storage
LatexEq = equation([],[])
# Grab user image
image = request.files['file'].read()
arr = cv2.imdecode(np.fromstring(image,np.uint8), cv2.IMREAD_UNCHANGED)
# Need to breakup images into parts
images = Box_Character(arr)
# Predict each part and append to equation
for im in images:
im = im.reshape((1,45,45,1))
preds = model.predict(im)
print(preds)
pred = preds.argmax()
print(pred)
LatexEq.appendTerm(pred,0)
# Latex format
latex = LatexEq.printLatex()
# Send to webpage
return jsonify({
"message": f"Latex Format: {latex}",
"latex":latex
})
except Exception as e:
print(traceback.format_exc())
return jsonify({
"message" : f"An error occurred. {e}"
})
@app.route('/run-ui')
def run_ui():
return render_template("process.html")
if __name__ == '__main__':
model = loadModel((45,45,1),66,'training/cp-0016.ckpt')
app.run(debug=True)
```
#### File: 34-Matt/Latex-Math/labeldata.py
```python
import os
import numpy as np
import imageio
import csv
import sys
from sklearn.model_selection import train_test_split
import cv2
import pickle
def createDict(images_path):
#images_path = './extracted_images/'
dirlist = os.listdir(images_path)
single = []
multiple = []
for item in dirlist:
item = item.lower() #make everything lowercase
if len(item) == 1:
single.append(item)
else:
multiple.append(item)
multiple.sort() #alphabetical order
#single_ascii = []
#for item in single:
# single_ascii.append(ord(item)) #converts strings to ascii equivalent
#single_ascii.sort() #ascii numerical order
single.sort() #ascii numerical order
dict = {}
counter = 0
for item in multiple:
dict[item] = counter
counter += 1
for item in single:
dict[item] = counter
counter += 1
    # write the mapping to a CSV file
file = open("LabelDict.csv","w")
w = csv.writer(file)
for key, val in dict.items():
w.writerow([key,val])
file.close()
def loadDict_AB(file_name):
dict = {}
with open(file_name) as file:
readCSV = csv.reader(file)
for row in readCSV:
if len(row) > 0:
dict[row[0]] = int(row[1])
return dict
def loadDict_BA(file_name):
dict = {}
with open(file_name) as file:
readCSV = csv.reader(file)
for row in readCSV:
if len(row) > 0:
dict[int(row[1])] = row[0]
return dict
def loadDataset(file_name1,file_name2,rate = 0.2): #file_name1 location of all characters, file_name2 dict
    dict = loadDict_AB(file_name2)
ds1 = os.listdir(file_name1)
file_count = sum([len(files) for r, d, files in os.walk(file_name1)])
counter = 0
X = np.empty((0,45,45),dtype=np.uint8)
Y = np.empty((0,1),dtype=np.uint8)
for d in ds1:
folder = os.path.join(file_name1,d)
ds2 = os.listdir(folder)
d = d.lower()
for d2 in ds2:
filei = os.path.join(folder,d2)
image = cv2.imread(filei)
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # Convert to gray
npi = np.asarray(image).reshape(45,45) #might need to change
X = np.append(X, [npi],axis = 0) #might need to change
Y = np.append(Y,dict[d])
counter += 1
output_string = f"Image File {counter} of {file_count}\n"
sys.stdout.write(output_string)
sys.stdout.flush()
#x_train,x_test,y_train,y_test = train_test_split(X,Y,test_size = rate)
return X, Y
if __name__ == '__main__':
path = 'C:/Users/cdunc/Documents/CSM Grad School Work/2019/Fall/CSCI 575B - Machine Learning/Group Project/Data/Single Characters/Removed Duplicates & Symbols'
createDict(path)
dict_name = 'LabelDict.csv'
    dict = loadDict_AB(dict_name)
#for key,val in dict.items():
# print("{} : {}".format(key,val))
#x_train, x_test, y_train, y_test = loadDataset(path,dict_name,rate = 0.2)
X, Y = loadDataset(path,dict_name,rate = 0.2)
with open('X_Y_Data.pickle', 'wb') as f:
pickle.dump([X, Y], f)
```
#### File: 34-Matt/Latex-Math/TrainCNN.py
```python
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential # For constructing model
from tensorflow.keras.layers import Dense, Dropout, Flatten # Layer cores
from tensorflow.keras.layers import Conv2D, MaxPooling2D # CNN layers
from tensorflow.keras.utils import to_categorical # Extra utilities
import pickle
from sklearn.model_selection import train_test_split
import os
def loadData(fileName,size=0.2):
with open(fileName, 'rb') as f:
X, Y = pickle.load(f)
X=X.reshape(-1,45,45,1)
X_train, X_test, y_train, y_test = train_test_split(X,Y,test_size = size)
return X_train, X_test, y_train, y_test
def createModel(input,output):
model = Sequential()
    # Images are 45 by 45
    model.add(Conv2D(32, (3,3), activation='relu', input_shape=input)) #43 by 43
    model.add(MaxPooling2D())
    model.add(Conv2D(64, (3,3), activation='relu')) #19 by 19
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (3,3), activation='relu')) #7 by 7
    model.add(MaxPooling2D())
    model.add(Dropout(rate=0.15))
    model.add(Flatten()) #1152 by 1
model.add(Dense(500, activation='relu')) #500 by 1
model.add(Dropout(0.2))
model.add(Dense(250, activation='relu')) #250 by 1
model.add(Dropout(0.2))
model.add(Dense(125, activation='relu')) #120 by 1
model.add(Dropout(0.2))
    model.add(Dense(output, activation='softmax')) # output classes (66: english letters, digits, and symbols)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def loadModel(input,output,fileName=None):
if fileName is None:
model = loadLatestModel(input,output)
else:
model = createModel(input,output)
#model = Sequential()
print('Loading weights')
model.load_weights(fileName)
return model
def loadLatestModel(input,output):
    model = createModel(input,output) #Currently (45,45,1),66
latestPath = tf.train.latest_checkpoint('training')
model.load_weights(latestPath)
return model
def trainModel(model, X_train, y_train, X_test, y_test, ep=50, initial=0):
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
period = 2)
model.save_weights(checkpoint_path.format(epoch=initial))
model.fit(X_train,
y_train,
batch_size = 100,
epochs=ep,
callbacks=[cp_callback],
validation_data=(X_test,y_test),
verbose=2,
initial_epoch=initial)
return model
if __name__ == "__main__":
X_train, X_test, y_train, y_test = loadData('X_Y_Data.pickle')
model = createModel(X_train.shape[1:],66)
model = trainModel(model, X_train, y_train, X_test, y_test,1000)
``` |
{
"source": "34r7h/pycardano",
"score": 2
} |
#### File: test/pycardano/test_nativescript.py
```python
from test.pycardano.util import check_two_way_cbor
import pytest
from pycardano.exception import InvalidArgumentException
from pycardano.key import VerificationKey
from pycardano.nativescript import (
InvalidBefore,
InvalidHereAfter,
ScriptAll,
ScriptAny,
ScriptNofK,
ScriptPubkey,
)
from pycardano.transaction import Transaction
"""The following ground truths of script hashes (policy ID) are generated from cardano-cli."""
def test_pubkey():
vk = VerificationKey.from_cbor(
"<KEY>"
)
script = ScriptPubkey(key_hash=vk.hash())
assert "88d1bd864d184909138e772d5b71b312113a985590fb551e8b35f50c" == str(
script.hash()
)
check_two_way_cbor(script)
def test_alter_script_type_number_with_exception():
with pytest.raises(InvalidArgumentException):
vk = VerificationKey.from_cbor(
"<KEY>"
)
script = ScriptPubkey(key_hash=vk.hash(), TYPE=3)
def test_script_all():
vk1 = VerificationKey.from_cbor(
"<KEY>"
)
vk2 = VerificationKey.from_cbor(
"<KEY>"
)
spk1 = ScriptPubkey(key_hash=vk1.hash())
spk2 = ScriptPubkey(key_hash=vk2.hash())
before = InvalidHereAfter(123456789)
after = InvalidBefore(123456780)
script = ScriptAll([before, after, spk1, spk2])
assert "ec8b7d1dd0b124e8333d3fa8d818f6eac068231a287554e9ceae490e" == str(
script.hash()
)
check_two_way_cbor(script)
vk1 = VerificationKey.from_cbor(
"<KEY>"
)
spk1 = ScriptPubkey(key_hash=vk1.hash())
before = InvalidHereAfter(80059041)
script = ScriptAll([spk1, before])
assert "b9ef27af6a13e3f779bf77c1f624966068b2464ea92b59e8d26fa19b" == str(
script.hash()
)
def test_script_any():
vk1 = VerificationKey.from_cbor(
"<KEY>"
)
vk2 = VerificationKey.from_cbor(
"<KEY>"
)
spk1 = ScriptPubkey(key_hash=vk1.hash())
spk2 = ScriptPubkey(key_hash=vk2.hash())
before = InvalidHereAfter(123456789)
after = InvalidBefore(123456780)
script = ScriptAny([before, after, spk1, spk2])
assert "2cca2c35ff880760b34e42c87172125d2bad18d8bcf42e209298648b" == str(
script.hash()
)
check_two_way_cbor(script)
def test_script_nofk():
vk1 = VerificationKey.from_cbor(
"<KEY>"
)
vk2 = VerificationKey.from_cbor(
"<KEY>"
)
spk1 = ScriptPubkey(key_hash=vk1.hash())
spk2 = ScriptPubkey(key_hash=vk2.hash())
before = InvalidHereAfter(123456789)
after = InvalidBefore(123456780)
script = ScriptNofK(2, [before, after, spk1, spk2])
assert "088a24a57345f12db09c6eddac2e88edf281bf766e66a98ff1045c0d" == str(
script.hash()
)
check_two_way_cbor(script)
def test_full_tx():
cbor = (
"84a60081825820b35a4ba9ef3ce21adcd6879d08553642224304704d206c74d3ffb3e6eed3ca28000d80018182581d60cc304"
"97f4ff962f4c1dca54cceefe39f86f1d7179668009f8eb71e598200a1581c50ab3393739cfa524cbe554c88d13bd41a356794"
"0af6bbf780a5854ba24f5365636f6e6454657374746f6b656e1a009896804954657374746f6b656e1a00989680021a000493e"
"<KEY>"
"6e1a009896804954657374746f6b656e1a00989680a200828258206443a101bdb948366fc87369336224595d36d8b0eee5602"
"<KEY>"
"6e4b4c73eabb25af91064d9cdebce4bad6246a51460b890b8258205797dc2cc919dfec0bb849551ebdf30d96e5cbe0f33f734"
"a87fe826db30f7ef95840d4fefcc897e8271f9639a02b4df91f68f4b16335569492a2df531e7974e57ae5778d8cf943981f86"
"3bdf4542029664d54143d150de277304fd3cb1eb7ed29d04018182018382051a075bcd158200581c9139e5c0a42f0f2389634"
"c3dd18dc621f5594c5ba825d9a8883c66278200581c835600a2be276a18a4bebf0225d728f090f724f4c0acd591d066fa6ff5f6"
)
tx = Transaction.from_cbor(cbor)
vk1 = VerificationKey.from_cbor(
"<KEY>"
)
vk2 = VerificationKey.from_cbor(
"<KEY>"
)
spk1 = ScriptPubkey(key_hash=vk1.hash())
spk2 = ScriptPubkey(key_hash=vk2.hash())
before = InvalidHereAfter(123456789)
script = ScriptAll([before, spk1, spk2])
assert tx.transaction_witness_set.native_scripts[0] == script
```
#### File: test/pycardano/test_serialization.py
```python
from dataclasses import dataclass, field
from test.pycardano.util import check_two_way_cbor
from pycardano.serialization import ArrayCBORSerializable, MapCBORSerializable
def test_array_cbor_serializable():
@dataclass
class Test1(ArrayCBORSerializable):
a: str
b: str = None
@dataclass
class Test2(ArrayCBORSerializable):
c: str
test1: Test1
t = Test2(c="c", test1=Test1(a="a"))
assert t.to_cbor() == "826163826161f6"
check_two_way_cbor(t)
def test_array_cbor_serializable_optional_field():
@dataclass
class Test1(ArrayCBORSerializable):
a: str
b: str = field(default=None, metadata={"optional": True})
@dataclass
class Test2(ArrayCBORSerializable):
c: str
test1: Test1
t = Test2(c="c", test1=Test1(a="a"))
assert t.test1.to_shallow_primitive() == ["a"]
assert t.to_cbor() == "826163816161"
check_two_way_cbor(t)
def test_map_cbor_serializable():
@dataclass
class Test1(MapCBORSerializable):
a: str = ""
b: str = ""
@dataclass
class Test2(MapCBORSerializable):
c: str = None
test1: Test1 = Test1()
t = Test2(test1=Test1(a="a"))
assert t.to_cbor() == "a26163f6657465737431a261616161616260"
check_two_way_cbor(t)
def test_map_cbor_serializable_custom_keys():
@dataclass
class Test1(MapCBORSerializable):
a: str = field(default="", metadata={"key": "0"})
b: str = field(default="", metadata={"key": "1"})
@dataclass
class Test2(MapCBORSerializable):
c: str = field(default=None, metadata={"key": "0", "optional": True})
test1: Test1 = field(default=Test1(), metadata={"key": "1"})
t = Test2(test1=Test1(a="a"))
assert t.to_primitive() == {"1": {"0": "a", "1": ""}}
assert t.to_cbor() == "a16131a261306161613160"
check_two_way_cbor(t)
``` |
{
"source": "34ro/tapyrus-core",
"score": 2
} |
#### File: test/functional/feature_serialization.py
```python
from codecs import encode
from io import BytesIO
import struct
from test_framework.blocktools import create_coinbase, create_block, create_transaction, create_tx_with_script, create_witness_tx, add_witness_commitment
from test_framework.messages import msg_block, hash256, CTransaction, ToHex, FromHex, CTxIn, CTxOut, COutPoint
from test_framework.mininode import P2PDataStore, mininode_lock
from test_framework.script import CScript, hash160, OP_1, OP_DROP, OP_HASH160, OP_EQUAL, OP_TRUE, SignatureHash, SIGHASH_ALL
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal, bytes_to_hex_str, hex_str_to_bytes, wait_until
from test_framework.key import CECKey
CHAIN_HEIGHT = 111
REJECT_INVALID = 16
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
try:
newscript.append(i[0:-1] + b'\0' + i[-1:])
except TypeError:
newscript.append(struct.pack("<i", i) + b'\0')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
def getInput(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def assert_not_equal(thing1, thing2):
if thing1 == thing2:
raise AssertionError("%s == %s" % (str(thing1), str(thing2)))
class SerializationTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-acceptnonstdtxn=0']]
self.setup_clean_chain = True
self.pubkey = ""
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.nodeaddress = self.nodes[0].getnewaddress()
self.pubkey = self.nodes[0].getaddressinfo(self.nodeaddress)["pubkey"]
self.log.info("Mining %d blocks", CHAIN_HEIGHT)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(CHAIN_HEIGHT, self.signblockprivkeys) ]
## P2PKH transaction
########################
self.log.info("Test using a P2PKH transaction")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0], self.nodeaddress, amount=10)
spendtx.rehash()
copy_spendTx = CTransaction(spendtx)
#cache hashes
hash = spendtx.hash
hashMalFix = spendtx.hashMalFix
#malleate
unDERify(spendtx)
spendtx.rehash()
# verify that hashMalFix remains the same even when signature is malleated and hash changes
assert_not_equal(hash, spendtx.hash)
assert_equal(hashMalFix, spendtx.hashMalFix)
# verify that hash is spendtx.serialize()
hash = encode(hash256(spendtx.serialize())[::-1], 'hex_codec').decode('ascii')
assert_equal(hash, spendtx.hash)
# verify that hashMalFix is spendtx.serialize(with_scriptsig=False)
hashMalFix = encode(hash256(spendtx.serialize(with_scriptsig=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hashMalFix, spendtx.hashMalFix)
assert_not_equal(hash, hashMalFix)
#as this transaction does not have witness data the following is true
assert_equal(spendtx.serialize(), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=True))
assert_not_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=False))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=True), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=False), spendtx.serialize_without_witness(with_scriptsig=False))
#Create block with only non-DER signature P2PKH transaction
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 1), block_time)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
# serialize with and without witness block remains the same
assert_equal(block.serialize(with_witness=True), block.serialize())
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=False))
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=False, with_scriptsig=True))
self.log.info("Reject block with non-DER signature")
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_INVALID)
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
self.log.info("Accept block with DER signature")
#recreate block with DER sig transaction
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 1), block_time)
block.vtx.append(copy_spendTx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
## P2SH transaction
########################
self.log.info("Test using P2SH transaction ")
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(self.coinbase_txids[1], 16), 0), b"", 0xffffffff))
tx.vout.append(CTxOut(10, P2SH_1))
tx.rehash()
spendtx_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx), [], "ALL", self.options.scheme)["hex"]
spendtx = FromHex(spendtx, spendtx_raw)
spendtx.rehash()
copy_spendTx = CTransaction(spendtx)
#cache hashes
hash = spendtx.hash
hashMalFix = spendtx.hashMalFix
#malleate
spendtxcopy = spendtx
unDERify(spendtxcopy)
spendtxcopy.rehash()
# verify that hashMalFix remains the same even when signature is malleated and hash changes
assert_not_equal(hash, spendtxcopy.hash)
assert_equal(hashMalFix, spendtxcopy.hashMalFix)
# verify that hash is spendtx.serialize()
hash = encode(hash256(spendtx.serialize(with_witness=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hash, spendtx.hash)
# verify that hashMalFix is spendtx.serialize(with_scriptsig=False)
hashMalFix = encode(hash256(spendtx.serialize(with_witness=False, with_scriptsig=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hashMalFix, spendtx.hashMalFix)
assert_not_equal(hash, hashMalFix)
#as this transaction does not have witness data the following is true
assert_equal(spendtx.serialize(), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_not_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=False))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=True), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=False), spendtx.serialize_without_witness(with_scriptsig=False))
#Create block with only non-DER signature P2SH transaction
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 2), block_time)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
# serialize with and without witness block remains the same
assert_equal(block.serialize(with_witness=True), block.serialize())
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=False))
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=True, with_scriptsig=True))
self.log.info("Reject block with non-DER signature")
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_INVALID)
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
self.log.info("Accept block with DER signature")
#recreate block with DER sig transaction
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 2), block_time)
block.vtx.append(copy_spendTx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
## redeem previous P2SH
#########################
self.log.info("Test using P2SH redeem transaction ")
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(COutPoint(block.vtx[1].malfixsha256, 0), b''))
(sighash, err) = SignatureHash(REDEEM_SCRIPT_1, tx, 1, SIGHASH_ALL)
signKey = CECKey()
signKey.set_secretbytes(b"horsebattery")
sig = signKey.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, REDEEM_SCRIPT_1])
tx.vin[0].scriptSig = scriptSig
tx.rehash()
spendtx_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx), [], "ALL", self.options.scheme)["hex"]
spendtx = FromHex(spendtx, spendtx_raw)
spendtx.rehash()
#cache hashes
hash = spendtx.hash
hashMalFix = spendtx.hashMalFix
#malleate
spendtxcopy = spendtx
unDERify(spendtxcopy)
spendtxcopy.rehash()
# verify that hashMalFix remains the same even when signature is malleated and hash changes
assert_not_equal(hash, spendtxcopy.hash)
assert_equal(hashMalFix, spendtxcopy.hashMalFix)
# verify that hash is spendtx.serialize()
hash = encode(hash256(spendtx.serialize(with_witness=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hash, spendtx.hash)
# verify that hashMalFix is spendtx.serialize(with_scriptsig=False)
hashMalFix = encode(hash256(spendtx.serialize(with_witness=False, with_scriptsig=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hashMalFix, spendtx.hashMalFix)
assert_not_equal(hash, hashMalFix)
#as this transaction does not have witness data the following is true
assert_equal(spendtx.serialize(), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=True))
assert_not_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=False))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=True), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=False), spendtx.serialize_without_witness(with_scriptsig=False))
#Create block with only non-DER signature P2SH redeem transaction
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 3), block_time)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
# serialize with and without witness block remains the same
assert_equal(block.serialize(with_witness=True), block.serialize())
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=False))
assert_equal(block.serialize(with_witness=True), block.serialize(with_witness=True, with_scriptsig=True))
self.log.info("Accept block with P2SH redeem transaction")
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
## p2sh_p2wpkh transaction
##############################
self.log.info("Test using p2sh_p2wpkh transaction ")
spendtxStr = create_witness_tx(self.nodes[0], True, getInput(self.coinbase_txids[4]), self.pubkey, amount=1.0)
        #get CTransaction object from above hex
spendtx = CTransaction()
spendtx.deserialize(BytesIO(hex_str_to_bytes(spendtxStr)))
spendtx.rehash()
#cache hashes
spendtx.rehash()
hash = spendtx.hash
hashMalFix = spendtx.hashMalFix
withash = spendtx.calc_sha256(True)
# malleate
unDERify(spendtx)
spendtx.rehash()
withash2 = spendtx.calc_sha256(True)
# verify that hashMalFix remains the same even when signature is malleated and hash changes
assert_equal(withash, withash2)
assert_equal(hash, spendtx.hash)
assert_equal(hashMalFix, spendtx.hashMalFix)
# verify that hash is spendtx.serialize()
hash = encode(hash256(spendtx.serialize())[::-1], 'hex_codec').decode('ascii')
assert_equal(hash, spendtx.hash)
# verify that hashMalFix is spendtx.serialize(with_scriptsig=False)
hashMalFix = encode(hash256(spendtx.serialize(with_scriptsig=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hashMalFix, spendtx.hashMalFix)
assert_not_equal(hash, hashMalFix)
#as this transaction does not have witness data the following is true
assert_equal(spendtx.serialize(), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=True))
assert_not_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=False))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=True), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=False), spendtx.serialize_without_witness(with_scriptsig=False))
#Create block with only non-DER signature p2sh_p2wpkh transaction
spendtxStr = self.nodes[0].signrawtransactionwithwallet(spendtxStr, [], "ALL", self.options.scheme)
assert("errors" not in spendtxStr or len(["errors"]) == 0)
spendtxStr = spendtxStr["hex"]
spendtx = CTransaction()
spendtx.deserialize(BytesIO(hex_str_to_bytes(spendtxStr)))
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 4), block_time)
block.vtx.append(spendtx)
add_witness_commitment(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
# serialize with and without witness
assert_equal(block.serialize(with_witness=False), block.serialize())
assert_not_equal(block.serialize(with_witness=True), block.serialize(with_witness=False))
assert_not_equal(block.serialize(with_witness=True), block.serialize(with_witness=False, with_scriptsig=True))
self.log.info("Reject block with p2sh_p2wpkh transaction and witness commitment")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", self.nodes[0].submitblock, bytes_to_hex_str(block.serialize(with_witness=True)))
assert_equal(self.nodes[0].getbestblockhash(), tip)
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 4), block_time)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
self.log.info("Accept block with p2sh_p2wpkh transaction")
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(with_witness=True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
## p2sh_p2wsh transaction
##############################
self.log.info("Test using p2sh_p2wsh transaction")
spendtxStr = create_witness_tx(self.nodes[0], True, getInput(self.coinbase_txids[5]), self.pubkey, amount=1.0)
        #get CTransaction object from above hex
spendtx = CTransaction()
spendtx.deserialize(BytesIO(hex_str_to_bytes(spendtxStr)))
spendtx.rehash()
#cache hashes
spendtx.rehash()
hash = spendtx.hash
hashMalFix = spendtx.hashMalFix
withash = spendtx.calc_sha256(True)
# malleate
unDERify(spendtx)
spendtx.rehash()
withash2 = spendtx.calc_sha256(True)
# verify that hashMalFix remains the same even when signature is malleated and hash changes
assert_equal(withash, withash2)
assert_equal(hash, spendtx.hash)
assert_equal(hashMalFix, spendtx.hashMalFix)
# verify that hash is spendtx.serialize()
hash = encode(hash256(spendtx.serialize())[::-1], 'hex_codec').decode('ascii')
assert_equal(hash, spendtx.hash)
# verify that hashMalFix is spendtx.serialize(with_scriptsig=False)
hashMalFix = encode(hash256(spendtx.serialize(with_scriptsig=False))[::-1], 'hex_codec').decode('ascii')
assert_equal(hashMalFix, spendtx.hashMalFix)
assert_not_equal(hash, hashMalFix)
#as this transaction does not have witness data the following is true
assert_equal(spendtx.serialize(), spendtx.serialize(with_witness=True, with_scriptsig=True))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=True))
assert_not_equal(spendtx.serialize(with_witness=False), spendtx.serialize(with_witness=True,with_scriptsig=False))
assert_equal(spendtx.serialize(with_witness=False), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=True), spendtx.serialize_without_witness(with_scriptsig=True))
assert_equal(spendtx.serialize_with_witness(with_scriptsig=False), spendtx.serialize_without_witness(with_scriptsig=False))
#Create block with only non-DER signature p2sh_p2wsh transaction
spendtxStr = self.nodes[0].signrawtransactionwithwallet(spendtxStr, [], "ALL", self.options.scheme)
assert("errors" not in spendtxStr or len(["errors"]) == 0)
spendtxStr = spendtxStr["hex"]
spendtx = CTransaction()
spendtx.deserialize(BytesIO(hex_str_to_bytes(spendtxStr)))
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 5), block_time)
block.vtx.append(spendtx)
add_witness_commitment(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
# serialize with and without witness
assert_equal(block.serialize(with_witness=False), block.serialize())
assert_not_equal(block.serialize(with_witness=True), block.serialize(with_witness=False))
assert_not_equal(block.serialize(with_witness=True), block.serialize(with_witness=False, with_scriptsig=True))
self.log.info("Reject block with p2sh_p2wsh transaction and witness commitment")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", self.nodes[0].submitblock, bytes_to_hex_str(block.serialize(with_witness=True)))
assert_equal(self.nodes[0].getbestblockhash(), tip)
block = create_block(int(tip, 16), create_coinbase(CHAIN_HEIGHT + 5), block_time)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.hashImMerkleRoot = block.calc_immutable_merkle_root()
block.rehash()
block.solve(self.signblockprivkeys)
self.log.info("Accept block with p2sh_p2wsh transaction")
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(with_witness=True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
if __name__ == '__main__':
SerializationTest().main()
``` |
{
"source": "350dotorg/akcrm",
"score": 2
} |
#### File: akcrm/cms/forms.py
```python
from actionkit import Client
from actionkit.models import CoreTag, CorePage
from django import forms
from django.conf import settings
from akcrm.cms.models import AllowedTag
class AllowedTagForm(forms.Form):
tag_name = forms.CharField(required=True)
def clean_tag_name(self):
tag_name = self.cleaned_data['tag_name'].strip()
if AllowedTag.objects.filter(tag_name=tag_name).exists():
raise forms.ValidationError("A tag with this name has already been installed.")
return tag_name
def create_core_tag(self, new_tag_name):
actionkit = Client()
tag = actionkit.Tag.create(dict(name=new_tag_name))
return tag['id']
def create_tag_page(self, tag_id):
actionkit = Client()
page = actionkit.ImportPage.create(dict(name="activator_tag_page_%s" % tag_id))
actionkit.ImportPage.save(dict(id=page['id'], tags=[
tag_id, settings.AKTIVATOR_TAG_PAGE_TAG_ID]))
return page['id']
def save(self):
tag_name = self.cleaned_data['tag_name'].strip()
try:
core_tag = CoreTag.objects.using("ak").get(name=tag_name)
except CoreTag.DoesNotExist:
tag_id = self.create_core_tag(tag_name)
else:
tag_id = core_tag.id
try:
core_page = CorePage.objects.using("ak").filter(
pagetags__tag=tag_id).get(pagetags__tag=settings.AKTIVATOR_TAG_PAGE_TAG_ID)
except CorePage.DoesNotExist:
page_id = self.create_tag_page(tag_id)
else:
page_id = core_page.id
self.cleaned_data.update({'ak_tag_id': tag_id, 'ak_page_id': page_id})
return AllowedTag.objects.create(**self.cleaned_data)
```
#### File: akcrm/crm/views.py
```python
from actionkit import Client
from actionkit.models import *
from django.conf import settings
from django.db import connections
from django.db.models import Count
from django.contrib import messages
from djangohelpers import rendered_with, allow_http
from django.http import HttpResponseNotFound, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import date
from django.utils.simplejson import JSONEncoder
import datetime
import dateutil.parser
import json
import os.path
import re
from akcrm.crm.forms import ContactForm
from akcrm.crm.models import ContactRecord
from akcrm.permissions import authorize
@authorize("add_contact_record")
@allow_http("POST")
@rendered_with("_form.html")
def contacts_for_user(request, akid):
akid = [i for i in request.POST.getlist('akid') if i and i.strip()][0]
post = request.POST.copy()
post['akid'] = akid
form = ContactForm(data=post)
if form.is_valid():
contact = form.save()
messages.success(request, u'Contact saved')
return HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
return locals()
def contact_record(request, contact_id):
contact = get_object_or_404(ContactRecord, id=contact_id)
```
#### File: mysql_echo/backend/base.py
```python
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.db.backends.mysql.base import DatabaseOperations as MySQLDatabaseOperations
class DatabaseOperations(MySQLDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return cursor._last_executed
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.ops = DatabaseOperations()
```
#### File: akcrm/permissions/__init__.py
```python
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponseForbidden
class LazyPermissions(object):
def __init__(self, request):
self.request = request
self._groups = None
def __getattr__(self, permission):
if self.request.user.is_superuser or self.request.user.is_staff:
return True
if self._groups is None:
self._groups = list(self.request.user.groups.values_list("name", flat=True))
return permission in self._groups
def authorize(permissions):
if isinstance(permissions, basestring):
permissions = [permissions]
def wrapper(func):
def inner(request, *args, **kw):
for permission in permissions:
if not getattr(request.PERMISSIONS, permission):
return HttpResponseForbidden()
return func(request, *args, **kw)
return inner
return wrapper
```
#### File: akcrm/permissions/middleware.py
```python
from akcrm.permissions import LazyPermissions
class PermissionsMiddleware(object):
def process_request(self, request):
setattr(request, 'PERMISSIONS', LazyPermissions(request))
```
#### File: akcrm/search/fields.py
```python
from search.models import SearchField
def get_fields():
_fields = {}
for field in SearchField.objects.all():
if field.category not in _fields:
_fields[field.category] = []
_fields[field.category].append((field.name, field.display_name))
return _fields
```
#### File: akcrm/search/tasks.py
```python
from celery.decorators import task, periodic_task
from akcrm.search.models import ActiveReport
@task
def poll_report(report):
report.poll_results()
``` |
{
"source": "3520kramer/EnterpriseServiceBus",
"score": 3
} |
#### File: simple_async_mq_server/models/message.py
```python
from uuid import uuid4
from ..utilities.helpers import current_datetime
from ..utilities.transformer import transform_to_dict
import json
import datetime
from typing import Union
class Message():
    def __init__(self, topic: str, published_time: str, content_format: str, org_content: str, content: Union[str, dict, list] = None,
consumed_time: str = None, uuid: str = None) -> None:
self.__uuid = uuid4().hex if uuid is None else uuid
self.__is_consumed = False
self.__topic = topic
self.__published_time = published_time
self.__content_format = content_format
self.__org_content = org_content
self.__content = content
self.__consumed_time = consumed_time
def __str__(self):
return str(self.__dict__)
@property
def uuid(self):
return self.__uuid
@property
def topic(self):
return self.__topic
@property
def org_content(self):
return self.__org_content
@property
def content_format(self):
return self.__content_format
@property
def is_consumed(self):
return self.__is_consumed
@is_consumed.setter
def is_consumed(self, is_consumed):
self.__is_consumed = is_consumed
@property
def content(self):
return self.__content
@content.setter
def content(self, content):
self.__content = content
@property
def consumed_time(self):
return self.__consumed_time
@consumed_time.setter
def consumed_time(self, consumed_time):
self.__consumed_time = consumed_time
def get_publish_message(self):
return {
'uuid': self.__uuid,
'topic': self.__topic,
'content_format': self.__content_format,
'content': self.__content
}
def get_log_message(self):
return {
'uuid': self.__uuid,
'is_consumed': self.__is_consumed,
'topic': self.__topic,
'published_time': str(self.__published_time),
'content_format': self.__content_format,
'content': json.dumps(self.__content),
'org_content': self.__org_content,
'consumed_time': self.consumed_time if self.consumed_time == None else str(self.consumed_time)
}
```
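A minimal usage sketch for the `Message` class above (not part of the original module): the topic, payload, and timestamps are illustrative assumptions, and the class is assumed to be in scope as defined in the file.
```python
from datetime import datetime

# Hypothetical example data; only the Message API defined above is exercised.
msg = Message(
    topic="orders",
    published_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    content_format="json",
    org_content='{"id": 1}',
    content={"id": 1},
)
print(msg.get_publish_message())  # dict sent to subscribers of the topic
msg.is_consumed = True
msg.consumed_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(msg.get_log_message())      # dict persisted to the message log
```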
#### File: simple_async_mq_server/utilities/helpers.py
```python
import uuid
from datetime import datetime
import configparser
def create_log_message(msg):
return {
'uuid': uuid.uuid4().hex,
'is_consumed': False,
'topic': msg['topic'],
'published_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'content_format': msg['content_format'],
'content': msg['content'],
'org_content': msg['org_content']
}
def current_datetime():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def create_publish_message(data):
return {
'uuid': uuid.uuid4().hex,
'topic': data['topic'],
'content_format': data['content_format'],
'content': data['content']
}
``` |
{
"source": "353055619/Q_Learning_O_Run",
"score": 4
} |
#### File: src/Q_Learning_O_Run/env.py
```python
__author__ = 'Godw'
import time
from typing import Tuple
class Env(object):
ACTION = ['left', 'right']
def __init__(self, length: int = 6, init_postion: int = 0, fresh_time: float = 0.3) -> None:
"""
        Initialize the environment.
        :param length: maximum number of steps the agent must take to finish the task
        :param init_postion: initial position of the agent
        :param fresh_time: time spent per step
"""
print("Env环境初始化")
self.l = length
self.env_list = ['-'] * self.l + ['T']
self.position = init_postion
self.FRESH_TIME = fresh_time
def refresh(self, init_postion: int = 0) -> None:
"""
        Reset the environment.
        :param init_postion: position of the agent 'o' after the reset
"""
self.position = init_postion
def update_env(self, action: str) -> Tuple[int, int, bool]:
"""
        Update the environment.
        :param action: the action to take
        :return: reward for this step, next position, whether the episode terminated
"""
reward = 0
termination = False
if action == 'right':
self.position = self.position + 1
if self.position == self.l:
reward = 1
termination = True
self.position = self.position - 1
if action == 'left':
self.position = self.position - 1
if self.position == -1:
self.position = 0
self.env_list[self.position] = 'o'
print(''.join(self.env_list))
time.sleep(self.FRESH_TIME)
self.env_list = ['-'] * self.l + ['T']
return reward, self.position, termination
``` |
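A small driver sketch for the `Env` class above (an assumption for illustration: the repository presumably pairs this environment with a Q-learning agent, but a random policy is enough to exercise the API).
```python
import random

env = Env(length=6, init_postion=0, fresh_time=0.1)
for episode in range(3):
    env.refresh(init_postion=0)
    terminated = False
    steps = 0
    reward = 0
    while not terminated:
        action = random.choice(Env.ACTION)  # 'left' or 'right'
        reward, position, terminated = env.update_env(action)
        steps += 1
    print("episode %d finished in %d steps with reward %d" % (episode, steps, reward))
```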
{
"source": "35359595/pyfs",
"score": 3
} |
#### File: pyfs/tvcheck/pyfs.py
```python
from urllib.request import urlopen
from os.path import expanduser, exists, join
from os import makedirs
import sys as Sys
import time
#Globals
HOME = expanduser('~')
speed = 0
metrics = 'bps'
START_TIME = time.process_time()
list_location = join(HOME, '.tvcheck', 'list')
def arg_parsing():
"""Arg parsing function"""
args = list()
    if len(list(Sys.argv)) > 1:
if len(Sys.argv[1]) == 1:
args = Sys.argv
return args
def check_filesystem():
"""Function for checking existance of working dir and list file"""
if not exists(join(HOME, '.tvcheck')):
makedirs(join(HOME, '.tvcheck'))
if not exists(join(HOME, '.tvcheck', 'list')):
with open(join(HOME, '.tvcheck', 'list'), mode = 'w+') as new_list:
print('No list file found.')
new_list.write(input('Paste episode list url:'))
def read_from_fs(url=None):
"""Getting the list from file located @ fs server
@params:
url - http link to file"""
with urlopen(url) as remote_list:
urls = list()
for line in remote_list:
urls.append(line.decode('utf-8'))
return urls
def read_from_file(path=None):
"""Reading lines from file and returning array of lines
@usage: read_from_file('/path/name.extension')"""
with open(path, mode='rt', encoding='utf-8') as link_list:
return list(link_list.readlines())
def append_to_file(path=None, new_link=None):
"""Appending one line to file, adding newline afterwards"""
with open(path, mode='at', encoding='utf-8') as local_list:
local_list.write(new_link.__add__('\n'))
def round_to_mb(bts):
"""Returns Mb from b rounded to .xx"""
return round((int(bts) / 1024 / 1024), 2)
def print_progress(iteration, total, start, prefix = '', suffix = '', decimals = 2, barLength = 100):
"""Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
"""
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
global metrics
global START_TIME
global speed
if (time.process_time() - START_TIME) * 1000 > 5:
START_TIME = time.process_time()
speed = round((iteration*8//(time.process_time() - start)//1024), decimals)
metrics = 'Kbps'
if speed > 1024:
speed = speed//1024
metrics = 'Mbps'
Sys.stdout.write('%s [%s] %s%s %s%s %s\r' % (prefix, bar, percents, '%', suffix, speed, metrics)),
Sys.stdout.flush()
if iteration == total:
print("\n")
def callback(progress, size=0):
"""Downloading progress displaying function"""
start = time.process_time()
print_progress(progress, size, start, prefix = 'Downloading:', suffix = 'Speed:', barLength = 50)
def copyfileobject(fsrc, fdst, callback, size, length=16*1024):
"""Function for saving the file. Iteration with callable function."""
copied = 0
while True:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
copied += len(buf)
callback(copied, size)
def download_episode(url=None, episode_name=None):
"""Downloading function"""
out_file = join(HOME, 'Downloads', episode_name)
with urlopen(url) as response, open(out_file, 'wb') as out_file:
size = response.getheader("Content-Length")
copyfileobject(response, out_file, callback, size)
def main():
"""Main function
@algorythm:
1. reads list of series from list file;
2. reads list of episodes from each of the series;
3. compares the list of episodes from fs.to with local list;
4. if new episodes found - downloading with aria2;
5. after successfull download append new episode to local list."""
args = list(arg_parsing())
check_filesystem()
if args:
if len(args) > 2:
            print('Too many arguments, bye!')
Sys.exit()
elif args[1] == 'l':
for series in read_from_file(join(HOME, '.tvcheck', 'list')):
print(series)
Sys.exit()
elif args[1] == 'n':
new_url = input('Provide URL of new list in format http://fs.to/flist/...:')
if new_url[:19] == 'http://fs.to/flist/':
append_to_file(list_location, new_url)
else:
print('Wrong Url format, bye!')
Sys.exit()
elif args[1] == 'h':
print("""
Parameters:\n
h - show this help;\n
l - show local series list;\n
n - add series to local list (follow the instructions).\n""")
Sys.exit()
else:
while True:
decision = input("Parameter not found. Continue check? Y/N: ")
if decision.upper() == 'Y':
break
elif decision.upper() == 'N':
Sys.exit()
#1:
local_list = read_from_file(list_location)
for url in local_list:
remote_list = read_from_fs(url)
#2:
local_list_name = join(HOME, '.tvcheck', url[19:].rstrip())
local_list = read_from_file(local_list_name)
#3:
if len(local_list) == len(remote_list):
print('No new episodes. Already watched', len(remote_list), 'episodes.')
elif len(remote_list) == 0:
print('Server returned empty list. Redownload:', url)
#4:
elif len(remote_list) > len(local_list):
new_episodes = list()
new_episodes_count = len(remote_list) - len(local_list)
while new_episodes_count > 0:
new_episodes.append(remote_list.pop().rstrip())
new_episodes_count -= 1
for new_link in new_episodes.__reversed__():
last_slash = new_link.rfind('/')
episode_name = new_link[last_slash+1:]
print('New episode:', episode_name)
global START_TIME
START_TIME = time.process_time()
download_episode(new_link, episode_name)
print(new_link)
#5:
append_to_file(local_list_name, new_link)
#call execution if runned from console
if __name__ == '__main__':
main()
``` |
{
"source": "353622088/CapsNet",
"score": 4
} |
#### File: examples/3_NeuralNetworks/neural_network.py
```python
from __future__ import print_function
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../tmp/data/", one_hot=False)
import tensorflow as tf
# Parameters
learning_rate = 0.1
num_steps = 1000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# Define the neural network
def neural_net(x_dict):
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# Hidden fully connected layer with 256 neurons
layer_1 = tf.layers.dense(x, n_hidden_1)
# Hidden fully connected layer with 256 neurons
layer_2 = tf.layers.dense(layer_1, n_hidden_2)
# Output fully connected layer with a neuron for each class
out_layer = tf.layers.dense(layer_2, num_classes)
return out_layer
# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
# Build the neural network
logits = neural_net(features)
# Predictions
pred_classes = tf.argmax(logits, axis=1)
pred_probas = tf.nn.softmax(logits)
# If prediction mode, early return
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op,
global_step=tf.train.get_global_step())
# Evaluate the accuracy of the model
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
# TF Estimators requires to return a EstimatorSpec, that specify
# the different ops for training, evaluating, ...
estim_specs = tf.estimator.EstimatorSpec(
mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy': acc_op})
return estim_specs
# Build the Estimator
model = tf.estimator.Estimator(model_fn)
# Define the input function for training
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
``` |
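A follow-up sketch (not part of the original example) showing how the trained `model` Estimator above could be used for prediction on a few test images; the batch of four images is an arbitrary illustration.
```python
# Uses `model` and `mnist` exactly as defined in the script above.
n_images = 4
test_images = mnist.test.images[:n_images]
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'images': test_images}, shuffle=False)
for i, pred in enumerate(model.predict(pred_input_fn)):
    print("Image %d: predicted digit %d, true digit %d"
          % (i, pred, mnist.test.labels[i]))
```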
{
"source": "353622088/hairstyle_try",
"score": 2
} |
#### File: hairstyle_try/script/main.py
```python
from PIL import Image
import os
import scipy.io as scio
import numpy as np
import cv2
import functools
import time
from common.config import DESKTOP as Desktop
from common.utils import get_baseInfo_tx
def time_cal(func):
@functools.wraps(func)
def wrapper(*args, **kw):
t1 = time.time()
        r = func(*args, **kw)  # run the function once and keep its return value instead of printing it directly
        if time.time() - t1 > 0.001:
            print('Function %s took %f seconds' % (func.__name__, time.time() - t1))
return r
return wrapper
@time_cal
def get_landmark_dict(file_path):
mat_file = file_path.split(".")[0] + '.mat'
if os.path.exists(mat_file):
landmark_dict = scio.loadmat(mat_file)
else:
landmark_dict = get_baseInfo_tx(file_path)
if landmark_dict['roll'] != 0:
Image.open(file_path).rotate(-landmark_dict['roll']).save(file_path)
landmark_dict = get_baseInfo_tx(file_path)
scio.savemat(mat_file, landmark_dict)
return landmark_dict
@time_cal
def check_right_eye(points):
fixed_points = points.copy()
if points[0][0] < points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[3]
fixed_points[2] = points[2]
fixed_points[3] = points[1]
fixed_points[4] = points[0]
fixed_points[5] = points[7]
fixed_points[6] = points[6]
fixed_points[7] = points[5]
return fixed_points
@time_cal
def check_left_eye(points):
fixed_points = points.copy()
if points[0][0] > points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[5]
fixed_points[2] = points[6]
fixed_points[3] = points[7]
fixed_points[4] = points[0]
fixed_points[5] = points[1]
fixed_points[6] = points[2]
fixed_points[7] = points[3]
return fixed_points
@time_cal
def check_face_profile(points):
# fixed_points = points[16:37]
# v_x = 2 * points[10][0]
#
# left_p = [[v_x - p[0], p[1]] for p in fixed_points[11:][::-1]]
# right_p = [[v_x - p[0], p[1]] for p in fixed_points[:10][::-1]]
# merge_p = np.vstack((left_p, fixed_points[10]))
# merge_p = np.vstack((merge_p, right_p))
# fixed_points = (fixed_points + merge_p) / 2
#
# m1 = get_similarity_matrix(fixed_points, merge_p,True)
# fixed_points2 = landmark_trans_by_m(points, m1)
# print(m1)
return points
@time_cal
def get_points(landmark_dict):
'''
:param landmark_dict:
    :return: left eye 0-7, left eyebrow 8-15, face profile 16-36, nose 37-49, mouth 50-71, right eyebrow 72-79, right eye 80-87, 88-89 left/right eye centers
'''
def _get_eye_center(points):
eye_center = [(points[0] + points[4])[0] // 2, (points[2] + points[6])[1] // 2]
return eye_center
p0 = np.vstack([check_left_eye(landmark_dict['left_eye']), landmark_dict['left_eyebrow']])
p1 = np.vstack([p0, landmark_dict['face_profile']])
p2 = np.vstack([p1, landmark_dict['nose']])
p3 = np.vstack([p2, landmark_dict['mouth']])
p4 = np.vstack([p3, landmark_dict['right_eyebrow']])
p5 = np.vstack([p4, check_right_eye(landmark_dict['right_eye'])])
p6 = np.vstack([p5, [_get_eye_center(landmark_dict['left_eye']), _get_eye_center(landmark_dict['right_eye'])]])
p6 = check_face_profile(p6)
return p6, [tuple(p) for p in p6]
@time_cal
def get_similarity_matrix(orange_points, tree_points, fullAffine=False):
'''
    transformation matrix from dst -> src
    :param dst_points: feature points of the target image
    :param src_points: feature points of the base image
:return: matrix
'''
m = cv2.estimateRigidTransform(np.array(orange_points), np.array(tree_points), fullAffine)
if m is None:
        print('abnormal case: estimateRigidTransform returned None')
m = cv2.getAffineTransform(np.float32(orange_points[:3]), np.float32(tree_points[:3]))
return m
@time_cal
def save_img(img_array, save_name, ifsave):
if ifsave:
cv2.imwrite(save_name, img_array)
@time_cal
def landmark_trans_by_m(points, m):
p1 = np.transpose(points, [1, 0])
p2 = np.pad(p1, ((0, 1), (0, 0)), 'constant', constant_values=(1, 1))
p3 = np.matmul(m, p2)
p4 = np.transpose(p3, [1, 0])
return p4
@time_cal
def get_measure_triangle():
triangles = scio.loadmat("triangle_matrix.mat")['triangle']
return [list(t.astype(np.int32)) for t in triangles]
@time_cal
def get_measure_triangle_skin():
triangles = scio.loadmat("triangle_matrix_skin_nose.mat")['triangle']
return [list(t.astype(np.int32)) for t in triangles]
@time_cal
def affine_transform(src, src_tri, dst_tri, size):
warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
# warp_mat = cv2.estimateRigidTransform(np.array(src_tri), np.array(dst_tri), True)
dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]),
None,
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return dst
@time_cal
def morph_triangle(src, dst, img, face_mask, t_src, t_dst, t, base_alpha, step=0):
    # t_src, t_dst, t are the triangle vertex coordinates of the feature points
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangles of the triangles, format: xmin, ymin, width, height
t1_rect = []
t2_rect = []
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
t1_rect.append(((t_src[i][0] - r1[0]), (t_src[i][1] - r1[1])))
t2_rect.append(((t_dst[i][0] - r2[0]), (t_dst[i][1] - r2[1])))
    # convert the coordinates to be relative to the rectangle's top-left corner
mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    # rectangular region containing the subdivided triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), (1., 1., 1.))
    # fill the subdivided triangle
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
img2_rect = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
size = (r[2], r[3])
warp_img_src = affine_transform(img1_rect, t1_rect, t_rect, size)
warp_img_dst = affine_transform(img2_rect, t2_rect, t_rect, size)
# alpha = 0.5 if step > 49 else alpha
if step < 16:
        # print('eyes')
alpha = min(1.25 * base_alpha, 1.0)
elif step < 28:
        # print('nose')
alpha = min(1.0 * base_alpha, 1.0)
elif step < 40:
        # print('eyebrows')
alpha = min(1.13 * base_alpha, 1.0)
elif step < 50:
        # print('eyebrows')
alpha = min(1.25 * base_alpha, 1.0)
else:
alpha = min(1.0 * base_alpha, 1.0)
img_rect = (1.0 - alpha) * warp_img_src + alpha * warp_img_dst
img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + img_rect * mask
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask[:, :, 0]) + 255 * mask[:, :, 0]
return img, face_mask
@time_cal
def affine_triangle(src, src2, dst, dst2, t_src, t_dst):
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
t1_rect = []
t2_rect = []
t2_rect_int = []
for i in range(0, 3):
t1_rect.append((t_src[i][0] - r1[0], t_src[i][1] - r1[1]))
t2_rect.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
t2_rect_int.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0))
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
size = (r2[2], r2[3])
if src2:
alpha_img1_rect = src2[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
alpha_img2_rect = affine_transform(alpha_img1_rect, t1_rect, t2_rect, size)
alpha_img2_rect = alpha_img2_rect * mask
img2_rect = affine_transform(img1_rect, t1_rect, t2_rect, size)
img2_rect = img2_rect * mask
# (1620, 280, 3)
# (800, 0, 820, 1620)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
(1.0, 1.0, 1.0) - mask)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect
if dst2:
dst2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
(1.0, 1.0, 1.0) - mask)
dst2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst2[r2[1]:r2[1] + r2[3],
r2[0]:r2[0] + r2[2]] + alpha_img2_rect
@time_cal
def morph_img(tree_img, tree_points, orange_img, orange_points, alpha):
def _get_morph_points(_tree_points, _orange_points, alphas):
'''
:param src_points:
:param dst_points:
        :param alphas: eye_alpha, face_alpha, other_alpha are the proportions taken from dst, respectively
:return:
'''
eye_alpha, face_alpha, other_alpha = alphas
_morph_points = (1 - other_alpha) * _tree_points + other_alpha * _orange_points
other_alpha2 = .5
_mask_points = (1 - other_alpha2) * _tree_points + other_alpha2 * _orange_points
eye_points = (1 - eye_alpha) * _tree_points + eye_alpha * _orange_points
face_points = (1 - face_alpha) * _tree_points + face_alpha * _orange_points
m1 = get_similarity_matrix(_morph_points[0:8] - _morph_points[88], eye_points[0:8] - eye_points[88])
_morph_points[0:8] = landmark_trans_by_m(_morph_points[0:8] - _morph_points[88], m1) + _morph_points[88]
m2 = get_similarity_matrix(_morph_points[80:88] - _morph_points[89], eye_points[80:88] - eye_points[89])
_morph_points[80:88] = landmark_trans_by_m(_morph_points[80:88] - _morph_points[89], m2) + _morph_points[89]
m3 = get_similarity_matrix(_morph_points[16:37] - _morph_points[26], face_points[16:37] - face_points[26])
_morph_points[16:37] = landmark_trans_by_m(_morph_points[16:37] - _morph_points[26], m3) + _morph_points[26]
return _mask_points, _morph_points,
tree_img = tree_img.astype(np.float32)
orange_img = orange_img.astype(np.float32)
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
_face_mask = np.zeros(orange_img.shape[:2], dtype=np.uint8)
mask_points, morph_points_ = _get_morph_points(tree_points, orange_points, alpha[:3])
# morph_points = dst_points
    # src_point format: [(), ()]
    # from the 88 landmarks, get the indices into the 149 triangles of the triangulation
dt = get_measure_triangle()[47:]
for i in range(0, len(dt)):
t1 = []
t2 = []
t = []
for j in range(0, 3):
t1.append(tree_points[dt[i][j]])
t2.append(orange_points[dt[i][j]])
t.append(mask_points[dt[i][j]])
_, face_maskk = morph_triangle(tree_img, orange_img, res_img, _face_mask, t1, t2, t, alpha[3], i)
return res_img, morph_points_, face_maskk
@time_cal
def tran_src(tree_img, alpha_tree_img, tree_points, orange_points):
"""
    Apply triangle-wise affine transforms to warp the template face contour onto the target face contour
:param src_img:
:param src_points:
:param dst_points:
:param face_area:
:return:
"""
h, w, c = tree_img.shape
h -= 1
w -= 1
mask_area = cv2.boundingRect(np.float32([orange_points]))
start_x = max(.9 * mask_area[0], 1)
start_y = max(.9 * mask_area[1], 1)
end_x = min(start_x + 1.2 * mask_area[2], w - 10)
end_y = min(start_y + 1.2 * mask_area[3], h - 10)
sum_x = start_x + end_x
sum_y = start_y + end_y
bound_area = np.int32([
[start_x, start_y], [end_x, start_y], [end_x, end_y], [start_x, end_y],
[0, 0], [w, 0], [w, h], [0, h],
[0.5 * sum_x, start_y], [end_x, 0.5 * sum_y], [0.5 * sum_x, end_y], [start_x, 0.5 * sum_y]
])
tree_list = np.vstack([tree_points, bound_area])
orange_list = np.vstack([orange_points, bound_area])
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
alpha_res_img = np.zeros(alpha_tree_img.shape, dtype=alpha_tree_img.dtype) if alpha_tree_img else ''
dt = get_measure_triangle()
for i in range(0, len(dt)):
t_src = []
t_dst = []
for j in range(0, 3):
t_src.append(tree_list[dt[i][j]])
t_dst.append(orange_list[dt[i][j]])
affine_triangle(tree_img, alpha_tree_img, res_img, alpha_res_img, t_src, t_dst)
return res_img, alpha_res_img
@time_cal
def merge_img(orange_img, tree_img, face_mask, orange_points, mat_rate=.88):
r = cv2.boundingRect(np.float32([orange_points]))
center = (r[0] + int(r[2] / 2), r[1] + int(int(r[3] / 2)))
mat = cv2.getRotationMatrix2D(center, 0, mat_rate)
face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
# face_mask = cv2.blur(face_mask, (3, 3))
# face_mask = cv2.GaussianBlur(face_mask, (27, 27), 1)
# kernel = np.ones((60, 60), np.uint8)
    # face_mask = cv2.dilate(face_mask, kernel)  # dilate
    # face_mask = cv2.erode(face_mask, kernel)  # erode
# face_mask = cv2.medianBlur(face_mask, 19)
return cv2.seamlessClone(np.uint8(orange_img), np.uint8(tree_img), face_mask, center, 1)
@time_cal
def toushi_img(orange_img, orange_points, tree_points, yaw=0):
if abs(yaw) <= 5:
rate = 0.1
else:
rate = min(abs(yaw), 12) / 12
_tree = rate * tree_points + (1 - rate) * orange_points
pts1 = np.float32([orange_points[17], orange_points[18], orange_points[34], orange_points[35]])
pts2 = np.float32([_tree[17], _tree[18], _tree[34], _tree[35]])
M = cv2.getPerspectiveTransform(pts1, pts2)
p2 = np.pad(orange_points, ((0, 0), (0, 1)), 'constant', constant_values=(1, 1))
new_data1 = np.matmul(p2, M.T)
new_data1 = new_data1 / np.repeat(new_data1[:, 2:3], 3, axis=1)
new_orange_points = new_data1[:, :2]
new_orange_img = cv2.warpPerspective(orange_img, M, (2 * orange_img.shape[1], 2 * orange_img.shape[0]))
return new_orange_img, new_orange_points
@time_cal
def resize_img(img_array, fusion_face_wid):
img_array = img_array[..., [2, 1, 0, 3]]
img = Image.fromarray(np.uint8(img_array), "RGBA")
wid, hei = img.size
std_face_wid = 257
fixed_loc = [500, 500]
# rate = std_face_wid / fusion_face_wid
    # could be optimized with a more reasonable comparison metric
rate = max(0.93, std_face_wid / fusion_face_wid)
img = img.resize([int(rate * wid), int(rate * hei)])
wid2, hei2 = img.size
diff_x = abs(int((rate - 1) * fixed_loc[0]))
diff_y = abs(int((rate - 1) * fixed_loc[1]))
if wid2 <= wid:
rr = ((diff_y, wid - wid2 - diff_y), (diff_x, wid - wid2 - diff_x), (0, 0))
image = np.pad(np.array(img), rr, mode='constant', constant_values=(0, 0))
img = Image.fromarray(np.uint8(image))
else:
img = img.crop([diff_x, diff_y, diff_x + wid, diff_y + hei])
return img
@time_cal
def get_data_analysis(skin_ori):
skin_ori_flatten = skin_ori.reshape([-1, 1])
skin_ori_index = np.flatnonzero(skin_ori_flatten != 0)
skin_ori_value = skin_ori_flatten[skin_ori_index]
skin_ori_value_max = np.max(skin_ori_value)
skin_ori_value_std = np.std(skin_ori_value)
skin_ori_value_min = np.min(skin_ori_value)
skin_ori_value_mean = np.mean(skin_ori_value)
return skin_ori_value_mean, skin_ori_value_std, skin_ori_value_max, skin_ori_value_min
def make_mask(face_mask, t):
    # t holds the triangle vertex coordinates of the feature points
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangle of the triangle, format: xmin, ymin, width, height
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
    # convert the coordinates to be relative to the rectangle's top-left corner
mask = np.zeros((r[3], r[2]), dtype=np.float32)
    # rectangular region containing the subdivided triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), 1)
    # fill the subdivided triangle
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask) + 1 * mask
return face_mask
def get_data_analysis(skin_ori):
skin_ori_flatten = skin_ori.reshape([-1, 1])
skin_ori_index = np.flatnonzero(skin_ori_flatten != 0)
skin_ori_value = skin_ori_flatten[skin_ori_index]
skin_ori_value_max = np.max(skin_ori_value)
skin_ori_value_std = np.std(skin_ori_value)
skin_ori_value_min = np.min(skin_ori_value)
skin_ori_value_mean = np.mean(skin_ori_value)
return skin_ori_value_mean, skin_ori_value_std, skin_ori_value_max, skin_ori_value_min
def smooth_light(orange_img, arr_point_tree):
    # skin-tone region
dt = get_measure_triangle_skin()[47:]
face_mask2 = np.zeros(orange_img.shape[:2], dtype=np.uint8)
for i in range(0, len(dt)):
t = []
for j in range(0, 3):
t.append(arr_point_tree[dt[i][j]])
face_mask = make_mask(face_mask2, t)
face_mask = np.array(face_mask, np.float32)
orange_img_hsv = cv2.cvtColor(orange_img, cv2.COLOR_BGR2HSV)
s = np.array(orange_img_hsv[:, :, 1], np.float32)
v = np.array(orange_img_hsv[:, :, 2], np.float32)
s_skin_ori = s * face_mask
v_skin_ori = v * face_mask
s_skin_ori_value_mean, s_skin_ori_value_std, s_skin_ori_value_max, s_skin_ori_value_min = get_data_analysis(
s_skin_ori)
v_skin_ori_value_mean, v_skin_ori_value_std, v_skin_ori_value_max, v_skin_ori_value_min = get_data_analysis(
v_skin_ori)
    # remove unevenness
# res_img_h = np.clip((h - h_skin_ori_value_mean) / h_skin_ori_value_std * 20 + 50, 16, 230)
res_img_s = np.clip((s - s_skin_ori_value_mean) / s_skin_ori_value_std * 20 + .95 * s_skin_ori_value_mean, 16, 250)
res_img_v = np.clip(
(v - v_skin_ori_value_mean) / v_skin_ori_value_std * .8 * v_skin_ori_value_std + .95 * v_skin_ori_value_mean,
16, 250)
    # counteract overly uniform skin
# res_img_s = np.clip(1.1 * res_img_s, 18, 250)
# res_img_v = np.clip(1.1 * res_img_v, 18, 250)
    # write the adjusted channels back into the original image
orange_img_hsv[:, :, 1] = res_img_s
orange_img_hsv[:, :, 2] = res_img_v
orange_img_hsv2 = cv2.cvtColor(orange_img_hsv, cv2.COLOR_HSV2BGR)
    # compose the final image
orange_img_hsv = orange_img * (1 - face_mask[:, :, None]) + orange_img_hsv2 * face_mask[:, :, None]
return np.uint8(orange_img_hsv)
@time_cal
def fusion(orange_path, orange_dict, temp_id, ifsave=True):
file_name = os.path.basename(orange_path).split('.')[0]
tree_file = "{}/Templates/{}/ori.jpg".format(Desktop, temp_id)
landmark_dict_tree = get_landmark_dict(tree_file)
arr_point_tree, list_point_tree = get_points(landmark_dict_tree)
tree_left_eye_center = arr_point_tree[88]
tree_right_eye_center = arr_point_tree[89]
# tree2 = cv2.imread(tree_file, cv2.IMREAD_UNCHANGED)
tree = cv2.imread(tree_file, cv2.IMREAD_COLOR)
tree_center = (tree_right_eye_center + tree_left_eye_center) / 2
tree_eye_dis = (tree_right_eye_center - tree_left_eye_center)[0]
# ---------------------------------------------------------#
# landmark_dict_orange = get_landmark_dict(orange_path)
# landmark_dict_orange = orange_dict
arr_point_orange, list_point_orange = get_points(orange_dict)
orange = cv2.imread(orange_path, cv2.IMREAD_COLOR)
# from script.mask_face_mask import kk
# orange = kk(orange_path)
orange = smooth_light(orange, arr_point_orange)
save_img(orange, '2-toushied_orange.png', ifsave)
# orange = cv2.cvtColor(orange, cv2.COLOR_BGR2HSV)
# orange[:, :, 1] = np.uint8(np.clip(1.1 * np.array(orange[:, :, 1], np.float32), 10, 250))
# orange[:, :, 2] = np.uint8(np.clip(1.1 * np.array(orange[:, :, 2], np.float32), 10, 250))
# orange = cv2.cvtColor(orange, cv2.COLOR_HSV2BGR)
orange, arr_point_orange = toushi_img(orange, arr_point_orange, arr_point_tree, yaw=orange_dict['yaw'])
save_img(orange, '2-toushied_orange.png', ifsave)
# arr_point_orange 90*2
orange_left_eye_center = arr_point_orange[88]
orange_right_eye_center = arr_point_orange[89]
orange_center = (orange_right_eye_center + orange_left_eye_center) / 2
orange_eye_dis = (orange_right_eye_center - orange_left_eye_center)[0]
# ---------------------------------------------------------#
    # align the orange image with the tree image
orange2tree_matrix = get_similarity_matrix(
orange_points=[orange_left_eye_center, orange_right_eye_center,
[orange_center[0], orange_center[1] + orange_eye_dis],
[orange_center[0], orange_center[1] - orange_eye_dis]],
tree_points=[tree_left_eye_center, tree_right_eye_center,
[tree_center[0], tree_center[1] + tree_eye_dis],
[tree_center[0], tree_center[1] - tree_eye_dis]], fullAffine=False)
    # orange image after alignment
orange_trans = cv2.warpAffine(orange, orange2tree_matrix, (tree.shape[1], tree.shape[0]))
save_img(orange_trans, '3-orange_trans.png'.format(file_name), ifsave)
    # orange landmarks after alignment
arr_point_orange_trans = landmark_trans_by_m(arr_point_orange, orange2tree_matrix)
    # extract the target region from orange and recombine it by the given ratios
orange_mask_trans, morph_points, orange_mask = morph_img(tree, arr_point_tree, orange_trans, arr_point_orange_trans,
                                                              alpha=[.2, .2, .2, .85])  # eyes, face, other
save_img(orange_mask, '4-orange_mask.png'.format(file_name), ifsave)
save_img(orange_mask_trans, '4-orange_mask_trans.png'.format(file_name), ifsave)
    # warp the tree/template image (mainly the face contour)
tree_trans, alpha_tree_trans = tran_src(tree, '', arr_point_tree, morph_points)
save_img(tree_trans, '5-tree_trans.png'.format(file_name), ifsave)
rgb_img = merge_img(orange_mask_trans, np.uint8(tree_trans), orange_mask, morph_points, .88)
# rgb_img = merge_img(orange_mask_trans, np.uint8(rgb_img), orange_mask, morph_points, .8)
# save_img(orange_mask, '6-tree_trans.png'.format(file_name), ifsave)
return rgb_img
if __name__ == '__main__':
root_dir = os.path.join(Desktop, "Templates", "test_samples")
test_file = os.path.join(root_dir, '9.png')
landmark_dict_orange = get_landmark_dict(test_file)
for i in range(8, 14):
temp_id = 'temp' + str(i)
res = fusion(test_file, landmark_dict_orange, temp_id, True)
save_path = os.path.join(root_dir, '9-{}.jpg'.format(temp_id))
save_img(res, save_path, True)
```
#### File: hairstyle_try/script/make_template.py
```python
import os
import scipy.io as scio
import time
from PIL import Image
import functools
from common.utils import get_baseInfo_tx
def time_cal(func):
@functools.wraps(func)
def wrapper(*args, **kw):
t1 = time.time()
        r = func(*args, **kw)  # run the function once and keep its return value instead of printing it directly
        if time.time() - t1 > 0.001:
            print('Function %s took %f seconds' % (func.__name__, time.time() - t1))
return r
return wrapper
@time_cal
def get_landmark_dict(file_path):
mat_file = file_path.split(".")[0] + '.mat'
if os.path.exists(mat_file):
landmark_dict = scio.loadmat(mat_file)
else:
landmark_dict = get_baseInfo_tx(file_path)
# if landmark_dict['roll'] != 0:
# Image.open(file_path).rotate(-landmark_dict['roll']).save(file_path)
# landmark_dict = get_baseInfo_tx(file_path)
scio.savemat(mat_file, landmark_dict)
return landmark_dict
if __name__ == '__main__':
'''
    Normalize the uploaded template into the standard format
'''
root_dir = 'F:\project\dxq\hairstyle_try/resource/temp6'
back_file = os.path.join(root_dir, 'ori.jpg')
get_landmark_dict(back_file)
```
#### File: hairstyle_try/service/fusion_service.py
```python
from PIL import Image
import os
import scipy.io as scio
import numpy as np
import cv2
import functools
import time
import urllib.request
from common.utils import get_baseInfo_tx
skin_triangles = scio.loadmat("resource/mat/triangle_matrix_skin_nose.mat")['triangle']
triangles = scio.loadmat("resource/mat/triangle_matrix.mat")['triangle']
def time_cal(func):
@functools.wraps(func)
def wrapper(*args, **kw):
t1 = time.time()
        r = func(*args, **kw)  # run the function once and keep its return value instead of printing it directly
        if time.time() - t1 > 0.001:
            print('Function %s took %f seconds' % (func.__name__, time.time() - t1))
return r
return wrapper
def get_landmark_dict(file_path, status='local'):
landmark_dict = get_baseInfo_tx(file_path, status)
# if landmark_dict['roll'] != 0:
# Image.open(file_path).rotate(-landmark_dict['roll']).save(file_path)
# landmark_dict = get_baseInfo_tx(file_path)
return landmark_dict
def get_temp_landmark_dict(file_path):
mat_file = file_path.split(".")[0] + '.mat'
if os.path.exists(mat_file):
landmark_dict = scio.loadmat(mat_file)
else:
landmark_dict = get_baseInfo_tx(file_path)
scio.savemat(mat_file, landmark_dict)
return landmark_dict
def check_right_eye(points):
fixed_points = points.copy()
if points[0][0] < points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[3]
fixed_points[2] = points[2]
fixed_points[3] = points[1]
fixed_points[4] = points[0]
fixed_points[5] = points[7]
fixed_points[6] = points[6]
fixed_points[7] = points[5]
return fixed_points
def check_left_eye(points):
fixed_points = points.copy()
if points[0][0] > points[1][0]:
fixed_points[0] = points[4]
fixed_points[1] = points[5]
fixed_points[2] = points[6]
fixed_points[3] = points[7]
fixed_points[4] = points[0]
fixed_points[5] = points[1]
fixed_points[6] = points[2]
fixed_points[7] = points[3]
return fixed_points
def check_face_profile(points):
# fixed_points = points[16:37]
# v_x = 2 * points[10][0]
#
# left_p = [[v_x - p[0], p[1]] for p in fixed_points[11:][::-1]]
# right_p = [[v_x - p[0], p[1]] for p in fixed_points[:10][::-1]]
# merge_p = np.vstack((left_p, fixed_points[10]))
# merge_p = np.vstack((merge_p, right_p))
# fixed_points = (fixed_points + merge_p) / 2
#
# m1 = get_similarity_matrix(fixed_points, merge_p,True)
# fixed_points2 = landmark_trans_by_m(points, m1)
# print(m1)
return points
def get_points(landmark_dict):
'''
:param landmark_dict:
    :return: left eye 0-7, left eyebrow 8-15, face profile 16-36, nose 37-49, mouth 50-71, right eyebrow 72-79, right eye 80-87, 88-89 left/right eye centers
'''
def _get_eye_center(points):
eye_center = [(points[0] + points[4])[0] // 2, (points[2] + points[6])[1] // 2]
return eye_center
p0 = np.vstack([check_left_eye(landmark_dict['left_eye']), landmark_dict['left_eyebrow']])
p1 = np.vstack([p0, landmark_dict['face_profile']])
p2 = np.vstack([p1, landmark_dict['nose']])
p3 = np.vstack([p2, landmark_dict['mouth']])
p4 = np.vstack([p3, landmark_dict['right_eyebrow']])
p5 = np.vstack([p4, check_right_eye(landmark_dict['right_eye'])])
p6 = np.vstack([p5, [_get_eye_center(landmark_dict['left_eye']), _get_eye_center(landmark_dict['right_eye'])]])
p6 = check_face_profile(p6)
return p6, [tuple(p) for p in p6]
def get_similarity_matrix(orange_points, tree_points, fullAffine=False):
'''
    transformation matrix from dst -> src
    :param dst_points: feature points of the target image
    :param src_points: feature points of the base image
:return: matrix
'''
m = cv2.estimateRigidTransform(np.array(orange_points), np.array(tree_points), fullAffine)
if m is None:
        print('abnormal case: estimateRigidTransform returned None')
m = cv2.getAffineTransform(np.float32(orange_points[:3]), np.float32(tree_points[:3]))
return m
def save_img(img_array, save_name, ifsave):
if ifsave:
cv2.imwrite(save_name, img_array)
def landmark_trans_by_m(points, m):
p1 = np.transpose(points, [1, 0])
p2 = np.pad(p1, ((0, 1), (0, 0)), 'constant', constant_values=(1, 1))
p3 = np.matmul(m, p2)
p4 = np.transpose(p3, [1, 0])
return p4
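# Minimal sketch of what landmark_trans_by_m computes (illustrative values): an (N, 2)
# landmark array is lifted to homogeneous coordinates and multiplied by a 2x3 affine
# matrix m, giving the transformed (N, 2) landmarks.
#
#   identity = np.float32([[1, 0, 0], [0, 1, 0]])
#   pts = np.float32([[10, 20], [30, 40]])
#   landmark_trans_by_m(pts, identity)  # -> the same points, unchanged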
def get_measure_triangle():
return [list(t.astype(np.int32)) for t in triangles]
def get_measure_triangle_skin():
return [list(t.astype(np.int32)) for t in skin_triangles]
def affine_transform(src, src_tri, dst_tri, size):
warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
# warp_mat = cv2.estimateRigidTransform(np.array(src_tri), np.array(dst_tri), True)
dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]),
None,
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return dst
def morph_triangle(src, dst, img, face_mask, t_src, t_dst, t, base_alpha, step=0):
# t_src, t_dst, t
    # are the triangle vertex coordinates of the corresponding feature points
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangle of each triangle, format: xmin, ymin, width, height
t1_rect = []
t2_rect = []
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
t1_rect.append(((t_src[i][0] - r1[0]), (t_src[i][1] - r1[1])))
t2_rect.append(((t_dst[i][0] - r2[0]), (t_dst[i][1] - r2[1])))
    # convert the coordinates to be relative to the top-left corner of the bounding rectangle
mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    # rectangular region containing the triangulated triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), (1., 1., 1.))
    # fill the triangulated triangle
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
img2_rect = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
size = (r[2], r[3])
warp_img_src = affine_transform(img1_rect, t1_rect, t_rect, size)
warp_img_dst = affine_transform(img2_rect, t2_rect, t_rect, size)
# alpha = 0.5 if step > 49 else alpha
if step < 16:
        # print('eyes')
alpha = min(1.25 * base_alpha, 1.0)
elif step < 28:
        # print('nose')
alpha = min(1.0 * base_alpha, 1.0)
elif step < 40:
        # print('eyebrows')
alpha = min(1.13 * base_alpha, 1.0)
elif step < 50:
        # print('eyebrows')
alpha = min(1.25 * base_alpha, 1.0)
else:
alpha = min(1.0 * base_alpha, 1.0)
img_rect = (1.0 - alpha) * warp_img_src + alpha * warp_img_dst
img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + img_rect * mask
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask[:, :, 0]) + 255 * mask[:, :, 0]
return img, face_mask
def affine_triangle(src, dst, t_src, t_dst):
r1 = cv2.boundingRect(np.float32([t_src]))
r2 = cv2.boundingRect(np.float32([t_dst]))
t1_rect = []
t2_rect = []
t2_rect_int = []
for i in range(0, 3):
t1_rect.append((t_src[i][0] - r1[0], t_src[i][1] - r1[1]))
t2_rect.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
t2_rect_int.append((t_dst[i][0] - r2[0], t_dst[i][1] - r2[1]))
mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0))
img1_rect = src[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
size = (r2[2], r2[3])
img2_rect = affine_transform(img1_rect, t1_rect, t2_rect, size)
img2_rect = img2_rect * mask
# (1620, 280, 3)
# (800, 0, 820, 1620)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * (
(1.0, 1.0, 1.0) - mask)
dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = dst[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect
def morph_img(tree_img, tree_points, orange_img, orange_points, alpha):
def _get_morph_points(_tree_points, _orange_points, alphas):
'''
        :param _tree_points: landmarks of the template (tree) image
        :param _orange_points: landmarks of the target (orange) image
        :param alphas: eye_alpha, face_alpha, other_alpha -- the proportions taken from the target landmarks
        :return:
'''
eye_alpha, face_alpha, other_alpha = alphas
_morph_points = (1 - other_alpha) * _tree_points + other_alpha * _orange_points
other_alpha2 = .5
_mask_points = (1 - other_alpha2) * _tree_points + other_alpha2 * _orange_points
eye_points = (1 - eye_alpha) * _tree_points + eye_alpha * _orange_points
face_points = (1 - face_alpha) * _tree_points + face_alpha * _orange_points
m1 = get_similarity_matrix(_morph_points[0:8] - _morph_points[88], eye_points[0:8] - eye_points[88])
_morph_points[0:8] = landmark_trans_by_m(_morph_points[0:8] - _morph_points[88], m1) + _morph_points[88]
m2 = get_similarity_matrix(_morph_points[80:88] - _morph_points[89], eye_points[80:88] - eye_points[89])
_morph_points[80:88] = landmark_trans_by_m(_morph_points[80:88] - _morph_points[89], m2) + _morph_points[89]
m3 = get_similarity_matrix(_morph_points[16:37] - _morph_points[26], face_points[16:37] - face_points[26])
_morph_points[16:37] = landmark_trans_by_m(_morph_points[16:37] - _morph_points[26], m3) + _morph_points[26]
return _mask_points, _morph_points,
tree_img = tree_img.astype(np.float32)
orange_img = orange_img.astype(np.float32)
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
_face_mask = np.zeros(orange_img.shape[:2], dtype=np.uint8)
mask_points, morph_points_ = _get_morph_points(tree_points, orange_points, alpha[:3])
# morph_points = dst_points
    # src_point format: [(), ()]
    # for each of the 149 triangles in the triangulation, get the indices of the corresponding 88 landmark points
dt = get_measure_triangle()[47:]
for i in range(0, len(dt)):
t1 = []
t2 = []
t = []
for j in range(0, 3):
t1.append(tree_points[dt[i][j]])
t2.append(orange_points[dt[i][j]])
t.append(mask_points[dt[i][j]])
_, face_maskk = morph_triangle(tree_img, orange_img, res_img, _face_mask, t1, t2, t, alpha[3], i)
return res_img, morph_points_, face_maskk
def tran_src(tree_img, tree_points, orange_points):
"""
    Apply triangle-wise affine transforms to warp the template image's face contour onto the target image's face contour.
    :param tree_img: template image
    :param tree_points: template landmarks
    :param orange_points: target landmarks
    :return:
"""
h, w, c = tree_img.shape
h -= 1
w -= 1
mask_area = cv2.boundingRect(np.float32([orange_points]))
start_x = max(.9 * mask_area[0], 1)
start_y = max(.9 * mask_area[1], 1)
end_x = min(start_x + 1.2 * mask_area[2], w - 10)
end_y = min(start_y + 1.2 * mask_area[3], h - 10)
sum_x = start_x + end_x
sum_y = start_y + end_y
bound_area = np.int32([
[start_x, start_y], [end_x, start_y], [end_x, end_y], [start_x, end_y],
[0, 0], [w, 0], [w, h], [0, h],
[0.5 * sum_x, start_y], [end_x, 0.5 * sum_y], [0.5 * sum_x, end_y], [start_x, 0.5 * sum_y]
])
tree_list = np.vstack([tree_points, bound_area])
orange_list = np.vstack([orange_points, bound_area])
res_img = np.zeros(tree_img.shape, dtype=tree_img.dtype)
dt = get_measure_triangle()
for i in range(0, len(dt)):
t_src = []
t_dst = []
for j in range(0, 3):
t_src.append(tree_list[dt[i][j]])
t_dst.append(orange_list[dt[i][j]])
affine_triangle(tree_img, res_img, t_src, t_dst)
return res_img
def merge_img(orange_img, tree_img, face_mask, orange_points, mat_rate=.88):
r = cv2.boundingRect(np.float32([orange_points]))
center = (r[0] + int(r[2] / 2), r[1] + int(int(r[3] / 2)))
mat = cv2.getRotationMatrix2D(center, 0, mat_rate)
face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
# face_mask = cv2.blur(face_mask, (3, 3))
# face_mask = cv2.GaussianBlur(face_mask, (27, 27), 1)
# kernel = np.ones((60, 60), np.uint8)
    # face_mask = cv2.dilate(face_mask, kernel)  # dilation
    # face_mask = cv2.erode(face_mask, kernel)  # erosion
# face_mask = cv2.medianBlur(face_mask, 19)
res = cv2.seamlessClone(np.uint8(orange_img), np.uint8(tree_img), face_mask, center, 1)
return res
def toushi_img(orange_img, orange_points, tree_points, yaw=0):
if abs(yaw) <= 5:
rate = 0.1
else:
rate = min(abs(yaw), 12) / 12
_tree = rate * tree_points + (1 - rate) * orange_points
pts1 = np.float32([orange_points[17], orange_points[18], orange_points[34], orange_points[35]])
pts2 = np.float32([_tree[17], _tree[18], _tree[34], _tree[35]])
M = cv2.getPerspectiveTransform(pts1, pts2)
p2 = np.pad(orange_points, ((0, 0), (0, 1)), 'constant', constant_values=(1, 1))
new_data1 = np.matmul(p2, M.T)
new_data1 = new_data1 / np.repeat(new_data1[:, 2:3], 3, axis=1)
new_orange_points = new_data1[:, :2]
new_orange_img = cv2.warpPerspective(orange_img, M, (2 * orange_img.shape[1], 2 * orange_img.shape[0]))
return new_orange_img, new_orange_points
def resize_img(img_array, fusion_face_wid):
img_array = img_array[..., [2, 1, 0, 3]]
img = Image.fromarray(np.uint8(img_array), "RGBA")
wid, hei = img.size
std_face_wid = 257
fixed_loc = [500, 500]
# rate = std_face_wid / fusion_face_wid
    # a more principled comparison metric could be used here
rate = max(0.93, std_face_wid / fusion_face_wid)
img = img.resize([int(rate * wid), int(rate * hei)])
wid2, hei2 = img.size
diff_x = abs(int((rate - 1) * fixed_loc[0]))
diff_y = abs(int((rate - 1) * fixed_loc[1]))
if wid2 <= wid:
rr = ((diff_y, wid - wid2 - diff_y), (diff_x, wid - wid2 - diff_x), (0, 0))
image = np.pad(np.array(img), rr, mode='constant', constant_values=(0, 0))
img = Image.fromarray(np.uint8(image))
else:
img = img.crop([diff_x, diff_y, diff_x + wid, diff_y + hei])
return img
def get_data_analysis(skin_ori):
skin_ori_flatten = skin_ori.reshape([-1, 1])
skin_ori_index = np.flatnonzero(skin_ori_flatten != 0)
skin_ori_value = skin_ori_flatten[skin_ori_index]
skin_ori_value_max = np.max(skin_ori_value)
skin_ori_value_std = np.std(skin_ori_value)
skin_ori_value_min = np.min(skin_ori_value)
skin_ori_value_mean = np.mean(skin_ori_value)
return skin_ori_value_mean, skin_ori_value_std, skin_ori_value_max, skin_ori_value_min
def make_mask(face_mask, t):
# t
    # t holds the triangle vertex coordinates of the feature points
r = cv2.boundingRect(np.float32([t]))
    # bounding rectangle of the triangle, format: xmin, ymin, width, height
t_rect = []
for i in range(0, 3):
t_rect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
    # convert the coordinates to be relative to the top-left corner of the bounding rectangle
mask = np.zeros((r[3], r[2]), dtype=np.float32)
    # rectangular region containing the triangulated triangle
cv2.fillConvexPoly(mask, np.int32(t_rect), 1)
    # fill the triangulated triangle
face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = face_mask[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (
1 - mask) + 1 * mask
return face_mask
def smooth_light(orange_img, arr_point_tree):
    # skin-tone region
dt = get_measure_triangle_skin()[47:]
face_mask2 = np.zeros(orange_img.shape[:2], dtype=np.uint8)
for i in range(0, len(dt)):
t = []
for j in range(0, 3):
t.append(arr_point_tree[dt[i][j]])
face_mask = make_mask(face_mask2, t)
face_mask = np.array(face_mask, np.float32)
orange_img_hsv = cv2.cvtColor(orange_img, cv2.COLOR_BGR2HSV)
s = np.array(orange_img_hsv[:, :, 1], np.float32)
v = np.array(orange_img_hsv[:, :, 2], np.float32)
s_skin_ori = s * face_mask
v_skin_ori = v * face_mask
s_skin_ori_value_mean, s_skin_ori_value_std, s_skin_ori_value_max, s_skin_ori_value_min = get_data_analysis(
s_skin_ori)
v_skin_ori_value_mean, v_skin_ori_value_std, v_skin_ori_value_max, v_skin_ori_value_min = get_data_analysis(
v_skin_ori)
    # remove unevenness
# res_img_h = np.clip((h - h_skin_ori_value_mean) / h_skin_ori_value_std * 20 + 50, 16, 230)
res_img_s = np.clip((s - s_skin_ori_value_mean) / s_skin_ori_value_std * 20 + .95 * s_skin_ori_value_mean, 16, 250)
res_img_v = np.clip(
(v - v_skin_ori_value_mean) / v_skin_ori_value_std * .8 * v_skin_ori_value_std + .95 * v_skin_ori_value_mean,
16, 250)
    # counteract over-uniformity
# res_img_s = np.clip(1.1 * res_img_s, 18, 250)
# res_img_v = np.clip(1.1 * res_img_v, 18, 250)
    # write the adjusted channels back into the original image
orange_img_hsv[:, :, 1] = res_img_s
orange_img_hsv[:, :, 2] = res_img_v
orange_img_hsv2 = cv2.cvtColor(orange_img_hsv, cv2.COLOR_HSV2BGR)
    # compose the final image
orange_img_hsv = orange_img * (1 - face_mask[:, :, None]) + orange_img_hsv2 * face_mask[:, :, None]
return np.uint8(orange_img_hsv)
def preprocess(landmark_dict):
for key in ['left_eye', 'left_eyebrow', 'face_profile', 'nose', 'mouth', 'right_eyebrow', 'right_eye']:
points = landmark_dict[key]
num = len(points)
data = np.zeros([num, 2])
data[:, 0] = [p[0] for p in points]
data[:, 1] = [p[1] for p in points]
landmark_dict[key] = data
return landmark_dict
def cv2ImreadUrlImg(url):
resp = urllib.request.urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
return image
def make_sure(path):
fdir = path[:path.rfind('/')]
if not os.path.exists(fdir):
os.makedirs(fdir)
return True
def fusion(orange_path, orange_dict, temp_id='temp1'):
t0 = time.time()
orange_dict = preprocess(orange_dict)
# orange_dict = get_landmark_dict(orange_path)
file_name = os.path.basename(orange_path).split('.')[0]
tree_file = "resource/{}/ori.jpg".format(temp_id)
landmark_dict_tree = get_temp_landmark_dict(tree_file)
landmark_dict_tree = preprocess(landmark_dict_tree)
arr_point_tree, list_point_tree = get_points(landmark_dict_tree)
tree_left_eye_center = arr_point_tree[88]
tree_right_eye_center = arr_point_tree[89]
tree = cv2.imread(tree_file, cv2.IMREAD_COLOR)
tree_center = (tree_right_eye_center + tree_left_eye_center) / 2
tree_eye_dis = (tree_right_eye_center - tree_left_eye_center)[0]
# ---------------------------------------------------------#
arr_point_orange, list_point_orange = get_points(orange_dict)
# orange = cv2ImreadUrlImg(orange_path)
orange = cv2.imread(orange_path, cv2.IMREAD_COLOR)
orange = smooth_light(orange, arr_point_orange)
t1 = time.time()
print('1:::', t1 - t0)
# orange, arr_point_orange = toushi_img(orange, arr_point_orange, arr_point_tree, yaw=orange_dict['yaw'])
# arr_point_orange 90*2
orange_left_eye_center = arr_point_orange[88]
orange_right_eye_center = arr_point_orange[89]
orange_center = (orange_right_eye_center + orange_left_eye_center) / 2
orange_eye_dis = (orange_right_eye_center - orange_left_eye_center)[0]
# ---------------------------------------------------------#
    # warp orange so that it is aligned with tree
orange2tree_matrix = get_similarity_matrix(
orange_points=[orange_left_eye_center, orange_right_eye_center,
[orange_center[0], orange_center[1] + orange_eye_dis],
[orange_center[0], orange_center[1] - orange_eye_dis]],
tree_points=[tree_left_eye_center, tree_right_eye_center,
[tree_center[0], tree_center[1] + tree_eye_dis],
[tree_center[0], tree_center[1] - tree_eye_dis]], fullAffine=False)
    # the aligned orange image
orange_trans = cv2.warpAffine(orange, orange2tree_matrix, (tree.shape[1], tree.shape[0]))
    # the aligned orange landmarks
arr_point_orange_trans = landmark_trans_by_m(arr_point_orange, orange2tree_matrix)
    # extract the target region from orange and recombine it proportionally
orange_mask_trans, morph_points, orange_mask = morph_img(tree, arr_point_tree, orange_trans, arr_point_orange_trans,
                                                             alpha=[.2, .2, .2, .85])  # eyes, face, other
    # warp the tree image (mainly the face-shape contour)
tree_trans = tran_src(tree, arr_point_tree, morph_points)
rgb_img = merge_img(orange_mask_trans, np.uint8(tree_trans), orange_mask, morph_points, .88)
t2 = time.time()
print('2:::', t2 - t1)
# await gen.sleep(1)
local_path = "userImg/download/{}/{}_res.png".format(file_name, temp_id)
local_thum_path = "userImg/download/{}/{}_thum.png".format(file_name, temp_id)
make_sure(local_path)
res_img = Image.fromarray(np.uint8(rgb_img[..., [2, 1, 0]]))
res_img.save(local_path)
res_img.thumbnail((500, 500))
res_img.save(local_thum_path)
t3 = time.time()
print('3:::', t3 - t2)
return local_path, local_thum_path
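# Illustrative invocation of the pipeline above (the paths and template id are
# placeholders, not files shipped with this module):
#
#   landmarks = get_landmark_dict("userImg/orange.jpg")
#   res_path, thumb_path = fusion("userImg/orange.jpg", landmarks, temp_id='temp1')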
``` |
{
"source": "353solutions/nlpy",
"score": 3
} |
#### File: nlpy/nlp/httpd.py
```python
from http import HTTPStatus
from flask import Flask, jsonify, request
import nlp
app = Flask(__name__)
def json_error(error, status_code=HTTPStatus.BAD_REQUEST):
resp = jsonify(error=error)
resp.status_code = status_code
return resp
@app.route('/tokenize', methods=['POST'])
def tokenize():
data = request.get_data()
if not data:
return json_error('empty request')
try:
text = data.decode('utf-8')
except UnicodeDecodeError as err:
return json_error(str(err))
return jsonify(tokens=nlp.tokenize(text))
@app.route('/health')
def health():
return jsonify(ok=True)
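# Example client call (a sketch assuming the server runs locally on the default port 8080;
# the `requests` package is an assumption, not a dependency of this module):
#
#   import requests
#   resp = requests.post("http://localhost:8080/tokenize", data="Hello world".encode("utf-8"))
#   resp.json()  # -> {"tokens": [...]}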
def main():
from os import environ
port = int(environ.get('NLPD_PORT', '8080'))
app.run(debug='DEBUG' in environ, port=port)
if __name__ == '__main__':
main()
``` |
{
"source": "355731090990871/dataplicity-agent",
"score": 2
} |
#### File: dataplicity-agent/dataplicity/device_meta.py
```python
from __future__ import unicode_literals
import logging
import platform
from .iptool import get_all_interfaces
from . import rpi
from ._version import __version__
log = logging.getLogger("agent")
# Cache the meta dict because it never changes
_META_CACHE = None
def get_meta():
"""Get a dict containing device meta information."""
global _META_CACHE
if _META_CACHE is not None:
return _META_CACHE.copy()
meta = {}
meta["agent_version"] = __version__
meta["machine_revision"] = rpi.get_machine_revision()
meta["os_version"] = get_os_version()
meta["uname"] = get_uname()
meta["ip_list"] = get_ip_address_list()
_META_CACHE = meta
return meta
def get_uname():
"""Get uname."""
# Preferable to running a system command
uname = " ".join(platform.uname())
return uname
def get_os_version():
"""Get the OS version."""
# Linux is a fair assumption for now
distro = " ".join(platform.linux_distribution()).strip()
return distro
def get_ip_address_list():
# Get the ip addresses from all the interfaces
try:
interfaces = get_all_interfaces()
except Exception:
log.exception("unable to retrieve interface information")
# Sorry for the pokemon exception, but I don't know how
# reliable the call is, and if it fails what it will fail with.
# It needs some exception handling or the whole get_meta call
# will fail
return []
return [i[1] for i in interfaces]
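# Illustrative usage (values depend on the host the agent runs on):
#
#   meta = get_meta()
#   sorted(meta.keys())  # -> ['agent_version', 'ip_list', 'machine_revision', 'os_version', 'uname']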
```
#### File: dataplicity/m2m/echoservice.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import logging
import weakref
log = logging.getLogger('m2m')
class EchoService(object):
"""
M2M echo service.
Data will be sent back on the same channel.
"""
def __init__(self, channel):
# When the channel is closed, this object should go out of scope
self.channel = weakref.ref(channel)
channel.set_callbacks(on_data=self.on_data)
def on_data(self, data):
# Send data back
self.channel().write(data)
```
#### File: dataplicity/m2m/test_packetbase.py
```python
import pytest
from dataplicity.m2m.packetbase import (PacketFormatError,
UnknownPacketError)
from dataplicity.m2m.packets import CommandSendInstructionPacket, M2MPacket
from dataplicity.m2m.packets import PingPacket
@pytest.fixture
def cmd():
return CommandSendInstructionPacket(
command_id=CommandSendInstructionPacket.type.value,
node=b'\x01\x02',
data={
b'foo': b'bar',
b'baz': [
b'1', 2, {b'3': 4}
]
}
)
def test_from_bytes(cmd):
""" unit test for from_bytes factory
"""
    # let's prepare some packet. CommandSendInstructionPacket seems simple
    # enough for the tests, but not so simple that it would skip any branch of
    # the code.
cmd_binary = cmd.encode_binary()
assert cmd.as_bytes == cmd_binary
decoded = CommandSendInstructionPacket.from_bytes(cmd_binary)
assert decoded.kwargs == cmd.kwargs
    # ideally, we would compare the repr's; however, the keys of the dict don't
# have order. However, regardless of this, the lengths should be the same.
assert repr(decoded) is not None
assert len(repr(decoded)) == len(repr(cmd))
assert decoded.attributes == cmd.attributes
def test_invalid_binary_raises_decoding_error(cmd):
"""
"""
_bin = cmd.as_bytes[:-1]
with pytest.raises(PacketFormatError):
CommandSendInstructionPacket.from_bytes(_bin)
def test_nonlist_data_raises_formaterror():
"""
"""
_bin = b'i1e' # bencode for 1 (int)
with pytest.raises(PacketFormatError):
CommandSendInstructionPacket.from_bytes(_bin)
def test_nonint_packet_type_raises_formaterror():
"""
"""
_bin = b'l1:ae'
with pytest.raises(PacketFormatError):
CommandSendInstructionPacket.from_bytes(_bin)
def test_unknown_packet_type_raises_unknown_packeterror():
"""
"""
_bin = b'li-1ee'
with pytest.raises(UnknownPacketError):
M2MPacket.from_bytes(_bin)
def test_create_packet_dynamically(cmd):
""" The M2MPacket base class has the registry of all available packets.
    We can utilize the .create factory to obtain a dynamic packet by passing the
    necessary arguments.
"""
dynamic_cmd = M2MPacket.create(
packet_type=cmd.type.value,
node=b'a-node',
command_id=cmd.command_id,
data=cmd.data
)
assert isinstance(dynamic_cmd, CommandSendInstructionPacket)
# we can also check that specifying an unknown packet type yields an error.
with pytest.raises(ValueError):
M2MPacket.create(
packet_type=-1
)
def test_validation_of_init_params_works():
""" We can assert whether the base class checks for required attributes.
"""
# PingPacket has one attribute, therefore calling the factory without
# any parameters will yield an error
with pytest.raises(PacketFormatError) as e:
M2MPacket.create(packet_type=PingPacket.type)
assert str(e.value).startswith("missing attribute")
    # similarly, specifying an invalid type will also throw an exception.
# the PingPacket has only one attribute of type bytes
with pytest.raises(PacketFormatError) as e:
M2MPacket.create(packet_type=PingPacket.type, data=["foo"])
assert str(e.value).startswith("parameter")
def test_get_method_args(cmd):
""" this method tests the functionality of splitting constructor parameters
into args and kwargs
"""
args, kwargs = cmd.get_method_args(3)
assert len(args) + len(kwargs.keys()) == len(cmd.attributes)
```
#### File: dataplicity/subcommands/test_run.py
```python
from dataplicity.subcommands.run import Run
from dataplicity.app import App
import mock
@mock.patch('dataplicity.client.Client.run_forever')
def test_run_command(run_forever, serial_file, auth_file):
""" unit test for run subcommand.
    There are a couple of caveats, which I will briefly describe here:
-> we don't want to literally call run_forever on the client, because
it would deadlock the tests. Because of that, we simply want to
check whether the function in question was called. As you can see,
there is a magic parameter called run_forever, which is tightly
coupled with the mock.patch decorator. It is a mocked copy of an
actual function from the module dataplicity.client.Client
-> We have to fake command-line arguments, because otherwise the App
module won't create a client for us - hence, instead of creating
a parser and parsing an empty string, we're hard-coding two url's
which are irrelevant to our test anyway.
"""
class FakeArgs(object):
""" fake stdargs object
"""
server_url = 'http://example.com'
m2m_url = 'ws://example.com'
serial = None
auth_token = None
app = App()
# set fake command-line args
app.args = FakeArgs()
# initiate subcommand
cmd = Run(app)
# execute subcommand. This should call run_forever on the client ...
cmd.run()
# ... as asserted here.
assert run_forever.call_count == 1
```
#### File: tests/dataplicity/test_client.py
```python
import pytest
from dataplicity import client as mclient
from dataplicity import device_meta
from mock import patch, call
from freezegun import freeze_time
from datetime import datetime
import random
def test_client_initialization(auth_file, serial_file):
""" this function tests 'succesful' initialization of the client
"""
mclient.Client()
def test_client_unsuccesful_init(tmpdir):
""" the client won't start if the file is missing.
serial file is read first, so we have to fake the location there in
order to raise IOError.
"""
non_existing_path = tmpdir.join("non-existing-file")
with patch(
'dataplicity.constants.SERIAL_LOCATION', str(non_existing_path)
):
with pytest.raises(IOError):
mclient.Client()
def test_system_exit_call(serial_file, auth_file, mocker):
""" test client initialization with error handling
"""
client = mclient.Client()
def poll_which_raises(self):
raise SystemExit
def poll_which_raises_keyboardint(self):
raise KeyboardInterrupt
# this attaches to client.close() method which should be called at the end
# of run_forever. The method won't be monkeypatched, but we'll be able
# to check whether the method was called or not.
mocker.spy(client, 'close')
with patch('dataplicity.client.Client.poll', poll_which_raises):
client.run_forever()
assert client.close.call_count == 1
with patch(
'dataplicity.client.Client.poll', poll_which_raises_keyboardint
):
client.run_forever()
assert client.close.call_count == 2
@freeze_time("2017-01-03 11:00:00", tz_offset=0)
def test_disk_poll(serial_file, auth_file):
""" test code for disk_poll
"""
client = mclient.Client()
client.disk_poll()
assert datetime.utcfromtimestamp(client.next_disk_poll_time) == \
datetime(2017, 1, 3, 12, 00)
def test_client_sync_id_generation(mocker):
""" check sync_id generation
"""
mocker.spy(random, 'choice')
sync_id = mclient.Client.make_sync_id()
assert len(sync_id) == 12
assert random.choice.call_args == call('abcdefghijklmnopqrstuvwxyz')
def test_client_sync_with_error(serial_file, auth_file, caplog, httpserver):
"""
"""
client = mclient.Client(rpc_url=httpserver.url)
client.sync()
# teardown for meta cache
device_meta._META_CACHE = None
assert 'sync failed' in caplog.text
```
#### File: tests/dataplicity/test_jsonrpc.py
```python
from json import dumps
import pytest
from dataplicity.jsonrpc import (JSONRPC, ErrorCode, InvalidResponseError,
ProtocolError, RemoteError, RemoteMethodError,
Batch)
import six
@pytest.fixture
def response():
return {
"jsonrpc": '2.0',
"id": 2
}
def test_call_id_increments(httpserver, response):
""" test code for incrementation of message id
"""
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
client.call('foo', bar='baz')
assert client.call_id == 2
def test_jsonrpc_client_errors(httpserver, response):
""" unit test for JSONRPC client code.
uses pytest-localserver plugin
"""
client = JSONRPC(httpserver.url)
httpserver.serve_content("invalid-json")
with pytest.raises(InvalidResponseError) as exc:
client.call('foo')
assert str(exc.value) == 'unable to decode response as JSON'
response['jsonrpc'] = '1'
httpserver.serve_content(dumps(response))
with pytest.raises(ProtocolError) as exc:
client.call('foo')
assert str(exc.value) == 'Client only understands JSONRPC v2.0'
del response['jsonrpc']
httpserver.serve_content(dumps(response))
with pytest.raises(ProtocolError) as exc:
client.call('foo')
assert str(exc.value) == 'Invalid response from server'
def test_that_id_in_response_must_match(httpserver, response):
""" test code for matching id
"""
response["id"] = 20
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
with pytest.raises(ProtocolError) as exc:
client.call("foo")
assert str(exc.value) == "Invalid response from the server, 'id' field does not match" # noqa
def test_remote_error(httpserver, response):
""" test code for handling RemoteError / RemoteMethodError
"""
response['error'] = {
'code': ErrorCode.parse_error,
'message': 'test-message'
}
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
with pytest.raises(RemoteError) as exc:
client.call("foo")
assert str(exc.value) == 'test-message'
# imitate an error which is not a known RemoteError
response['error']['code'] = 0
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
with pytest.raises(RemoteMethodError) as exc:
client.call("foo")
assert str(exc.value) == 'test-message'
def test_notify(httpserver, response):
""" call_id in the notify method should stay the same
"""
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
client.notify("foo")
assert client.call_id == 1
def test_batch_factory():
""" testing Batch object creation
"""
client = JSONRPC(None)
batch = client.batch()
assert isinstance(batch, Batch)
batch.call("foo")
batch.call("bar")
assert len(batch.calls) == 2
def test_abandon_call():
""" no httpserver here, therefore the method should raise an Exception,
if it weren't for the abandon() call.
"""
client = JSONRPC(None)
with Batch(client) as b:
b.call("foo")
b.abandon()
assert b._abandoned is True
def test_send_batch_calls(httpserver, response):
""" testing issuing calls via Batch interface
"""
httpserver.serve_content(dumps(response))
client = JSONRPC(httpserver.url)
with pytest.raises(ProtocolError) as exc:
with client.batch() as batch:
batch = Batch(client)
batch.call("foo")
assert str(exc.value) == 'Expected a list of response from the server'
response = [
response,
{'jsonrpc': '2.0', 'result': 'test-result', 'id': 3}
]
httpserver.serve_content(dumps(response))
client.call_id = 1
with client.batch() as foo:
foo.call("Foo")
foo.call("FFF")
assert foo.get_result(2) is None
assert foo.get_result(3) == 'test-result'
with pytest.raises(KeyError) as exc:
foo.get_result(1111)
expected_message = 'No such call_id in response'
if six.PY2:
assert str(exc.value.message) == expected_message
elif six.PY3:
assert exc.value.args[0] == expected_message
``` |
{
"source": "356255531/ulgie_poc",
"score": 3
} |
#### File: e2cnn/diffops/basis.py
```python
from dataclasses import dataclass
import itertools
from e2cnn.kernels.basis import EmptyBasisException
import numpy as np
from typing import Iterable, List, Optional, Type, Union, Tuple
from e2cnn.kernels import Basis
from e2cnn.group import SO2, Representation
# we need SteerableDiffopBasis, but importing that directly
# leads to cyclic imports because it also relies on this file.
# So we just import the diffops module instead
import e2cnn.diffops as diffops
from .utils import discretize_homogeneous_polynomial, multiply_polynomials, laplacian_power, display_diffop, transform_polynomial
@dataclass(frozen=True)
class DiscretizationArgs:
r"""Parameters specifying a discretization procedure for PDOs.
Attributes:
~.method (str, optional): the discretization method to use,
either ``"rbffd"``, ``"fd"`` or ``"gauss"``.
~.smoothing (float, optional): ``smoothing`` is the standard
deviation of the Gaussian used for discretization. Must be set if ``method="gauss"``,
has no effect otherwise.
~.angle_offset (float, optional): if not ``None``, rotate the PDOs by this many radians.
~.phi (str, optional): which RBF to use (only relevant for RBF-FD).
Can be any of the abbreviations `here <https://rbf.readthedocs.io/en/latest/basis.html>`_.
"""
method: str = "fd"
smoothing: Optional[float] = None
angle_offset: Optional[float] = None
phi: str = "ga"
class DiffopBasis(Basis):
def __init__(self,
coefficients: List[np.ndarray],
discretization: DiscretizationArgs = DiscretizationArgs(),
):
r"""
Abstract class for implementing the basis of a space of differential operators.
Such a space consists of :math:`c_\text{out} \times c_\text{in}` matrices with
partial differential operators as entries.
Args:
coefficients (list): a list of ndarrays. Each array describes one element
of the basis and has shape ``(c_out, c_in, n + 1)``, where ``n``
is the derivative order of the entries of the matrix.
PDOs are encoded as the coefficients of :math:`\frac{\partial^n}{\partial x^n}`,
:math:`\frac{\partial^n}{\partial x^{n - 1}\partial y}`, ...,
:math:`\frac{\partial^n}{\partial y^n}`.
discretization (optional): additional parameters specifying parameters for
the discretization procedure. See :class:`~e2cnn.diffops.DiscretizationArgs`.
Attributes:
~.coefficients (list): an analytical description of the PDO basis elements, see above
~.dim (int): the dimensionality of the basis :math:`|\mathcal{K}|` (number of elements)
~.shape (tuple): a tuple containing :math:`c_\text{out}` and :math:`c_\text{in}`
            ~.maximum_order (int): the largest derivative order occurring in the basis
"""
dim = len(coefficients)
if dim == 0:
raise EmptyBasisException
shape = coefficients[0].shape[:2]
self.disc = discretization
self.maximum_order = 0
for element in coefficients:
assert element.shape[:2] == shape
assert len(element.shape) == 3
# we sometimes get very small coefficients (on the order of 1e-17)
# through rounding errors, those should be 0
# this is important to get the derivative orders right for basis filters
element[np.abs(element) < 1e-8] = 0
# We want to know the maximum order that appears in this basis.
# The last axis contains the actual derivative, and has length order + 1
self.maximum_order = max(self.maximum_order, element.shape[-1] - 1)
self.coefficients = coefficients
super().__init__(dim, shape)
def sample(self,
points: np.ndarray,
) -> np.ndarray:
r"""
Discretize the basis on a set of points.
See :meth:`~e2cnn.diffops.DiffopBasis.sampled_masked` for details.
"""
return self.sample_masked(points)
def sample_masked(self,
points: np.ndarray,
mask: np.ndarray = None,
) -> np.ndarray:
r"""
Discretize the basis on a set of points.
Args:
points (ndarray): a `2 x N` array with `N` points on which to discretize.
If FD is used (default), this has to be a flattened version of
a regular sorted grid (like you would get using np.meshgrid on ranges).
mask (ndarray, optional): Boolean array of shape (dim, ), where ``dim`` is the number of basis elements.
True for elements to discretize and False for elements to discard.
Returns:
            ndarray with shape `(C_out, C_in, num_basis_elements, n_in)`, where
            `num_basis_elements` is the number of elements remaining after applying the mask, and `n_in` is the number
of points.
"""
if mask is None:
# if no mask is given, we use all basis elements
mask = np.array([True] * self.dim)
assert isinstance(mask, np.ndarray)
assert mask.shape == (self.dim, )
if self.disc.angle_offset is not None:
so2 = SO2(1)
# rotation matrix by angle_offset
matrix = so2.irrep(1)(self.disc.angle_offset)
# we transform the polynomial with the matrix
coefficients = (transform_polynomial(element, matrix) for element in self.coefficients)
else:
coefficients = self.coefficients
coefficients = (coeff for coeff, m in zip(coefficients, mask) if m)
assert isinstance(points, np.ndarray)
assert len(points.shape) == 2
assert points.shape[0] == 2
num_points = points.shape[1]
basis = np.empty((np.sum(mask), ) + self.shape + (num_points, ))
for k, element in enumerate(coefficients):
for i in range(self.shape[0]):
for j in range(self.shape[1]):
basis[k, i, j] = discretize_homogeneous_polynomial(
points,
element[i, j],
self.disc.smoothing,
phi=self.disc.phi,
method=self.disc.method,
)
# Finally, we move the len_basis axis to the third position
basis = basis.transpose(1, 2, 0, 3)
return basis
def pretty_print(self) -> str:
"""Return a human-readable representation of all basis elements."""
out = ""
for element in self.coefficients:
out += display_matrix(element)
out += "\n----------------------------------\n"
return out
class LaplaceProfile(DiffopBasis):
def __init__(self, max_power: int, discretization: DiscretizationArgs = DiscretizationArgs()):
r"""
Basis for rotationally invariant PDOs.
Each basis element is defined as a power of a Laplacian.
In order to build a complete basis of PDOs, you should combine this basis
with a basis which defines the angular profile through :class:`~e2cnn.diffops.TensorBasis`.
Args:
max_power (int): the maximum power of the Laplace operator that will be used.
The maximum degree (as a differential operator) will be two times this maximum
power.
discretization (DiscretizationArgs, optional): additional parameters specifying parameters for
the discretization procedure. See :class:`~e2cnn.diffops.DiscretizationArgs`.
"""
assert isinstance(max_power, int)
assert max_power >= 0
coefficients = [
laplacian_power(k).reshape(1, 1, -1) for k in range(max_power + 1)
]
super().__init__(coefficients, discretization)
self.max_power = max_power
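        # Example (illustrative): max_power=2 yields three basis elements encoding the
        # identity, the Laplacian, and the squared Laplacian, i.e. PDOs of derivative
        # orders 0, 2 and 4.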
def __getitem__(self, r):
assert r < self.dim
return {"power": r, "order": 2 * r, "idx": r}
def __eq__(self, other):
if isinstance(other, LaplaceProfile):
return self.max_power == other.max_power
else:
return False
def __hash__(self):
return hash(self.max_power)
class TensorBasis(DiffopBasis):
def __init__(self,
irreps_basis: Type[DiffopBasis],
in_repr: Representation,
out_repr: Representation,
max_power: int,
discretization: DiscretizationArgs = DiscretizationArgs(),
**kwargs
):
r"""
Build the tensor product basis of two PDO bases over the
plane. Given two bases :math:`A = \{a_i\}_i` and :math:`B = \{b_j\}_j`, this basis is defined as
.. math::
C = A \otimes B = \left\{ c_{i,j} := a_i \circ b_j \right\}_{i,j}.
The arguments are passed on to :class:`~e2cnn.diffops.SteerableDiffopBasis` and
:class:`~e2cnn.diffops.LaplaceProfile`, see their documentation.
Attributes:
~.basis1 (SteerableDiffopBasis): the first basis
~.basis2 (LaplaceProfile): the second basis
"""
basis1 = diffops.SteerableDiffopBasis(
irreps_basis, in_repr, out_repr, discretization, **kwargs
)
basis2 = LaplaceProfile(max_power, discretization)
coefficients = []
for a, b in itertools.product(basis1.coefficients, basis2.coefficients):
order = a.shape[2] + b.shape[2] - 2
out = np.empty((a.shape[0], b.shape[0], a.shape[1], b.shape[1], order + 1))
for i, j, k, l in itertools.product(range(a.shape[0]),
range(b.shape[0]),
range(a.shape[1]),
range(b.shape[1])):
out[i, j, k, l] = multiply_polynomials(a[i, k], b[j, l])
out = out.reshape(a.shape[0] * b.shape[0], a.shape[1] * b.shape[1], order + 1)
coefficients.append(out)
super().__init__(coefficients, discretization)
self.basis1 = basis1
self.basis2 = basis2
def __getitem__(self, idx):
assert idx < self.dim
idx1, idx2 = divmod(idx, self.basis2.dim)
attr1 = self.basis1[idx1]
attr2 = self.basis2[idx2]
attr = dict()
attr.update(attr1)
attr.update(attr2)
attr["order"] = attr1["order"] + attr2["order"]
attr["idx"] = idx
attr["idx1"] = idx1
attr["idx2"] = idx2
return attr
def __iter__(self):
idx = 0
for attr1 in self.basis1:
for attr2 in self.basis2:
attr = dict()
attr.update(attr1)
attr.update(attr2)
attr["order"] = attr1["order"] + attr2["order"]
attr["idx"] = idx
attr["idx1"] = attr1["idx"]
attr["idx2"] = attr2["idx"]
yield attr
idx += 1
def __eq__(self, other):
if isinstance(other, TensorBasis):
return self.basis1 == other.basis1 and self.basis2 == other.basis2
else:
return False
def __hash__(self):
return hash(self.basis1) + hash(self.basis2)
def display_matrix(element):
out = ""
for i in range(element.shape[0]):
for j in range(element.shape[1]):
out += display_diffop(element[i, j]) + "\t"
out += "\n"
return out
```
#### File: modules/r2_conv/basisexpansion_singleblock.py
```python
from e2cnn.kernels import Basis, EmptyBasisException
from .basisexpansion import BasisExpansion
from typing import Callable, Dict, List, Iterable, Union
import torch
import numpy as np
__all__ = ["SingleBlockBasisExpansion", "block_basisexpansion"]
class SingleBlockBasisExpansion(BasisExpansion):
def __init__(self,
basis: Basis,
points: np.ndarray,
basis_filter: Callable[[dict], bool] = None,
):
r"""
Basis expansion method for a single contiguous block, i.e. for kernels/PDOs whose input type and output type contain
only fields of one type.
This class should be instantiated through the factory method
:func:`~e2cnn.nn.modules.r2_conv.block_basisexpansion` to enable caching.
Args:
basis (Basis): analytical basis to sample
points (ndarray): points where the analytical basis should be sampled
basis_filter (callable, optional): filter for the basis elements. Should take a dictionary containing an
element's attributes and return whether to keep it or not.
"""
super(SingleBlockBasisExpansion, self).__init__()
self.basis = basis
# compute the mask of the sampled basis containing only the elements allowed by the filter
mask = np.zeros(len(basis), dtype=bool)
for b, attr in enumerate(basis):
mask[b] = basis_filter(attr)
if not any(mask):
raise EmptyBasisException
attributes = [attr for b, attr in enumerate(basis) if mask[b]]
# we need to know the real output size of the basis elements (i.e. without the change of basis and the padding)
# to perform the normalization
sizes = []
for attr in attributes:
sizes.append(attr["shape"][0])
# sample the basis on the grid
# and filter out the basis elements discarded by the filter
sampled_basis = torch.Tensor(basis.sample_masked(points, mask=mask)).permute(2, 0, 1, 3)
# DEPRECATED FROM PyTorch 1.2
# PyTorch 1.2 suggests using BoolTensor instead of ByteTensor for boolean indexing
# but BoolTensor have been introduced only in PyTorch 1.2
# Hence, for the moment we use ByteTensor
mask = mask.astype(np.uint8)
mask = torch.tensor(mask)
# normalize the basis
sizes = torch.tensor(sizes, dtype=sampled_basis.dtype)
assert sizes.shape[0] == mask.to(torch.int).sum(), sizes.shape
assert sizes.shape[0] == sampled_basis.shape[0], (sizes.shape, sampled_basis.shape)
sampled_basis = normalize_basis(sampled_basis, sizes)
# discard the basis which are close to zero everywhere
norms = (sampled_basis ** 2).reshape(sampled_basis.shape[0], -1).sum(1) > 1e-2
if not any(norms):
raise EmptyBasisException
sampled_basis = sampled_basis[norms, ...]
full_mask = torch.zeros_like(mask)
full_mask[mask] = norms.to(torch.uint8)
self._mask = full_mask
self.attributes = [attr for b, attr in enumerate(attributes) if norms[b]]
# register the bases tensors as parameters of this module
self.register_buffer('sampled_basis', sampled_basis)
self._idx_to_ids = []
self._ids_to_idx = {}
for idx, attr in enumerate(self.attributes):
if "radius" in attr:
radial_info = attr["radius"]
elif "order" in attr:
radial_info = attr["order"]
else:
raise ValueError("No radial information found.")
id = '({}-{},{}-{})_({}/{})_{}'.format(
attr["in_irrep"], attr["in_irrep_idx"], # name and index within the field of the input irrep
attr["out_irrep"], attr["out_irrep_idx"], # name and index within the field of the output irrep
radial_info,
attr["frequency"], # frequency of the basis element
# int(np.abs(attr["frequency"])), # absolute frequency of the basis element
attr["inner_idx"],
# index of the basis element within the basis of radially independent kernels between the irreps
)
attr["id"] = id
self._ids_to_idx[id] = idx
self._idx_to_ids.append(id)
def forward(self, weights: torch.Tensor) -> torch.Tensor:
assert len(weights.shape) == 2 and weights.shape[1] == self.dimension()
# expand the current subset of basis vectors and set the result in the appropriate place in the filter
return torch.einsum('boi...,kb->koi...', self.sampled_basis, weights) #.transpose(1, 2).contiguous()
def get_basis_names(self) -> List[str]:
return self._idx_to_ids
def get_element_info(self, name: Union[str, int]) -> Dict:
if isinstance(name, str):
name = self._ids_to_idx[name]
return self.attributes[name]
def get_basis_info(self) -> Iterable:
return iter(self.attributes)
def dimension(self) -> int:
return self.sampled_basis.shape[0]
def __eq__(self, other):
if isinstance(other, SingleBlockBasisExpansion):
return (
self.basis == other.basis and
torch.allclose(self.sampled_basis, other.sampled_basis) and
(self._mask == other._mask).all()
)
else:
return False
def __hash__(self):
return 10000 * hash(self.basis) + 100 * hash(self.sampled_basis) + hash(self._mask)
# dictionary storing references to already built basis tensors
# when a new filter tensor is built, it is also stored here
# when the same basis is built again (eg. in another layer), the already existing filter tensor is retrieved
_stored_filters = {}
def block_basisexpansion(basis: Basis,
points: np.ndarray,
basis_filter: Callable[[dict], bool] = None,
recompute: bool = False
) -> SingleBlockBasisExpansion:
r"""
Return an instance of :class:`~e2cnn.nn.modules.r2_conv.SingleBlockBasisExpansion`.
This function support caching through the argument ``recompute``.
Args:
basis (Basis): basis defining the space of kernels
points (~np.ndarray): points where the analytical basis should be sampled
basis_filter (callable, optional): filter for the basis elements. Should take a dictionary containing an
element's attributes and return whether to keep it or not.
recompute (bool, optional): whether to recompute new bases (``True``) or reuse, if possible,
already built tensors (``False``, default).
"""
if not recompute:
# compute the mask of the sampled basis containing only the elements allowed by the filter
mask = np.zeros(len(basis), dtype=bool)
for b, attr in enumerate(basis):
mask[b] = basis_filter(attr)
key = (basis, mask.tobytes(), points.tobytes())
if key not in _stored_filters:
_stored_filters[key] = SingleBlockBasisExpansion(basis, points, basis_filter)
return _stored_filters[key]
else:
return SingleBlockBasisExpansion(basis, points, basis_filter)
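# Illustrative caching behaviour (a sketch; `some_basis`, `grid` and `keep_all` are placeholders):
#
#   b1 = block_basisexpansion(some_basis, grid, keep_all)                   # built and cached
#   b2 = block_basisexpansion(some_basis, grid, keep_all)                   # returns the cached instance (b1 is b2)
#   b3 = block_basisexpansion(some_basis, grid, keep_all, recompute=True)   # forces a rebuild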
def normalize_basis(basis: torch.Tensor, sizes: torch.Tensor) -> torch.Tensor:
r"""
Normalize the filters in the input tensor.
The tensor of shape :math:`(B, O, I, ...)` is interpreted as a basis containing ``B`` filters/elements, each with
``I`` inputs and ``O`` outputs. The spatial dimensions ``...`` can be anything.
    .. note ::
        This method changes the input tensor in place.
Args:
basis (torch.Tensor): tensor containing the basis to normalize
sizes (torch.Tensor): original input size of the basis elements, without the padding and the change of basis
Returns:
        the normalized basis (the operation is done in place, so this is just a reference to the input tensor)
"""
b = basis.shape[0]
assert len(basis.shape) > 2
assert sizes.shape == (b,)
# compute the norm of each basis vector
norms = torch.einsum('bop...,bpq...->boq...', (basis, basis.transpose(1, 2)))
# Removing the change of basis, these matrices should be multiples of the identity
# where the scalar on the diagonal is the variance
# in order to find this variance, we can compute the trace (which is invariant to the change of basis)
# and divide by the number of elements in the diagonal ignoring the padding.
# Therefore, we need to know the original size of each basis element.
norms = torch.einsum("bii...->b", norms)
# norms = norms.reshape(b, -1).sum(1)
norms /= sizes
norms[norms < 1e-15] = 0
norms = torch.sqrt(norms)
norms[norms < 1e-6] = 1
norms[norms != norms] = 1
norms = norms.view(b, *([1] * (len(basis.shape) - 1)))
# divide by the norm
basis /= norms
return basis
```
#### File: test/diffops/test_cache.py
```python
import numpy as np
from e2cnn.diffops import store_cache, load_cache
from e2cnn.diffops.utils import discretize_homogeneous_polynomial
import unittest
from unittest import TestCase
def make_grid(n):
x = np.arange(-n, n + 1)
return np.stack(np.meshgrid(x, -x)).reshape(2, -1)
class TestCache(TestCase):
def test_cache(self):
# generate a few diffops:
coefficients = [
np.array([2, 0]),
np.array([0, 1, 0, 3]),
np.array([1, -2, 1]),
]
diffops = []
points = make_grid(2)
for c in coefficients:
diffops.append(discretize_homogeneous_polynomial(points, c))
store_cache()
load_cache()
for i, c in enumerate(coefficients):
assert np.allclose(diffops[i], discretize_homogeneous_polynomial(points, c))
if __name__ == '__main__':
unittest.main()
```
#### File: ulgie_poc/ulgie/encoder.py
```python
import torch
from e2cnn import gspaces
from e2cnn import nn
class C8SteerableCNN(torch.nn.Module):
def __init__(self, n_classes=10):
super(C8SteerableCNN, self).__init__()
# the model is equivariant under rotations by 45 degrees, modelled by C8
self.r2_act = gspaces.Rot2dOnR2(N=8)
# the input image is a scalar field, corresponding to the trivial representation
in_type = nn.FieldType(self.r2_act, [self.r2_act.trivial_repr])
# we store the input type for wrapping the images into a geometric tensor during the forward pass
self.input_type = in_type
# convolution 1
# first specify the output type of the convolutional layer
# we choose 24 feature fields, each transforming under the regular representation of C8
out_type = nn.FieldType(self.r2_act, 24 * [self.r2_act.regular_repr])
self.block1 = nn.SequentialModule(
nn.MaskModule(in_type, 29, margin=1),
nn.R2Conv(in_type, out_type, kernel_size=7, padding=1, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
# convolution 2
# the old output type is the input type to the next layer
in_type = self.block1.out_type
# the output type of the second convolution layer are 48 regular feature fields of C8
out_type = nn.FieldType(self.r2_act, 48 * [self.r2_act.regular_repr])
self.block2 = nn.SequentialModule(
nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
self.pool1 = nn.SequentialModule(
nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=2)
)
# convolution 3
# the old output type is the input type to the next layer
in_type = self.block2.out_type
# the output type of the third convolution layer are 48 regular feature fields of C8
out_type = nn.FieldType(self.r2_act, 48 * [self.r2_act.regular_repr])
self.block3 = nn.SequentialModule(
nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
# convolution 4
# the old output type is the input type to the next layer
in_type = self.block3.out_type
# the output type of the fourth convolution layer are 96 regular feature fields of C8
out_type = nn.FieldType(self.r2_act, 96 * [self.r2_act.regular_repr])
self.block4 = nn.SequentialModule(
nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
self.pool2 = nn.SequentialModule(
nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=2)
)
# convolution 5
# the old output type is the input type to the next layer
in_type = self.block4.out_type
# the output type of the fifth convolution layer are 96 regular feature fields of C8
out_type = nn.FieldType(self.r2_act, 96 * [self.r2_act.regular_repr])
self.block5 = nn.SequentialModule(
nn.R2Conv(in_type, out_type, kernel_size=5, padding=2, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
# convolution 6
# the old output type is the input type to the next layer
in_type = self.block5.out_type
# the output type of the sixth convolution layer are 64 regular feature fields of C8
out_type = nn.FieldType(self.r2_act, 64 * [self.r2_act.regular_repr])
self.block6 = nn.SequentialModule(
nn.R2Conv(in_type, out_type, kernel_size=5, padding=1, bias=False),
nn.InnerBatchNorm(out_type),
nn.ReLU(out_type, inplace=True)
)
self.pool3 = nn.PointwiseAvgPoolAntialiased(out_type, sigma=0.66, stride=1, padding=0)
self.gpool = nn.GroupPooling(out_type)
# number of output channels
c = self.gpool.out_type.size
def forward(self, input: torch.Tensor):
# wrap the input tensor in a GeometricTensor
# (associate it with the input type)
x = nn.GeometricTensor(input, self.input_type)
# apply each equivariant block
# Each layer has an input and an output type
# A layer takes a GeometricTensor in input.
# This tensor needs to be associated with the same representation of the layer's input type
#
# The Layer outputs a new GeometricTensor, associated with the layer's output type.
# As a result, consecutive layers need to have matching input/output types
x = self.block1(x)
x = self.block2(x)
x = self.pool1(x)
x = self.block3(x)
x = self.block4(x)
x = self.pool2(x)
x = self.block5(x)
x = self.block6(x)
# pool over the spatial dimensions
x = self.pool3(x)
# pool over the group
x = self.gpool(x)
# unwrap the output GeometricTensor
# (take the Pytorch tensor and discard the associated representation)
x = x.tensor
return x
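# Illustrative forward pass (a sketch: the single-channel 29x29 input size matches the
# MaskModule above; the batch size and random input are placeholders):
#
#   model = C8SteerableCNN()
#   x = torch.randn(4, 1, 29, 29)
#   features = model(x)  # rotation-invariant features, spatially pooled by the last layers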
```
#### File: ulgie_poc/visualizations/animation.py
```python
import numpy as np
from e2cnn.nn import *
from e2cnn.group import *
from e2cnn.gspaces import *
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from skimage.transform import resize
import scipy.ndimage
import torch
from typing import Union
plt.rcParams['image.cmap'] = 'hot'
plt.rcParams['axes.titlepad'] = 30
# the irrep of frequency 1 of SO(2) produces the usual 2x2 rotation matrices
rot_matrix = SO2(1).irrep(1)
def build_mask(s: int, margin: float = 2., dtype=torch.float32):
mask = torch.zeros(1, 1, s, s, dtype=dtype)
c = (s - 1) / 2
t = (c - margin / 100. * c) ** 2
sig = 2.
for x in range(s):
for y in range(s):
r = (x - c) ** 2 + (y - c) ** 2
if r > t:
mask[..., x, y] = np.exp((t - r) / sig ** 2)
else:
mask[..., x, y] = 1.
return mask
def domask(x: Union[np.ndarray, torch.Tensor], margin=2, fmt="torch"):
if fmt == "image":
s = x.shape[0]
mask = build_mask(s, margin)
mask = mask.permute(0, 2, 3, 1).squeeze()
else:
s = x.shape[2]
mask = build_mask(s, margin)
if isinstance(x, np.ndarray):
mask = mask.numpy()
# use an inverse mask to create a white background (value = 1) instead of a black background (value = 0)
return mask * x + 1. - mask
def animate(model: EquivariantModule,
image: Union[str, np.ndarray],
outfile: str,
drawer: callable,
R: int = 72,
S: int = 71,
duration: float = 10.,
figsize=(21, 10),
):
r'''
Build a video animation
Args:
model: the equivariant model
image: the input image
outfile: name of the output file
drawer: method which plots the output field. use one of the methods ``draw_scalar_field``, ``draw_vector_field`` or ``draw_mixed_field``
R: number of rotations of the input to render, i.e. number of frames in the video
S: size the input image is downsampled to before being fed in the model
duration: duration (in seconds) of the video
figsize: shape of the video (see matplotlib.pyplot.figure())
'''
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!')
writer = FFMpegWriter(fps=R / duration, metadata=metadata)
fig, axs = plt.subplots(1, 3, figsize=figsize)
fig.set_tight_layout(True)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()
if isinstance(image, str):
image = mpimg.imread(image).transpose((2, 0, 1))
image = image[np.newaxis, :, :, :]
_, C, w, h = image.shape
# resize the image to have a squared shape
# the image is initially upsampled to (up to) 4 times the specified size S
# rotations are performed at this higher resolution and are later downsampled to size S
# this helps reducing the interpolation artifacts for rotations which are not multiple of pi/2
T = max(4 * S + 1, 513)
image = resize(image, (1, C, T, T), anti_aliasing=True)
print('Image Loaded')
original_inputs = []
for r in range(R):
print(f"{r}/{R}")
# Rotate the image
# N.B.: this only works for trivial (i.e. scalar) input fields like RGB images.
# In case vector fields are used in input, one should also rotate the channels using the group representation
# of the corresponding FieldType
rot_input = scipy.ndimage.rotate(image, r * 360.0 / R, (-2, -1), reshape=False, order=2)
# discard non-RGB channels
rot_input = rot_input[:, :3, ...]
original_inputs.append(rot_input)
original_inputs = np.concatenate(original_inputs, axis=0)
# mask the input images to remove the pixels which would be moved outside the grid by a rotation
original_inputs *= build_mask(T, margin=5).numpy()
# downsample the images
inputs = resize(original_inputs, (original_inputs.shape[0], C, S, S), anti_aliasing=True)
rotated_input = torch.tensor(inputs, dtype=torch.float32)
rotated_input *= build_mask(S, margin=5.2)
# normalize the colors of the images before feeding them into the model
rotated_input -= rotated_input[0, ...].view(3, -1).mean(dim=1).view(1, 3, 1, 1)
rotated_input /= rotated_input[0, ...].view(3, -1).std(dim=1).view(1, 3, 1, 1)
del inputs
rotated_input = rotated_input.to(device)
# wrap the tensor in a GeometricTensor
rotated_input = GeometricTensor(rotated_input, model.in_type)
# pass the images through the model to compute the output field
with torch.no_grad():
# In training mode, the batch normalization layers normalize the features with the batch statistics
# This sometimes produces nicer output fields
# model.train()
output = model(rotated_input)
# extract the underlying torch.Tensor
output = output.tensor
output = output.cpu()
output = output.detach()
output = output.numpy().transpose(0, 2, 3, 1)
# mask the inputs with a white background for visualization purpose
original_inputs = domask(original_inputs, margin=5)
# visualize each rotated image and its corresponding output in a different frame of the video
with writer.saving(fig, outfile, 100):
for r in range(R):
print(f"{r}/{R}")
# render the input image
axs[0].clear()
axs[0].imshow(original_inputs[r, ...].transpose(1, 2, 0))
axs[0].set_title("input", fontdict={'fontsize': 30})
# render the output and the stabilized output
drawer(axs[1:], output, r)
for ax in axs:
ax.axis('off')
fig.set_tight_layout(True)
plt.draw()
writer.grab_frame()
def draw_scalar_field(axs, scalarfield, r: int):
r'''
Draw a scalar field
'''
D = 3
m, M = scalarfield.min(), scalarfield.max()
R = scalarfield.shape[0]
angle = r * 2 * np.pi / R
scalarfield = scalarfield[r, ...].squeeze()
axs[0].clear()
sf = axs[0].imshow(domask(scalarfield.repeat(D, axis=0).repeat(D, axis=1), margin=8, fmt="image"))
axs[0].set_title("feature map", fontdict={'fontsize': 30})
sf.set_clim(m, M)
stable_view = scipy.ndimage.rotate(scalarfield, -angle * 180.0 / np.pi, (-2, -1), reshape=False, order=2)
axs[1].clear()
sf = axs[1].imshow(domask(stable_view.repeat(D, axis=0).repeat(D, axis=1), margin=8, fmt="image"))
axs[1].set_title("stabilized view", fontdict={'fontsize': 30})
sf.set_clim(m, M)
def draw_vector_field(axs, vectorfield, r: int):
r'''
Draw a vector field
'''
D = 21
extent = 0, vectorfield.shape[1], 0, vectorfield.shape[2]
mask = build_mask(D * vectorfield.shape[1], margin=2).numpy().transpose(0, 2, 3, 1).squeeze()
R = vectorfield.shape[0]
angle = r * 2 * np.pi / R
norms = np.sqrt((vectorfield ** 2).sum(axis=3))
m, M = norms.min(), norms.max()
vectorfield = vectorfield[r, ...]
norms = norms[r, ...]
X = range(D // 2, D * extent[1], D)
Y = range(D // 2, D * extent[3], D)
submask = mask[D // 2:D * extent[1]:D, D // 2:D * extent[3]:D]
axs[0].clear()
sf = axs[0].imshow(domask(norms.repeat(D, axis=0).repeat(D, axis=1), fmt='image'))
sf.set_clim(m, M)
vf = axs[0].quiver(X, Y, vectorfield[:, :, 0] * submask, vectorfield[:, :, 1] * submask, color="green", units="xy",
width=1)
axs[0].set_title("feature field", fontdict={'fontsize': 30})
stable_view = scipy.ndimage.rotate(vectorfield, -angle * 180.0 / np.pi, (-3, -2), reshape=False, order=2)
rm = rot_matrix(-angle)
stable_view = np.einsum("oc,xyc->xyo", rm, stable_view)
stable_norms = np.sqrt((stable_view ** 2).sum(axis=2))
axs[1].clear()
sf = axs[1].imshow(domask(stable_norms.repeat(D, axis=0).repeat(D, axis=1), fmt='image'))
sf.set_clim(m, M)
vf = axs[1].quiver(Y, X, stable_view[:, :, 0] * submask, stable_view[:, :, 1] * submask, color="green", units='xy',
width=1)
axs[1].set_title("stabilized view", fontdict={'fontsize': 30})
def quiver(ax, X, Y, U, V):
scale = 1. / 20.
X, Y = np.meshgrid(X, Y)
mask = V ** 2 + U ** 2 > 1e-3
ax.quiver(X[mask], Y[mask], U[mask], V[mask], color="forestgreen", angles='xy', units="xy", scale=scale, width=1.3)
def draw_mixed_field(axs, featurefield, r):
r'''
Draw a field containing a scalar field and a vector field
'''
D = 3
V = 3
extent = 0, D * featurefield.shape[1], 0, D * featurefield.shape[2]
mask = build_mask(featurefield.shape[1], margin=8).numpy().transpose(0, 2, 3, 1).squeeze()
R = featurefield.shape[0]
angle = r * 2 * np.pi / R
scalarfield = featurefield[:, ..., 0]
m, M = scalarfield.min(), scalarfield.max()
vectorfield = featurefield[r, ..., 1:]
scalarfield = featurefield[r, ..., 0]
featurefield = featurefield[r, ...]
X = range(V * D // 2, extent[1], V * D)
Y = range(V * D // 2, extent[3], V * D)
submask = mask[V // 2:extent[1]:V, V // 2:extent[3]:V]
axs[0].clear()
sf = axs[0].imshow(domask(scalarfield.repeat(D, axis=0).repeat(D, axis=1), margin=8, fmt="image"))
sf.set_clim(m, M)
quiver(axs[0],
X, Y,
vectorfield[V // 2:extent[1]:V, V // 2:extent[3]:V, 0] * submask,
vectorfield[V // 2:extent[1]:V, V // 2:extent[3]:V, 1] * submask,
)
axs[0].set_title("feature fields", fontdict={'fontsize': 30})
stable_view = scipy.ndimage.rotate(featurefield, -angle * 180.0 / np.pi, (-3, -2), reshape=False, order=2)
stable_vectorfield = stable_view[..., 1:]
stable_scalarfield = stable_view[..., 0]
rm = rot_matrix(-angle)
stable_vectorfield = np.einsum("oc,xyc->xyo", rm, stable_vectorfield)
axs[1].clear()
sf = axs[1].imshow(domask(stable_scalarfield.repeat(D, axis=0).repeat(D, axis=1), margin=8, fmt="image"))
sf.set_clim(m, M)
quiver(axs[1],
X, Y,
stable_vectorfield[V // 2:extent[1]:V, V // 2:extent[3]:V, 0] * submask,
stable_vectorfield[V // 2:extent[1]:V, V // 2:extent[3]:V, 1] * submask,
)
axs[1].set_title("stabilized view", fontdict={'fontsize': 30})
def build_gcnn(N: int, output: str):
r'''
Build an encoder-decoder model equivariant to N rotations.
``output`` specifies the type of output field of the model, which will then be used for the animation.
'''
# build the g-space for N rotations
if N == 1:
gc = TrivialOnR2()
else:
gc = Rot2dOnR2(N)
# the input contains 3 scalar channels (RGB colors)
r1 = FieldType(gc, [gc.trivial_repr]*3)
# let's build a few inner layers
# we will build a small encoder-decoder convolutional architecture
layers = []
r2 = FieldType(gc, [gc.regular_repr] * 8)
cl1 = R2Conv(r1, r2, 5, bias=True, padding=0)
layers.append(cl1)
layers.append(ELU(layers[-1].out_type, inplace=True))
for i in range(3):
# every two layers we downsample the feature map
if i % 2 == 0:
layers.append(PointwiseAvgPoolAntialiased(layers[-1].out_type, 0.66, stride=2))
cl = R2Conv(r2, r2, 5, bias=True, padding=0)
layers.append(cl)
layers.append(ELU(layers[-1].out_type, inplace=True))
for i in range(3):
# every two layers we upsample the feature map
if i % 2 == 0:
layers.append(R2Upsampling(layers[-1].out_type, 2, align_corners=True))
cl = R2Conv(r2, r2, 5, bias=True, padding=0)
layers.append(cl)
layers.append(ELU(layers[-1].out_type, inplace=True))
# finally, map to the output field which will then be visualized
so2 = SO2(1)
# A vector field contains two channels transforming according to the frequency-1 irrep of SO(2)
# (the common 2x2 rotation matrices)
# the representation needs to be restricted to the group of N discrete rotations considered
vector_f = FieldType(gc, [so2.irrep(1).restrict(N)])
# A scalar field contains one channel transforming according to the trivial representation of SO(2)
# i.e., its values do not change when a rotation is applied
# the representation needs to be restricted to the group of N discrete rotations considered
scalar_f = FieldType(gc, [so2.trivial_representation.restrict(N)])
# build the output field type
if output == "vector":
r3 = vector_f
elif output == "scalar":
r3 = scalar_f
elif output == "both":
# in this case we output both a scalar and a vector field
r3 = scalar_f + vector_f
else:
raise ValueError()
cl2 = R2Conv(layers[-1].out_type, r3, 5, padding=0, bias=False)
layers.append(cl2)
# for visualization purposes, apply a non-linearity to the output to restrict the range of values it takes
if output == "vector":
layers.append(NormNonLinearity(layers[-1].out_type, "squash", bias=False))
elif output == "scalar":
layers.append(SequentialModule(InnerBatchNorm(r3), PointwiseNonLinearity(r3, "p_sigmoid")))
elif output == "both":
labels = ["scalar", "vector"]
nnl = [
(
SequentialModule(InnerBatchNorm(scalar_f), PointwiseNonLinearity(scalar_f, "p_sigmoid")),
"scalar"
),
(
NormNonLinearity(vector_f, "squash", bias=False),
"vector"
),
]
layers.append(MultipleModule(r3, labels, nnl))
else:
raise ValueError()
model = SequentialModule(*layers)
return model
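# --- Added illustrative sketch (not part of the original script) ---
# A quick numerical equivariance check for the model built above. It assumes the e2cnn API used
# elsewhere in this script: GeometricTensor.transform(g) applies the group element g (an integer
# in range(N) for Rot2dOnR2(N)) to the tensor, and the model output can be transformed the same
# way. Errors are small but nonzero because rotating sampled images requires interpolation; many
# e2cnn modules also expose a built-in check_equivariance() helper that can be used instead.
def check_equivariance(model, x, N):
    y = model(x)
    for g in range(N):
        y_from_rotated_x = model(x.transform(g)).tensor
        rotated_y = y.transform(g).tensor
        err = (y_from_rotated_x - rotated_y).abs().max().item()
        print(f"element {g}: max abs equivariance error = {err:.3e}")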
if __name__ == "__main__":
output = "both"
# output = "vector"
# output = "scalar"
N = 24
# build a model equivariant to N rotations
model = build_gcnn(N, output).eval()
# read the input image and retrieve the central patch
IMG_PATH = "./input_image.jpeg"
image = mpimg.imread(IMG_PATH).transpose((2, 0, 1))
px = 314
D = 1252
image = image[:, :, px:px + D]
# build the animation
if output == "vector":
animate(model, image, "animation_vector.mp4", draw_vector_field, R=72, S=129)
elif output == "scalar":
animate(model, image, "animation_scalar.mp4", draw_scalar_field, R=72, S=161)
elif output == "both":
animate(model, image, "animation_mixed.mp4", draw_mixed_field, R=72, S=161)
else:
raise ValueError()
``` |
{
"source": "357112130/robotframework",
"score": 2
} |
#### File: robotide/postinstall/desktopshortcut.py
```python
import wx
import atexit
import sys
from robotide.pluginapi import Plugin, ActionInfo
from robotide import widgets
from robotide.postinstall import __main__ as postinstall
class ShortcutPlugin(Plugin):
"""Creator of RIDE Desktop Shortcuts."""
def __init__(self, app):
Plugin.__init__(self, app, default_settings={
'desktop_shortcut_exists': False,
'initial_project': None
})
self._window = None
atexit.register(self._close)
def _close(self):
pass
def enable(self):
self._create_menu()
def disable(self):
self.unregister_actions()
if self._window:
self._window.close(self.notebook)
def _create_menu(self):
self.unregister_actions()
self.register_action(ActionInfo('Tools',
'Create RIDE Desktop Shortcut',
self.OnViewShortcutCreate,
position=85))
def OnViewShortcutCreate(self, event):
if not self._window:
self._window = _ShortcutCreateWindow(self.notebook)
else:
self.notebook.show_tab(self._window)
self._window.call_creator()
# self.disable()
class _ShortcutCreateWindow(wx.TextCtrl):
def __init__(self, notebook):
wx.TextCtrl.__init__(
self, notebook, style=wx.TE_MULTILINE) # DEBUG wx.TE_READONLY |
self._create_ui()
self._add_to_notebook(notebook)
self.SetFont(widgets.Font().fixed_log)
def _create_ui(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self)
self.SetSizer(sizer)
def _add_to_notebook(self, notebook):
notebook.add_tab(self, 'Create RIDE Desktop Shortcut',
allow_closing=True)
def close(self, notebook):
notebook.delete_tab(self)
def call_creator(self):
return postinstall.caller(self.GetParent(), sys.platform.lower())
``` |
{
"source": "35niavlys/teeworlds-fng2-mod",
"score": 2
} |
#### File: teeworlds-fng2-mod/scripts/mass_server.py
```python
import random
import os
masterservers = ["localhost 8300"]
maps = [
["dm1", "dm2", "dm6"],
["dm1", "dm2", "dm6"],
["ctf1", "ctf2", "ctf3"],
]
servernames = [
"%s playhouse",
"%s own server",
]
nicks = []
for l in file("scripts/nicks.txt"):
nicks += l.replace(":port80c.se.quakenet.org 353 matricks_ = #pcw :", "").strip().split()
inick = 0
def get_nick():
global inick, nicks
inick = (inick+1)%len(nicks)
return nicks[inick].replace("`", "\`")
for s in xrange(0, 350):
cmd = "./fake_server_d_d "
cmd += '-n "%s" ' % (random.choice(servernames) % get_nick())
for m in masterservers:
cmd += '-m %s '%m
max = random.randint(2, 16)
cmd += "-x %d " % max
t = random.randint(0, 2)
cmd += '-a "%s" ' % random.choice(maps[t])
cmd += '-g %d ' % random.randint(0, 100)
cmd += '-t %d ' % t # dm, tdm, ctf
cmd += "-f %d " % random.randint(0, 1) # password protected
for p in xrange(0, random.randint(0, max)):
cmd += '-p "%s" %d ' % (get_nick(), random.randint(0, 20))
print cmd
os.popen2(cmd)
``` |
{
"source": "36000/cnn_colorflow",
"score": 2
} |
#### File: cnn_colorflow/final_scripts/combine_plots.py
```python
import sys
import os
from PIL import Image, ImageDraw, ImageFont
datasets = ['s8_gg', 'h_qq', 'qx_qg', 'cp_qq', 'h_gg', 'zp_qq']
datasets_c = ['s8_gg_rot_charged', 'h_qq_rot_charged', 'qx_qg_rot_charged', 'cp_qq_rot_charged', 'h_gg_rot_charged', 'zp_qq_rot_charged']
# for combine, 0 is pcc, 1 is roc, 2 is sic
def u_tri(using_charged, combine):
images = []
for i in range(6):
for j in range(6):
if i >= j:
continue
if combine == 0:
f_path = pcc_path
elif combine == 1:
f_path = roc_path
else:
f_path = sic_path
if using_charged:
path = c_path(f_path, datasets_c[i], datasets_c[j], True)
else:
path = c_path(f_path, datasets[i], datasets[j])
images.append(Image.open(path))
width, height = images[0].size
txt_offset = 200
comb_im = Image.new('RGB', (width * 6 + txt_offset, height * 6 + txt_offset), color=(255,255,255))
draw = ImageDraw.Draw(comb_im)
font = ImageFont.truetype("../../Roboto-Black.ttf", 50)
for i in range(6):
center_offset = 180
draw.text((txt_offset + center_offset + width*i, 0), datasets[i], (0,0,0),font=font)
draw.text((0, txt_offset + center_offset + height*i), datasets[i], (0,0,0),font=font)
x_offset = 0
y_offset = 0
for im in images:
comb_im.paste(im, (x_offset * width + txt_offset + width, y_offset * height + txt_offset))
x_offset += 1
if x_offset >= 5:
y_offset += 1
x_offset = y_offset
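# (added comment) Restarting the next row at column index `y_offset` keeps the pasted panels on
# the upper triangle of the 6x6 grid: row i only holds the pairs (i, j) with j > i.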
path = 'final_curves/combined/all_'
if combine == 0:
path = path + 'pcc'
elif combine == 1:
path = path + 'roc'
else:
path = path + 'sic'
if using_charged:
path = path + '_charged'
path = path + '.png'
comb_im.save(path)
def c_path(path_f, sig, bg, charged = False):
if os.path.isfile(path_f(sig, bg)):
return path_f(sig, bg)
else:
return path_f(bg, sig)
def pcc_path(sig, bg):
return 'final_curves/pearsons/truths/' + sig + '_vs_' + bg + '_pearson_truth.png'
def sic_path(sig, bg):
return 'final_curves/sic_' + sig + '_vs_' + bg + '.png'
def roc_path(sig, bg):
return 'final_curves/roc_' + sig + '_vs_' + bg + '.png'
def img_path(sig):
return 'final_curves/Average_' + sig + '.png'
def all_img(charged):
images = []
for i in range(6):
if charged:
images.append(Image.open(img_path(datasets_c[i])))
else:
images.append(Image.open(img_path(datasets[i])))
width, height = images[0].size
comb_im = Image.new('RGB', (width * 3, height * 2), color=(255,255,255))
x_offset = 0
y_offset = 0
for im in images:
comb_im.paste(im, (x_offset * width, y_offset * height))
x_offset += 1
if x_offset >= 3:
y_offset += 1
x_offset = 0
if charged:
comb_im.save('final_curves/combined/all_img_charged.png')
else:
comb_im.save('final_curves/combined/all_img.png')
def cp_main():
for i in [False, True]:
all_img(i)
for j in range(3):
u_tri(i, j)
if __name__ == '__main__':
cp_main()
```
#### File: cnn_colorflow/final_scripts/sensitivity_study.py
```python
import numpy as np
import sys
import os
sys.path.append("../utilities")
sys.path.append("../visualization")
import constants
from data import get_train_test
from keras.models import load_model
from metrics import plot_n_roc_sic
def sen_stud(datasets, ischarged):
for i in range(4):
for j in range(4):
if j >= i:
continue
sig = datasets[i]
bg = datasets[j]
if ischarged:
constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '_rot_charged.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '_rot_charged.h5')
charge = 'charged'
else:
constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
charge = 'standard'
if ischarged:
model_name = sig + '_vs_' + bg
else:
model_name = sig + '_rot_charged_vs_' + bg + '_rot_charged'
constants.MODEL_NAME = model_name + '_model'
model = load_model('../best_model/' + model_name + '_model')
_, X_test_14, _, y_test_14, \
_, _, _, _ = get_train_test(n=150000)
if not "qx_qg" in model_name:
constants.SIG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + sig + '_col_1_' + charge + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + bg + '_col_1_' + charge + '.h5')
_, X_test_1, _, y_test_1, \
_, _, _, _ = get_train_test(n=30000, train_size=0)
np.save('final_curves/sensitivity_study/yvals/true_'+ sig + '_vs_' + bg + '_col_1_' + charge, y_test_1)
np.save('final_curves/sensitivity_study/yvals/hat_'+ sig + '_vs_' + bg + '_col_1_' + charge, model.predict(X_test_1))
constants.SIG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + sig + '_col_2_' + charge + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + bg + '_col_2_' + charge + '.h5')
_, X_test_2, _, y_test_2, \
_, _, _, _ = get_train_test(n=30000, train_size=0)
np.save('final_curves/sensitivity_study/yvals/true_'+ sig + '_vs_' + bg + '_col_2_' + charge, y_test_2)
np.save('final_curves/sensitivity_study/yvals/hat_'+ sig + '_vs_' + bg + '_col_2_' + charge, model.predict(X_test_2))
constants.SIG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + sig + '_pp_21_' + charge + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + bg + '_pp_21_' + charge + '.h5')
_, X_test_21, _, y_test_21, \
_, _, _, _ = get_train_test(n=30000, train_size=0)
np.save('final_curves/sensitivity_study/yvals/true_'+ sig + '_vs_' + bg + '_pp_21_' + charge, y_test_21)
np.save('final_curves/sensitivity_study/yvals/hat_'+ sig + '_vs_' + bg + '_pp_21_' + charge, model.predict(X_test_21))
constants.SIG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + sig + '_pp_25_' + charge + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + bg + '_pp_25_' + charge + '.h5')
_, X_test_25, _, y_test_25, \
_, _, _, _ = get_train_test(n=30000, train_size=0)
np.save('final_curves/sensitivity_study/yvals/true_'+ sig + '_vs_' + bg + '_pp_25_' + charge, y_test_25)
np.save('final_curves/sensitivity_study/yvals/hat_'+ sig + '_vs_' + bg + '_pp_25_' + charge, model.predict(X_test_25))
constants.SIG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + sig + '_pp_26_' + charge + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, 'sensitivity_study/' + bg + '_pp_26_' + charge + '.h5')
_, X_test_26, _, y_test_26, \
_, _, _, _ = get_train_test(n=30000, train_size=0)
np.save('final_curves/sensitivity_study/yvals/true_'+ sig + '_vs_' + bg + '_pp_26_' + charge, y_test_26)
np.save('final_curves/sensitivity_study/yvals/hat_'+ sig + '_vs_' + bg + '_pp_26_' + charge, model.predict(X_test_26))
if not "qx_qg" in model_name:
X_tests = [X_test_1, X_test_2, X_test_14, X_test_21, X_test_25, X_test_26]
y_tests = [y_test_1, y_test_2, y_test_14, y_test_21, y_test_25, y_test_26]
models = [model, model, model, model, model, model]
model_types = [True, True, True, True, True, True]
labels = ['Color 1', 'Color 2', 'pp 14', 'pp 21', 'pp 25', 'pp 26']
else:
X_tests = [X_test_2, X_test_14, X_test_21, X_test_25, X_test_26]
y_tests = [y_test_2, y_test_14, y_test_21, y_test_25, y_test_26]
models = [model, model, model, model, model]
model_types = [True, True, True, True, True]
labels = ['Color 2', 'pp 14', 'pp 21', 'pp 25', 'pp 26']
plot_n_roc_sic(model_name, 'final_curves/sensitivity_study/sic_sens_'+model_name, X_tests, y_tests, models, model_types, labels, True)
plot_n_roc_sic(model_name, 'final_curves/sensitivity_study/roc_sens_'+model_name, X_tests, y_tests, models, model_types, labels, False)
def main():
datasets = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
sen_stud(datasets, True)
sen_stud(datasets, False)
if __name__ == '__main__':
main()
```
#### File: cnn_colorflow/final_scripts/specific_plot.py
```python
import numpy as np
import sys
import os
from keras.models import load_model
sys.path.append("../utilities")
import constants
from data import get_train_test
from metrics import plot_n_roc_sic
datasets_c = ['h_qq_rot_charged', 'h_gg_rot_charged', 'cp_qq_rot_charged', 'qx_qg_rot_charged', 's8_gg_rot_charged', 'zp_qq_rot_charged']
datasets_s = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
def comp_all(i, datasets = datasets_s, n = 150000):
name = 'all_' + datasets[i] + '_comps'
X_tests = []
y_yests = []
models = []
model_types = []
labels = []
sig = datasets[i]
for j in range(6):
if j == i:
continue
bg = datasets[j]
constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
X_train, X_test, y_train, y_test, \
_, _, sig_metadata, \
bg_metadata = get_train_test(n=n)
if os.path.isfile('../best_model/' + sig + '_vs_' + bg + '_model'):
model_name = sig + '_vs_' + bg
else:
model_name = bg + '_vs_' + sig
model = load_model('../best_model/' + model_name + '_model')
X_tests.append(X_test)
y_yests.append(y_test)
models.append(model)
model_types.append(True)
labels.append(model_name)
plot_n_roc_sic(name, 'final_curves/sic_'+name, X_tests, y_yests, models, model_types, labels, True, fontfac=0.5)
plot_n_roc_sic(name, 'final_curves/roc_'+name, X_tests, y_yests, models, model_types, labels, False, fontfac=0.5)
if __name__ == '__main__':
for i in range(len(datasets_s)):
comp_all(i)
```
#### File: cnn_colorflow/utilities/data.py
```python
import os
import numpy as np
import h5py
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import gc
import constants
def get_pixels_metadata(bg=False, n=-1, delta_R_min=float('-inf'), delta_R_max=float('inf'), same_file=False):
"""Return pixel data and metadata for either the octets or singlets.
Return:
pixels -- a (n, jet image width^2) numpy array of the pixel data.
metadata -- a (n, 4) pandas dataframe containing all other data, such as
mass, jet pull, and delta R.
Arguments:
bg -- true (false) if the background (signal) data should be collected.
n -- the number of samples to collect. If n == -1, all samples will be collected.
delta_R_min -- the minimum delta R allowed for a sample to be included.
delta_R_max -- the maximum delta R allowed for a sample to be included.
The pixel data is a (n, jet image width^2) numpy array.
The metadata is a (n, 4) pandas array.
"""
print("[data] Getting pixel data ...")
if bg:
h5file = constants.BG_H5
else:
h5file = constants.SIG_H5
print("[data] Loading from {} ...".format(h5file))
data = h5py.File(h5file, 'r')
sig_cutoff = int(np.sum(data['meta_variables/signal'][()]))
size = data['meta_variables/pull1'][()].shape[0]
if n == -1:
metadata = np.zeros((size, 4))
metadata[:, 0] = np.array(data['meta_variables/pull1'][()])
metadata[:, 1] = np.array(data['meta_variables/pull2'][()])
metadata[:, 2] = np.array(data['meta_variables/jet_mass'][()])
metadata[:, 3] = np.array(data['meta_variables/jet_delta_R'][()])
pixels = data['images'][()]
else:
metadata = np.zeros((n, 4))
metadata[:, 0] = np.array(data['meta_variables/pull1'][:n])
metadata[:, 1] = np.array(data['meta_variables/pull2'][:n])
metadata[:, 2] = np.array(data['meta_variables/jet_mass'][:n])
metadata[:, 3] = np.array(data['meta_variables/jet_delta_R'][:n])
pixels = data['images'][:n]
metadata = pd.DataFrame(metadata, columns=['pull_1', 'pull_2', 'mass', 'delta_R'])
# Restrict delta R
pixels = pixels[np.where((metadata['delta_R'] <= delta_R_max) & (metadata['delta_R'] >= delta_R_min))]
print("[data] {} pixels shape: {}".format("bg" if bg else "sig", pixels.shape))
print("[data] {} metadata head:\n {}".format("bg" if bg else "sig", metadata.head()))
if (same_file):
if (bg):
pixels = pixels[sig_cutoff:]
metadata = metadata[sig_cutoff:]
else:
pixels = pixels[:sig_cutoff]
metadata = metadata[:sig_cutoff]
return (pixels, metadata) if n == -1 else (pixels[:n], metadata[:n])
def preprocess(x_train, x_test, no_train=False):
def safeNorm(arr, ord): #implement own norm, this one sucks
def shape(arr):
return arr.reshape(arr.shape[0], arr.shape[1] * arr.shape[2] * arr.shape[3])
def deshape(arr):
deshape_dim = int(arr.shape[0]**0.5)
return arr.reshape(deshape_dim, deshape_dim, 1)
def myNorm(arr, ord):
size = arr.shape[0]
return np.power(np.sum(np.power(np.abs(arr), ord), axis=0), 1.0/float(size))
arr = shape(arr)
arr = myNorm(arr, ord)
arr[arr==0] = 1 #only occurs in pixels that are always 0 anyways
return deshape(arr)
def safeLog(arr):
eps = -6
mask = (arr==0)
arr = np.log(arr)
arr[mask] = eps
arr[arr<eps] = eps
return 1+arr/6 #put back into reasonable range
if not no_train:
x_train = safeLog(x_train)
x_test = safeLog(x_test)
norm = safeNorm(x_train, 1)
np.divide(x_train, norm, out=x_train)
np.divide(x_test, norm, out=x_test)
else:
x_test = safeLog(x_test)
np.divide(x_test, safeNorm(x_test, 1), out=x_test)
return x_train, x_test
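# Worked example (added for clarity, toy numbers): safeLog maps an intensity p to 1 + log(p)/6
# with a floor at log(p) = -6, so p = 1 -> 1.0, p = exp(-3) -> 0.5 and p = 0 -> 0.0; safeNorm then
# computes, per pixel, (sum over training images of |value|)^(1/n_train) on the log-transformed
# data (zeros replaced by 1), and both train and test sets are divided by that per-pixel map.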
def get_train_test(n=-1, delta_R_min=float("-inf"), delta_R_max=float("inf"), weighting_mass=False, same_file=False, train_size=0.8):
"""Returns X, y, and weight arrays for training and testing.
Return:
X_train -- numpy array of shape (train_size * n, 1, 32, 32)
X_test -- numpy array of shape ((1 - train_size) * n, 1, 32, 32)
y_train -- numpy array of shape (train_size * n)
y_test -- numpy array of shape ((1 - train_size) * n)
weights_train -- numpy array of shape (train_size * n)
weights_test -- numpy array of shape ((1 - train_size) * n)
Arguments:
n -- same as in get_pixels_metadata()
delta_R_min -- same as in get_pixels_metadata()
delta_R_max -- same as in get_pixels_metadata()
weighting_mass -- if true, modify weights such that when the samples are
binned by mass, the number of weighted singlet samples in
each bin is equivalent to the number of octet samples.
"""
bg_pixels, bg_metadata = get_pixels_metadata(bg=True, n=n, delta_R_min=delta_R_min, delta_R_max=delta_R_max, same_file=same_file)
sig_pixels, sig_metadata = get_pixels_metadata(bg=False, n=n, delta_R_min=delta_R_min, delta_R_max=delta_R_max, same_file=same_file)
sig_weights = np.ones(sig_pixels.shape[0])
# Calculate weights.
if weighting_mass:
mass_min = 0
mass_max = 400
mass_num_bins = 100
mass_bins = np.linspace(mass_min, mass_max, mass_num_bins)
for i in range(1, mass_num_bins):
bg_bin = (bg_metadata['mass'] < mass_bins[i]) & (bg_metadata['mass'] >= mass_bins[i-1])
sig_bin = (sig_metadata['mass'] < mass_bins[i]) & (sig_metadata['mass'] >= mass_bins[i-1])
bg_count = np.sum(bg_bin)
sig_count = np.sum(sig_bin)
if sig_count == 0:
sig_weights[sig_bin] = 0.0
else:
sig_weights[sig_bin] = float(bg_count) / float(sig_count)
bg_weights = np.ones(bg_pixels.shape[0])
weights = np.concatenate((bg_weights, sig_weights), axis=0)
# Reshape the pixels into 2D images
bg_pixels = bg_pixels.reshape(bg_pixels.shape[0], bg_pixels.shape[1], bg_pixels.shape[2], 1)
sig_pixels = sig_pixels.reshape(sig_pixels.shape[0], sig_pixels.shape[1], sig_pixels.shape[2], 1)
bg_y = np.zeros(bg_pixels.shape[0])
sig_y = np.ones(sig_pixels.shape[0])
bg_X_train, bg_X_test, sig_X_train, sig_X_test, \
bg_y_train, bg_y_test, sig_y_train, sig_y_test, \
bg_weights_train, bg_weights_test, sig_weights_train, sig_weights_test = \
train_test_split(bg_pixels, sig_pixels, bg_y, sig_y, bg_weights, sig_weights, train_size=train_size, shuffle=False)
X_train = np.concatenate((bg_X_train, sig_X_train), axis=0)
X_test = np.concatenate((bg_X_test, sig_X_test), axis=0)
y_train = np.concatenate((bg_y_train, sig_y_train), axis=0)
y_test = np.concatenate((bg_y_test, sig_y_test), axis=0)
weights_train = np.concatenate((bg_weights_train, sig_weights_train), axis=0)
weights_test = np.concatenate((bg_weights_test, sig_weights_test), axis=0)
X_train, y_train, weights_train = \
shuffle(X_train, y_train, weights_train, random_state = np.random.RandomState(seed=100))
X_test, y_test, weights_test = \
shuffle(X_test, y_test, weights_test, random_state = np.random.RandomState(seed=100))
del sig_y
del bg_y
del bg_pixels
del sig_pixels
gc.collect() # clean up memory
X_train, X_test = preprocess(X_train, X_test, no_train=(train_size==0))
return [X_train, X_test, y_train, y_test, weights_train, weights_test, sig_metadata, bg_metadata]
def main():
X_train, X_test, y_train, y_test, weights_train, weights_test , _, _ = get_train_test()
if __name__ == '__main__':
main()
``` |
{
"source": "36000/myow",
"score": 2
} |
#### File: myow/scripts/monkey-train.py
```python
import os
import copy
from absl import app
from absl import flags
import torch
import tensorflow as tf
from torch.utils.data import DataLoader
from tqdm import tqdm
from self_supervised.data import ReachNeuralDataset, get_angular_data
from self_supervised.data import generators, utils
from self_supervised.transforms import neural_transforms as transforms
from self_supervised.trainer import MYOWTrainer
from self_supervised.nets import MLP
from self_supervised.tasks import neural_tasks
from self_supervised.utils import set_random_seeds
FLAGS = flags.FLAGS
# Dataset
flags.DEFINE_string('data_path', './data/mihi-chewie', 'Path to monkey data.')
flags.DEFINE_enum('primate', 'chewie', ['chewie', 'mihi'], 'Primate name.')
flags.DEFINE_integer('day', 1, 'Day of recording.', lower_bound=1, upper_bound=2)
flags.DEFINE_float('train_split', 0.8, 'train/test split', lower_bound=0., upper_bound=0.99)
# Transforms
flags.DEFINE_integer('max_lookahead', 5, 'Max lookahead.')
flags.DEFINE_float('noise_sigma', 0.2, 'Noise sigma.', lower_bound=0.)
flags.DEFINE_float('dropout_p', 0.8, 'Dropout probability.', lower_bound=0., upper_bound=1.)
flags.DEFINE_float('dropout_apply_p', 0.9, 'Probability of applying dropout.', lower_bound=0., upper_bound=1.)
flags.DEFINE_float('pepper_p', 0.0, 'Pepper probability.', lower_bound=0., upper_bound=1.)
flags.DEFINE_float('pepper_sigma', 0.3, 'Pepper sigma.', lower_bound=0.)
flags.DEFINE_float('pepper_apply_p', 0.0, 'Probability of applying pepper.', lower_bound=0., upper_bound=1.)
flags.DEFINE_boolean('structured_transform', True, 'Whether the transformations are consistent across temporal shift.')
# Dataloader
flags.DEFINE_integer('batch_size', 256, 'Batch size.')
flags.DEFINE_integer('pool_batch_size', 512, 'Batch size.')
flags.DEFINE_integer('num_workers', 4, 'Number of workers.')
# architecture
flags.DEFINE_integer('representation_size', 128, 'Representation size.')
flags.DEFINE_list('encoder_hidden_layers', [128, 128, 128], 'Sizes of hidden layers in encoder.')
flags.DEFINE_integer('projection_size', 32, 'Size of first projector.')
flags.DEFINE_integer('projection_hidden_size', 256, 'Size of hidden layer in first projector.')
flags.DEFINE_integer('projection_size_2', 16, 'Size of second projector.')
flags.DEFINE_integer('projection_hidden_size_2', 64, 'Size of hidden layer in second projector.')
# Training parameters
flags.DEFINE_float('lr', 0.8, 'Base learning rate.')
flags.DEFINE_float('mm', 0.9, 'Momentum for exponential moving average.')
flags.DEFINE_float('weight_decay', 1e-6, 'Weight decay.')
flags.DEFINE_float('myow_weight', 0.1, 'Base learning rate.')
flags.DEFINE_integer('miner_k', 3, 'k in knn during mining.')
flags.DEFINE_integer('num_epochs', 1000, 'Number of training epochs.')
flags.DEFINE_integer('lr_warmup_epochs', 10, 'Warmup period for learning rate.')
flags.DEFINE_integer('myow_warmup_epochs', 10, 'Warmup period during which mining is inactive.')
flags.DEFINE_integer('myow_rampup_epochs', 110, 'Rampup period for myow weight.')
# Random seed
flags.DEFINE_integer('random_seed', 100, 'Random seed.')
def main(argv):
set_random_seeds(FLAGS.random_seed)
# load dataset
dataset = ReachNeuralDataset(FLAGS.data_path, primate=FLAGS.primate, day=FLAGS.day, binning_period=0.1,
scale_firing_rates=False, train_split=FLAGS.train_split)
dataset.train()
firing_rates = dataset.firing_rates
raw_labels = dataset.labels
sequence_lengths = dataset.trial_lengths
transform = transforms.Compose(transforms.RandomizedDropout(FLAGS.dropout_p, apply_p=FLAGS.dropout_apply_p),
transforms.Normalize(torch.tensor(dataset.mean), torch.tensor(dataset.std)),
transforms.Noise(FLAGS.noise_sigma),
transforms.Pepper(FLAGS.pepper_p, FLAGS.pepper_sigma, apply_p=FLAGS.pepper_apply_p),
)
transform_val = transforms.Compose(transforms.Normalize(torch.tensor(dataset.mean), torch.tensor(dataset.std)),)
pair_sets = utils.onlywithin_indices(sequence_lengths, k_min=-FLAGS.max_lookahead, k_max=FLAGS.max_lookahead)
generator = generators.LocalGlobalGenerator(firing_rates, pair_sets, sequence_lengths,
num_examples=firing_rates.shape[0],
batch_size=FLAGS.batch_size,
pool_batch_size=FLAGS.pool_batch_size,
transform=transform, num_workers=FLAGS.num_workers,
structured_transform=FLAGS.structured_transform)
dataloader = DataLoader(generator, num_workers=FLAGS.num_workers, drop_last=True)
# build encoder network
input_size = firing_rates.shape[1]
encoder = MLP([input_size, *FLAGS.encoder_hidden_layers, FLAGS.representation_size], batchnorm=True)
trainer = MYOWTrainer(encoder=encoder,
representation_size=FLAGS.representation_size, projection_size=FLAGS.projection_size,
projection_hidden_size=FLAGS.projection_hidden_size,
projection_size_2=FLAGS.projection_size_2,
projection_hidden_size_2=FLAGS.projection_hidden_size_2,
base_lr=FLAGS.lr, base_momentum=FLAGS.mm, momentum=0.9, weight_decay=FLAGS.weight_decay,
optimizer_type='lars', batch_size=FLAGS.batch_size, total_epochs=FLAGS.num_epochs,
exclude_bias_and_bn=True, train_dataloader=dataloader, prepare_views=generator.prepare_views,
warmup_epochs=FLAGS.lr_warmup_epochs, myow_warmup_epochs=FLAGS.myow_warmup_epochs,
myow_rampup_epochs=FLAGS.myow_rampup_epochs, myow_max_weight=FLAGS.myow_weight,
view_miner_k=FLAGS.miner_k, gpu=0, log_step=10,
log_dir='runs-chewie1/myow_run_1')
data_train, data_test = get_angular_data(dataset, device=trainer.device, velocity_threshold=5)
def evaluate():
trainer.model.eval()
encoder_eval = copy.deepcopy(trainer.model.online_encoder)
classifier = torch.nn.Sequential(torch.nn.Linear(FLAGS.representation_size, 2)).to(trainer.device)
class_optimizer = torch.optim.Adam(classifier.parameters(), lr=0.01, weight_decay=1e-5)
acc, delta_acc = neural_tasks.train_angle_classifier(
encoder_eval, classifier, data_train, data_test, class_optimizer,
transform=transform, transform_val=transform_val, device=trainer.device,
num_epochs=100, batch_size=FLAGS.batch_size)
trainer.writer.add_scalar('trial_angles/acc_train', acc.train_smooth, trainer.step)
trainer.writer.add_scalar('trial_angles/delta_acc_train', delta_acc.train_smooth, trainer.step)
trainer.writer.add_scalar('trial_angles/acc_test', acc.val_smooth, trainer.step)
trainer.writer.add_scalar('trial_angles/delta_acc_test', delta_acc.val_smooth, trainer.step)
for epoch in tqdm(range(FLAGS.num_epochs + 1)):
trainer.model.train()
trainer.train_epoch()
if epoch % 20 == 0:
trainer.model.eval()
evaluate()
if __name__ == "__main__":
print(f'PyTorch version: {torch.__version__}')
app.run(main)
```
#### File: self_supervised/data/monkey_reach_dataset.py
```python
import os
import pickle
import numpy as np
from tqdm import tqdm
import torch
from self_supervised.data.io import loadmat
FILENAMES = {
('mihi', 1): 'full-mihi-03032014',
('mihi', 2): 'full-mihi-03062014',
('chewie', 1): 'full-chewie-10032013',
('chewie', 2): 'full-chewie-12192013',
}
class ReachNeuralDataset:
def __init__(self, path, primate='mihi', day=1,
binning_period=0.1, binning_overlap=0.0, train_split=0.8,
scale_firing_rates=False, scale_velocity=False, sort_by_reach=True):
self.path = path
# get path to data
assert primate in ['mihi', 'chewie']
assert day in [1, 2]
self.primate = primate
self.filename = FILENAMES[(self.primate, day)]
self.raw_path = os.path.join(self.path, 'raw/%s.mat') % self.filename
self.processed_path = os.path.join(self.path, 'processed/%s.pkl') % (self.filename + '-%.2f' % binning_period)
# get binning parameters
self.binning_period = binning_period
self.binning_overlap = binning_overlap
if self.binning_overlap != 0:
raise NotImplementedError
# train/val split
self.train_split = train_split
# initialize some parameters
self.dataset_ = {}
self.subset = 'train' # default selected subset
### Process data
# load data
if not os.path.exists(self.processed_path):
data_train_test = self._process_data()
else:
data_train_test = self._load_processed_data()
# split data
data_train, data_test = self._split_data(data_train_test)
self._num_trials = {'train': len(data_train['firing_rates']),
'test': len(data_test['firing_rates'])}
# compute mean and std of firing rates
self.mean, self.std = self._compute_mean_std(data_train, feature='firing_rates')
# remove neurons with no variance
data_train, data_test = self._remove_static_neurons(data_train, data_test)
# scale data
if scale_firing_rates:
data_train, data_test = self._scale_data(data_train, data_test, feature='firing_rates')
if scale_velocity:
data_train, data_test = self._scale_data(data_train, data_test, feature='velocity')
# sort by reach direction
if sort_by_reach:
data_train = self._sort_by_reach_direction(data_train)
data_test = self._sort_by_reach_direction(data_test)
# build sequences
trial_lengths_train = [seq.shape[0] for seq in data_train['firing_rates']]
# merge everything
for feature in data_train.keys():
data_train[feature] = np.concatenate(data_train[feature]).squeeze()
data_test[feature] = np.concatenate(data_test[feature]).squeeze()
data_train['trial_lengths'] = trial_lengths_train
data_train['reach_directions'] = np.unique(data_train['labels']).tolist()
data_train['reach_lengths'] = [np.sum(data_train['labels'] == reach_id)
for reach_id in data_train['reach_directions']]
# map labels to 0 .. N-1 for training
data_train['raw_labels'] = data_train['labels'].copy()
data_test['raw_labels'] = data_test['labels'].copy()
data_train['labels'] = self._map_labels(data_train)
data_test['labels'] = self._map_labels(data_test)
self.dataset_['train'] = data_train
self.dataset_['test'] = data_test
@property
def dataset(self):
return self.dataset_[self.subset]
def __getattr__(self, item):
return self.dataset[item]
def train(self):
self.subset = 'train'
def test(self):
self.subset = 'test'
@property
def num_trials(self):
return self._num_trials[self.subset]
@property
def num_neurons(self):
return self[0]['firing_rates'].shape[1]
def _process_data(self):
print('Preparing dataset: Binning data.')
# load data
mat_dict = loadmat(self.raw_path)
# bin data
data = self._bin_data(mat_dict)
self._save_processed_data(data)
return data
def _save_processed_data(self, data):
with open(self.processed_path, 'wb') as output:
pickle.dump({'data': data}, output)
def _load_processed_data(self):
with open(self.processed_path, "rb") as fp:
data = pickle.load(fp)['data']
return data
def _bin_data(self, mat_dict):
# load matrix
trialtable = mat_dict['trial_table']
neurons = mat_dict['out_struct']['units']
pos = np.array(mat_dict['out_struct']['pos'])
vel = np.array(mat_dict['out_struct']['vel'])
acc = np.array(mat_dict['out_struct']['acc'])
force = np.array(mat_dict['out_struct']['force'])
time = vel[:, 0]
num_neurons = len(neurons)
num_trials = trialtable.shape[0]
data = {'firing_rates': [], 'position': [], 'velocity': [], 'acceleration': [],
'force': [], 'labels': [], 'sequence': []}
for trial_id in tqdm(range(num_trials)):
min_T = trialtable[trial_id, 9]
max_T = trialtable[trial_id, 12]
# grids= minT:(delT-TO):(maxT-delT);
grid = np.arange(min_T, max_T + self.binning_period, self.binning_period)
grids = grid[:-1]
gride = grid[1:]
num_bins = len(grids)
neurons_binned = np.zeros((num_bins, num_neurons))
pos_binned = np.zeros((num_bins, 2))
vel_binned = np.zeros((num_bins, 2))
acc_binned = np.zeros((num_bins, 2))
force_binned = np.zeros((num_bins, 2))
targets_binned = np.zeros((num_bins, 1))
id_binned = trial_id * np.ones((num_bins, 1))
for k in range(num_bins):
bin_mask = (time >= grids[k]) & (time <= gride[k])
if len(pos) > 0:
pos_binned[k, :] = np.mean(pos[bin_mask, 1:], axis=0)
vel_binned[k, :] = np.mean(vel[bin_mask, 1:], axis=0)
if len(acc):
acc_binned[k, :] = np.mean(acc[bin_mask, 1:], axis=0)
if len(force) > 0:
force_binned[k, :] = np.mean(force[bin_mask, 1:], axis=0)
targets_binned[k, 0] = trialtable[trial_id, 1]
for i in range(num_neurons):
for k in range(num_bins):
spike_times = neurons[i]['ts']
bin_mask = (spike_times >= grids[k]) & (spike_times <= gride[k])
neurons_binned[k, i] = np.sum(bin_mask) / self.binning_period
data['firing_rates'].append(neurons_binned)
data['position'].append(pos_binned)
data['velocity'].append(vel_binned)
data['acceleration'].append(acc_binned)
data['force'].append(force_binned)
data['labels'].append(targets_binned)
data['sequence'].append(id_binned)
return data
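# Worked example (added for clarity, toy numbers): with binning_period = 0.1 s, a trial spanning
# roughly [2.0 s, 2.5 s] yields bin edges 2.0, 2.1, ..., 2.5 (5 bins); a unit with 3 spikes in
# [2.1, 2.2] contributes 3 / 0.1 = 30 spikes/s to that bin, and the kinematic channels are simply
# averaged over the same window.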
def _split_data(self, data):
num_trials = len(data['firing_rates'])
split_id = int(num_trials * self.train_split)
data_train = {}
data_test = {}
for key, feature in data.items():
data_train[key] = feature[:split_id]
data_test[key] = feature[split_id:]
return data_train, data_test
def _remove_static_neurons(self, data_train, data_test):
for i in range(len(data_train['firing_rates'])):
data_train['firing_rates'][i] = data_train['firing_rates'][i][:, self.std > 1e-3]
for i in range(len(data_test['firing_rates'])):
data_test['firing_rates'][i] = data_test['firing_rates'][i][:, self.std > 1e-3]
self.mean = self.mean[self.std > 1e-3]
self.std = self.std[self.std > 1e-3]
return data_train, data_test
def _compute_mean_std(self, data, feature='firing_rates'):
concatenated_data = np.concatenate(data[feature])
mean = concatenated_data.mean(axis=0)
std = concatenated_data.std(axis=0)
return mean, std
def _scale_data(self, data_train, data_test, feature):
concatenated_data = np.concatenate(data_train[feature])
mean = concatenated_data.mean(axis=0)
std = concatenated_data.std(axis=0)
for i in range(len(data_train[feature])):
data_train[feature][i] = (data_train[feature][i] - mean) / std
for i in range(len(data_test[feature])):
data_test[feature][i] = (data_test[feature][i] - mean) / std
return data_train, data_test
def _sort_by_reach_direction(self, data):
sorted_by_label = np.argsort(np.array([reach_dir[0, 0] for reach_dir in data['labels']]))
for feature in data.keys():
data[feature] = np.array(data[feature])[sorted_by_label]
return data
def _map_labels(self, data):
labels = data['labels']
for i, l in enumerate(np.unique(labels)):
labels[data['labels']==l] = i
return labels
def get_class_data(dataset, device='cpu'):
def get_data():
firing_rates = dataset.firing_rates
labels = dataset.labels
data = [torch.tensor(firing_rates, dtype=torch.float32, device=device),
torch.tensor(labels, dtype=torch.long, device=device)]
return data
dataset.train()
data_train = get_data()
dataset.test()
data_test = get_data()
dataset.train()
return data_train, data_test
def get_angular_data(dataset, velocity_threshold=-1., device='cpu'):
def get_data():
velocity_mask = np.linalg.norm(dataset.velocity, 2, axis=1) > velocity_threshold
firing_rates = dataset.firing_rates[velocity_mask]
labels = dataset.labels[velocity_mask]
angles = (2 * np.pi / 8 * labels)[:, np.newaxis]
cos_sin = np.concatenate([np.cos(angles), np.sin(angles)], axis=1)
data = [torch.tensor(firing_rates, dtype=torch.float32, device=device),
torch.tensor(angles, dtype=torch.float32, device=device),
torch.tensor(cos_sin, dtype=torch.float32, device=device)]
return data
dataset.train()
data_train = get_data()
dataset.test()
data_test = get_data()
dataset.train()
return data_train, data_test
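# Note (added for clarity): the reach-direction label (0..7) is mapped to the angle 2*pi*label/8
# and then to its (cos, sin) pair, so the regression target stays continuous across the 0 / 2*pi
# wrap-around instead of jumping between neighbouring directions.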
```
#### File: self_supervised/model/mlp3.py
```python
from torch import nn
class MLP3(nn.Module):
r"""MLP class used for projector and predictor in :class:`BYOL`. The MLP has one hidden layer.
.. note::
The hidden layer should be larger than both input and output layers, according to the
:class:`BYOL` paper.
Args:
input_size (int): Size of input features.
output_size (int): Size of output features (projection or prediction).
hidden_size (int): Size of hidden layer.
"""
def __init__(self, input_size, output_size, hidden_size):
super().__init__()
self.net = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, output_size)
)
def forward(self, x):
return self.net(x)
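# Illustrative usage sketch (hypothetical sizes, not from the original repo):
# projector = MLP3(input_size=128, output_size=32, hidden_size=256)
# z = projector(torch.randn(8, 128))   # z.shape == (8, 32)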
```
#### File: self_supervised/model/myow_factory.py
```python
import torch
import torch.nn.functional as F
def myow_factory(byol_class):
r"""Factory function for adding mining feature to an architecture."""
class MYOW(byol_class):
r"""
Class that adds ability to mine views to base class :obj:`byol_class`.
Args:
n_neighbors (int, optional): Number of neighbors used in knn. (default: :obj:`1`)
"""
def __init__(self, *args, n_neighbors=1):
super().__init__(*args)
self.k = n_neighbors
def _compute_distance(self, x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
dist = 2 - 2 * torch.sum(x.view(x.shape[0], 1, x.shape[1]) *
y.view(1, y.shape[0], y.shape[1]), -1)
return dist
def _knn(self, x, y):
# compute distance
dist = self._compute_distance(x, y)
# compute k nearest neighbors
values, indices = torch.topk(dist, k=self.k, largest=False)
# randomly select one of the neighbors
selection_mask = torch.randint(self.k, size=(indices.size(0),))
mined_views_ids = indices[torch.arange(indices.size(0)).to(selection_mask), selection_mask]
return mined_views_ids
def mine_views(self, y, y_pool):
r"""Finds, for each element in batch :obj:`y`, its nearest neighbors in :obj:`y_pool`, randomly selects one
of them and returns the corresponding index.
Args:
y (torch.Tensor): batch of representation vectors.
y_pool (torch.Tensor): pool of candidate representation vectors.
Returns:
torch.Tensor: Indices of mined views in :obj:`y_pool`.
"""
mined_views_ids = self._knn(y, y_pool)
return mined_views_ids
return MYOW
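# Illustrative usage sketch (hypothetical, not from the original repo): given a BYOL-style base
# class, the factory-produced class mines views by (1) computing pairwise distances
# 2 - 2 * cos_sim between L2-normalized embeddings, (2) taking the k nearest candidates per
# sample, and (3) sampling one of them uniformly, e.g.
#   MYOW = myow_factory(BYOL)
#   mined_ids = model.mine_views(online_y, target_y_pool)  # one pool index per row of online_y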
```
#### File: self_supervised/tasks/classification.py
```python
import torch
from tqdm import tqdm
from self_supervised.data import utils
from self_supervised.utils import MetricLogger
def compute_accuracy(net, classifier, data, transform=None, device='cpu'):
r"""Evaluates the classification accuracy when a list of :class:`torch.Tensor` is given.
Args:
net (torch.nn.Module): Frozen encoder.
classifier (torch.nn.Module): Linear layer.
data (list of torch.Tensor): Inputs and target class labels.
transform (Callable, Optional): Transformation to use. Added for the purposes of
normalization. (default: :obj:`None`)
device (String, Optional): Device used. (default: :obj:`"cpu"`)
Returns:
float: Accuracy.
"""
classifier.eval()
# prepare inputs
x, label = data
x = x.to(device)
label = label.to(device)
if transform is not None:
x = transform(x)
# feed to network and classifier
with torch.no_grad():
representation = net(x)
pred_logits = classifier(representation)
# compute accuracy
_, pred_class = torch.max(pred_logits, 1)
acc = (pred_class == label).sum().item() / label.size(0)
return acc
def compute_accuracy_dataloader(net, classifier, dataloader, transform=None, device='cpu'):
r"""Evaluates the classification accuracy when a :obj:`torch.data.DataLoader` is given.
Args:
net (torch.nn.Module): Frozen encoder.
classifier (torch.nn.Module): Linear layer.
dataloader (torch.utils.data.DataLoader): Dataloader.
transform (Callable, Optional): Transformation to use. Added for the purposes of
normalization. (default: :obj:`None`)
device (String, Optional): Device used. (default: :obj:`"cpu"`)
Returns:
float: Accuracy.
"""
classifier.eval()
acc = []
for x, label in dataloader:
x = x.to(device)
label = label.to(device)
if transform is not None:
x = transform(x)
# feed to network and classifier
with torch.no_grad():
representation = net(x)
representation = representation.view(representation.shape[0], -1)
pred_logits = classifier(representation)
# compute accuracy
_, pred_class = torch.max(pred_logits, 1)
acc.append((pred_class == label).sum().item() / label.size(0))
return sum(acc)/len(acc)
def train_classifier(net, classifier, data_train, data_val, optimizer, scheduler=None, transform=None,
transform_val=None, batch_size=256, num_epochs=10, device='cpu',
writer=None, tag='', tqdm_progress=False):
r"""Trains linear layer to predict angle.
Args:
net (torch.nn.Module): Frozen encoder.
classifier (torch.nn.Module): Trainable linear layer.
data_train (torch.utils.data.DataLoader or list of torch.Tensor): Inputs and target class.
data_val (torch.utils.data.DataLoader or list of torch.Tensor): Inputs and target class.
optimizer (torch.optim.Optimizer): Optimizer for :obj:`classifier`.
scheduler (torch.optim._LRScheduler, Optional): Learning rate scheduler. (default: :obj:`None`)
transform (Callable, Optional): Transformation to use during training. (default: :obj:`None`)
transform_val (Callable, Optional): Transformation to use during validation. Added for the purposes of
normalization. (default: :obj:`None`)
batch_size (int, Optional): Batch size used during training. (default: :obj:`256`)
num_epochs (int, Optional): Number of training epochs. (default: :obj:`10`)
device (String, Optional): Device used. (default: :obj:`"cpu"`)
writer (torch.utils.tensorboard.SummaryWriter, Optional): Summary writer. (default: :obj:`None`)
tag (String, Optional): Tag used in :obj:`writer`. (default: :obj:`""`)
tqdm_progress (bool, Optional): If :obj:`True`, show training progress.
Returns:
MetricLogger: Accuracy.
"""
class_criterion = torch.nn.CrossEntropyLoss()
acc = MetricLogger()
for epoch in tqdm(range(num_epochs), disable=not tqdm_progress):
classifier.train()
if isinstance(data_train, list):
iterator = utils.batch_iter(*data_train, batch_size=batch_size)
else:
iterator = iter(data_train)
for x, label in iterator:
optimizer.zero_grad()
# load data
x = x.to(device)
label = label.to(device)
if transform is not None:
x = transform(x)
# forward
with torch.no_grad():
representation = net(x)
representation = representation.view(representation.shape[0], -1)
pred_class = classifier(representation)
# loss
loss = class_criterion(pred_class, label)
# backward
loss.backward()
optimizer.step()
if scheduler is not None:
scheduler.step()
# compute classification accuracies
if isinstance(data_train, list):
acc_val = compute_accuracy(net, classifier, data_val, transform=transform_val, device=device)
else:
acc_val = compute_accuracy_dataloader(net, classifier, data_val, transform=transform_val, device=device)
acc.update(0., acc_val)
if writer is not None:
writer.add_scalar('eval_acc/val-%r' % tag, acc_val, epoch)
if isinstance(data_train, list):
acc_train = compute_accuracy(net, classifier, data_train, transform=transform_val, device=device)
else:
acc_train = compute_accuracy_dataloader(net, classifier, data_train, transform=transform_val, device=device)
acc.update(acc_train, acc_val)
return acc
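# Illustrative usage sketch (hypothetical names, consistent with monkey-train.py above):
# classifier = torch.nn.Linear(representation_size, num_classes).to(device)
# opt = torch.optim.Adam(classifier.parameters(), lr=1e-2)
# acc = train_classifier(frozen_encoder, classifier, data_train, data_val, opt,
#                        batch_size=256, num_epochs=10, device=device)
# acc is a MetricLogger whose smoothed train/validation accuracies can then be read off.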
```
#### File: self_supervised/trainer/myow_trainer.py
```python
import numpy as np
import torch
import torch.distributed as dist
from self_supervised.model import MYOW, MLP3
from self_supervised.trainer import BYOLTrainer
class MYOWTrainer(BYOLTrainer):
def __init__(self, view_pool_dataloader=None, transform_m=None,
myow_warmup_epochs=0, myow_rampup_epochs=None, myow_max_weight=1., view_miner_k=4,
log_img_step=0, untransform_vis=None, projection_size_2=None, projection_hidden_size_2=None, **kwargs):
self.projection_size_2 = projection_size_2 if projection_size_2 is not None else kwargs['projection_size']
self.projection_hidden_size_2 = projection_hidden_size_2 if projection_hidden_size_2 is not None \
else kwargs['projection_hidden_size']
# view pool dataloader
self.view_pool_dataloader = view_pool_dataloader
# view miner
self.view_miner_k = view_miner_k
# transform class for mining
self.transform_m = transform_m
# myow loss
self.mined_loss_weight = 0.
self.myow_max_weight = myow_max_weight
self.myow_warmup_epochs = myow_warmup_epochs if myow_warmup_epochs is not None else 0
self.myow_rampup_epochs = myow_rampup_epochs if myow_rampup_epochs is not None else kwargs['total_epochs']
# convert to steps
world_size = kwargs['world_size'] if 'world_size' in kwargs else 1
self.num_examples = len(kwargs['train_dataloader'].dataset)
self.train_batch_size = kwargs['batch_size']
self.global_batch_size = world_size * self.train_batch_size
self.myow_warmup_steps = self.myow_warmup_epochs * self.num_examples // self.global_batch_size
self.myow_rampup_steps = self.myow_rampup_epochs * self.num_examples // self.global_batch_size
self.total_steps = kwargs['total_epochs'] * self.num_examples // self.global_batch_size
# logger
self.log_img_step = log_img_step
self.untransform_vis = untransform_vis
super().__init__(**kwargs)
def build_model(self, encoder):
projector_1 = MLP3(self.representation_size, self.projection_size, self.projection_hidden_size)
projector_2 = MLP3(self.projection_size, self.projection_size_2, self.projection_hidden_size_2)
predictor_1 = MLP3(self.projection_size, self.projection_size, self.projection_hidden_size)
predictor_2 = MLP3(self.projection_size_2, self.projection_size_2, self.projection_hidden_size_2)
net = MYOW(encoder, projector_1, projector_2, predictor_1, predictor_2, n_neighbors=self.view_miner_k)
return net.to(self.device)
def update_mined_loss_weight(self, step):
max_w = self.myow_max_weight
min_w = 0.
if step < self.myow_warmup_steps:
self.mined_loss_weight = min_w
elif step > self.myow_rampup_steps:
self.mined_loss_weight = max_w
else:
self.mined_loss_weight = min_w + (max_w - min_w) * (step - self.myow_warmup_steps) / \
(self.myow_rampup_steps - self.myow_warmup_steps)
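# Worked example (added for clarity, using the flag defaults from monkey-train.py above): with
# myow_warmup_epochs=10, myow_rampup_epochs=110 and myow_max_weight=0.1, the mined loss weight is
# 0 for the first 10 epochs, rises linearly from 0 to 0.1 between the end of epoch 10 and the end
# of epoch 110 (evaluated per optimization step), and stays at 0.1 afterwards.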
def log_schedule(self, loss):
super().log_schedule(loss)
self.writer.add_scalar('myow_weight', self.mined_loss_weight, self.step)
def log_correspondance(self, view, view_mined):
""" currently only implements 2d images"""
img_batch = np.zeros((16, view.shape[1], view.shape[2], view.shape[3]))
for i in range(8):
img_batch[i] = self.untransform_vis(view[i]).detach().cpu().numpy()
img_batch[8+i] = self.untransform_vis(view_mined[i]).detach().cpu().numpy()
self.writer.add_images('correspondence', img_batch, self.step)
def train_epoch(self):
self.model.train()
if self.view_pool_dataloader is not None:
view_pooler = iter(self.view_pool_dataloader)
for inputs in self.train_dataloader:
# update parameters
self.update_learning_rate(self.step)
self.update_momentum(self.step)
self.update_mined_loss_weight(self.step)
self.optimizer.zero_grad()
inputs = self.prepare_views(inputs)
view1 = inputs['view1'].to(self.device)
view2 = inputs['view2'].to(self.device)
if self.transform_1 is not None:
# apply transforms
view1 = self.transform_1(view1)
view2 = self.transform_2(view2)
# forward
outputs = self.model({'online_view': view1, 'target_view':view2})
weight = 1 / (1. + self.mined_loss_weight)
if self.symmetric_loss:
weight /= 2.
loss = weight * self.forward_loss(outputs['online_q'], outputs['target_z'])
if self.distributed and self.mined_loss_weight > 0 and not self.symmetric_loss:
with self.model.no_sync():
loss.backward()
else:
loss.backward()
if self.symmetric_loss:
outputs = self.model({'online_view': view2, 'target_view': view1})
weight = 1 / (1. + self.mined_loss_weight) / 2.
loss = weight * self.forward_loss(outputs['online_q'], outputs['target_z'])
if self.distributed and self.mined_loss_weight > 0:
with self.model.no_sync():
loss.backward()
else:
loss.backward()
# mine view
if self.mined_loss_weight > 0:
if self.view_pool_dataloader is not None:
try:
# currently only supports img, label
view_pool, label_pool = next(view_pooler)
view_pool = view_pool.to(self.device).squeeze()
except StopIteration:
# reinit the dataloader
view_pooler = iter(self.view_pool_dataloader)
view_pool, label_pool = next(view_pooler)
view_pool = view_pool.to(self.device).squeeze()
view3 = inputs['view1'].to(self.device)
else:
view3 = inputs['view3'].to(self.device).squeeze() \
if 'view3' in inputs else inputs['view1'].to(self.device).squeeze()
view_pool = inputs['view_pool'].to(self.device).squeeze()
# apply transform
if self.transform_m is not None:
# apply transforms
view3 = self.transform_m(view3)
view_pool = self.transform_m(view_pool)
# compute representations
outputs = self.model({'online_view': view3}, get_embedding='encoder')
online_y = outputs['online_y']
outputs_pool = self.model({'target_view': view_pool}, get_embedding='encoder')
target_y_pool = outputs_pool['target_y']
# mine views
if self.distributed:
gather_list = [torch.zeros_like(target_y_pool) for _ in range(self.world_size)]
dist.all_gather(gather_list, target_y_pool, self.group)
target_y_pool = torch.cat(gather_list, dim=0)
selection_mask = self.model.module.mine_views(online_y, target_y_pool)
else:
selection_mask = self.model.mine_views(online_y, target_y_pool)
target_y_mined = target_y_pool[selection_mask].contiguous()
outputs_mined = self.model({'online_y': online_y,'target_y': target_y_mined}, get_embedding='predictor_m')
weight = self.mined_loss_weight / (1. + self.mined_loss_weight)
loss = weight * self.forward_loss(outputs_mined['online_q_m'], outputs_mined['target_v'])
loss.backward()
self.optimizer.step()
# update moving average
self.update_target_network()
# log
if self.step % self.log_step == 0 and self.rank == 0:
self.log_schedule(loss=loss.item())
# log images
if self.mined_loss_weight > 0 and self.log_img_step > 0 and self.step % self.log_img_step == 0 and self.rank == 0:
if self.distributed:
# get image pools from all gpus
gather_list = [torch.zeros_like(view_pool) for _ in range(self.world_size)]
dist.all_gather(gather_list, view_pool, self.group)
view_pool = torch.cat(gather_list, dim=0)
self.log_correspondance(view3, view_pool[selection_mask])
# update parameters
self.step += 1
return loss.item()
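# Note (added for clarity): with mined loss weight w, the gradients accumulated above correspond
# to the objective (L_augmented + w * L_mined) / (1 + w), where L_augmented is averaged over the
# two view orders when symmetric_loss is enabled.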
``` |
{
"source": "36000/pyAFQ",
"score": 2
} |
#### File: AFQ/definitions/mapping.py
```python
import nibabel as nib
import numpy as np
from time import time
import os.path as op
from AFQ.definitions.utils import Definition, find_file
from dipy.align import syn_registration, affine_registration
import AFQ.registration as reg
import AFQ.data as afd
from AFQ.tasks.utils import get_fname
from dipy.align.imaffine import AffineMap
try:
from fsl.data.image import Image
from fsl.transform.fnirt import readFnirt
from fsl.transform.nonlinear import applyDeformation
has_fslpy = True
except ModuleNotFoundError:
has_fslpy = False
try:
import h5py
has_h5py = True
except ModuleNotFoundError:
has_h5py = False
__all__ = ["FnirtMap", "SynMap", "SlrMap", "AffMap"]
# For map definitions, get_for_subses should return only the mapping
# Where the mapping has transform and transform_inverse functions
# which each accept data, **kwargs
class FnirtMap(Definition):
"""
Use an existing FNIRT map. Expects a warp file
and an image file for each subject / session; image file
is used as src space for warp.
Parameters
----------
warp_suffix : str
suffix to pass to bids_layout.get() to identify the warp file.
space_suffix : str
suffix to pass to bids_layout.get() to identify the space file.
warp_filters : dict
Additional filters to pass to bids_layout.get() to identify
the warp file.
Default: {}
space_filters : dict
Additional filters to pass to bids_layout.get() to identify
the space file.
Default: {}
Examples
--------
fnirt_map = FnirtMap(
"warp",
"MNI",
{"scope": "TBSS"},
{"scope": "TBSS"})
api.AFQ(mapping=fnirt_map)
"""
def __init__(self, warp_suffix, space_suffix,
warp_filters={}, space_filters={}):
if not has_fslpy:
raise ImportError(
"Please install fslpy if you want to use FnirtMap")
self.warp_suffix = warp_suffix
self.space_suffix = space_suffix
self.warp_filters = warp_filters
self.space_filters = space_filters
self.fnames = {}
def find_path(self, bids_layout, from_path, subject, session):
if session not in self.fnames:
self.fnames[session] = {}
nearest_warp = find_file(
bids_layout, from_path, self.warp_filters, self.warp_suffix,
session, subject)
nearest_space = find_file(
bids_layout, from_path, self.space_filters, self.space_suffix,
session, subject)
self.fnames[session][subject] = (nearest_warp, nearest_space)
def get_for_subses(self, subses_dict, reg_subject, reg_template):
nearest_warp, nearest_space = self.fnames[
subses_dict['ses']][subses_dict['subject']]
our_templ = reg_template
subj = Image(subses_dict['dwi_file'])
their_templ = Image(nearest_space)
warp = readFnirt(nearest_warp, their_templ, subj)
return ConformedFnirtMapping(warp, our_templ.affine)
class ConformedFnirtMapping():
"""
ConformedFnirtMapping which matches the generic mapping API.
"""
def __init__(self, warp, ref_affine):
self.ref_affine = ref_affine
self.warp = warp
def transform_inverse(self, data, **kwargs):
data_img = Image(nib.Nifti1Image(
data.astype(np.float32), self.ref_affine))
xform_data = np.asarray(applyDeformation(data_img, self.warp).data)
return xform_data
def transform(self, data, **kwargs):
raise NotImplementedError(
"Fnirt based mappings can currently"
+ " only transform from template to subject space")
class ItkMap(Definition):
"""
Use an existing Itk map (e.g., from ANTS). Expects the warp file
from MNI to T1.
Parameters
----------
warp_suffix : str
suffix to pass to bids_layout.get() to identify the warp file.
    warp_filters : dict
Additional filters to pass to bids_layout.get() to identify
the warp file.
Default: {}
Examples
--------
itk_map = ItkMap(
"xfm",
{"scope": "qsiprep",
"from": "MNI152NLin2009cAsym",
"to": "T1w"})
api.AFQ(mapping=itk_map)
"""
def __init__(self, warp_suffix, warp_filters={}):
if not has_h5py:
raise ImportError(
"Please install h5py if you want to use ItkMap")
self.warp_suffix = warp_suffix
self.warp_filters = warp_filters
self.fnames = {}
def find_path(self, bids_layout, from_path, subject, session):
if session not in self.fnames:
self.fnames[session] = {}
self.fnames[session][subject] = find_file(
bids_layout, from_path, self.warp_filters, self.warp_suffix,
session, subject, extension="h5")
def get_for_subses(self, subses_dict, reg_subject, reg_template):
nearest_warp = self.fnames[subses_dict['ses']][subses_dict['subject']]
warp_f5 = h5py.File(nearest_warp)
their_shape = np.asarray(warp_f5["TransformGroup"]['1'][
'TransformFixedParameters'], dtype=int)[:3]
our_shape = reg_template.get_fdata().shape
if (our_shape != their_shape).any():
raise ValueError((
f"The shape of your ITK mapping ({their_shape})"
f" is not the same as your template for registration"
f" ({our_shape})"))
their_forward = np.asarray(warp_f5["TransformGroup"]['1'][
'TransformParameters']).reshape([*their_shape, 3])
their_disp = np.zeros((*their_shape, 3, 2))
their_disp[..., 0] = their_forward
their_disp = nib.Nifti1Image(
their_disp, reg_template.affine)
their_prealign = np.zeros((4, 4))
their_prealign[:3, :3] = np.asarray(warp_f5["TransformGroup"]["2"][
"TransformParameters"])[:9].reshape((3, 3))
their_prealign[:3, 3] = np.asarray(warp_f5["TransformGroup"]["2"][
"TransformParameters"])[9:]
their_prealign[3, 3] = 1.0
warp_f5.close()
return reg.read_mapping(
their_disp, subses_dict['dwi_file'],
reg_template, prealign=their_prealign)
class GeneratedMapMixin(object):
"""
Helper Class
Useful for maps that are generated by pyAFQ
"""
def get_fnames(self, extension, subses_dict):
mapping_file = get_fname(
subses_dict,
'_mapping_from-DWI_to_MNI_xfm')
meta_fname = get_fname(subses_dict, '_mapping_reg')
mapping_file = mapping_file + extension
meta_fname = meta_fname + '.json'
return mapping_file, meta_fname
def prealign(self, subses_dict, reg_subject, reg_template, save=True):
prealign_file = get_fname(
subses_dict, '_prealign_from-DWI_to-MNI_xfm.npy')
if not op.exists(prealign_file):
start_time = time()
_, aff = affine_registration(
reg_subject,
reg_template,
**self.affine_kwargs)
meta = dict(
type="rigid",
timing=time() - start_time)
if save:
np.save(prealign_file, aff)
meta_fname = get_fname(
subses_dict, '_prealign_from-DWI_to-MNI_xfm.json')
afd.write_json(meta_fname, meta)
else:
return aff
if save:
return prealign_file
else:
return np.load(prealign_file)
def get_for_subses(self, subses_dict, reg_subject, reg_template,
subject_sls=None, template_sls=None):
mapping_file, meta_fname = self.get_fnames(
self.extension, subses_dict)
if self.use_prealign:
reg_prealign = np.load(self.prealign(
subses_dict, reg_subject, reg_template))
else:
reg_prealign = None
if not op.exists(mapping_file):
start_time = time()
mapping = self.gen_mapping(
subses_dict, reg_subject, reg_template,
subject_sls, template_sls,
reg_prealign)
total_time = time() - start_time
reg.write_mapping(mapping, mapping_file)
meta = dict(
type="displacementfield",
timing=total_time)
afd.write_json(meta_fname, meta)
if self.use_prealign:
reg_prealign_inv = np.linalg.inv(reg_prealign)
else:
reg_prealign_inv = None
mapping = reg.read_mapping(
mapping_file,
subses_dict['dwi_file'],
reg_template,
prealign=reg_prealign_inv)
return mapping
class SynMap(GeneratedMapMixin, Definition):
"""
Calculate a Syn registration for each subject/session
using reg_subject and reg_template.
Parameters
----------
use_prealign : bool
Whether to perform a linear pre-registration.
Default: True
affine_kwargs : dictionary, optional
Parameters to pass to affine_registration
in dipy.align, which does the linear pre-alignment.
Only used if use_prealign is True.
Default: {}
syn_kwargs : dictionary, optional
Parameters to pass to syn_registration
in dipy.align, which does the SyN alignment.
Default: {}
Examples
--------
api.AFQ(mapping=SynMap())
"""
def __init__(self, use_prealign=True, affine_kwargs={}, syn_kwargs={}):
self.use_prealign = use_prealign
self.affine_kwargs = affine_kwargs
self.syn_kwargs = syn_kwargs
self.extension = ".nii.gz"
def find_path(self, bids_layout, from_path, subject, session):
pass
def gen_mapping(self, subses_dict, reg_subject, reg_template,
subject_sls, template_sls,
reg_prealign):
_, mapping = syn_registration(
reg_subject.get_fdata(),
reg_template.get_fdata(),
moving_affine=reg_subject.affine,
static_affine=reg_template.affine,
prealign=reg_prealign,
**self.syn_kwargs)
if self.use_prealign:
mapping.codomain_world2grid = np.linalg.inv(reg_prealign)
return mapping
class SlrMap(GeneratedMapMixin, Definition):
"""
Calculate a SLR registration for each subject/session
using reg_subject and reg_template.
slr_kwargs : dictionary, optional
Parameters to pass to whole_brain_slr
in dipy, which does the SLR alignment.
Default: {}
Examples
--------
api.AFQ(mapping=SlrMap())
"""
def __init__(self, slr_kwargs={}):
        self.slr_kwargs = slr_kwargs
self.use_prealign = False
self.extension = ".npy"
def find_path(self, bids_layout, from_path, subject, session):
pass
def gen_mapping(self, subses_dict, reg_template, reg_subject,
subject_sls, template_sls, reg_prealign):
return reg.slr_registration(
subject_sls, template_sls,
moving_affine=reg_subject.affine,
moving_shape=reg_subject.shape,
static_affine=reg_template.affine,
static_shape=reg_template.shape,
**self.slr_kwargs)
class AffMap(GeneratedMapMixin, Definition):
"""
Calculate an affine registration for each subject/session
using reg_subject and reg_template.
affine_kwargs : dictionary, optional
Parameters to pass to affine_registration
in dipy.align, which does the linear pre-alignment.
Default: {}
Examples
--------
api.AFQ(mapping=AffMap())
"""
def __init__(self, affine_kwargs={}):
self.use_prealign = False
self.affine_kwargs = affine_kwargs
self.extension = ".npy"
def find_path(self, bids_layout, from_path, subject, session):
pass
def gen_mapping(self, subses_dict, reg_subject, reg_template,
subject_sls, template_sls,
reg_prealign):
return ConformedAffineMapping(np.linalg.inv(self.prealign(
subses_dict, reg_subject, reg_template, save=False)))
class ConformedAffineMapping(AffineMap):
"""
Modifies AffineMap API to match DiffeomorphicMap API.
Important for SLR maps API to be indistinguishable from SYN maps API.
"""
def transform(self, *args, interpolation='linear', **kwargs):
kwargs['interp'] = interpolation
return super().transform_inverse(*args, **kwargs)
def transform_inverse(self, *args, interpolation='linear', **kwargs):
kwargs['interp'] = interpolation
return super().transform(*args, **kwargs)
```
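A rough usage sketch for the mapping definitions above, assuming hypothetical file paths and a `subses_dict` of the kind pyAFQ builds internally; in practice `api.AFQ(mapping=...)` handles all of this for you.
```python
import nibabel as nib

# Hypothetical inputs; pyAFQ normally assembles these for each subject/session.
subses_dict = {"dwi_file": "sub-01_dwi.nii.gz", "ses": "01", "subject": "01"}
reg_subject = nib.load("sub-01_b0.nii.gz")    # subject-space reference image
reg_template = nib.load("mni_T2.nii.gz")      # template-space reference image

mapping_def = SynMap(use_prealign=True)
mapping = mapping_def.get_for_subses(subses_dict, reg_subject, reg_template)

# Warp a template-space volume (for example an atlas) into subject space.
atlas_in_subject = mapping.transform_inverse(nib.load("atlas.nii.gz").get_fdata())
```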
#### File: AFQ/tasks/tractography.py
```python
import nibabel as nib
from time import time
import pimms
from AFQ.tasks.decorators import as_file, as_img
from AFQ.tasks.utils import with_name
from AFQ.definitions.utils import Definition
import AFQ.tractography as aft
outputs = {
"seed_file": """full path to a nifti file containing the
tractography seed mask""",
"stop_file": """full path to a nifti file containing the
tractography stop mask""",
"streamlines_file": """full path to the complete,
unsegmented tractography file"""}
@pimms.calc("seed_file")
@as_file('_seed_mask.nii.gz')
@as_img
def export_seed_mask(subses_dict, dwi_affine, tracking_params):
seed_mask = tracking_params['seed_mask']
seed_mask_desc = dict(source=tracking_params['seed_mask'])
return seed_mask, seed_mask_desc
@pimms.calc("stop_file")
@as_file('_stop_mask.nii.gz')
@as_img
def export_stop_mask(subses_dict, dwi_affine, tracking_params):
stop_mask = tracking_params['stop_mask']
stop_mask_desc = dict(source=tracking_params['stop_mask'])
return stop_mask, stop_mask_desc
@pimms.calc("stop_file")
def export_stop_mask_pft(pve_wm, pve_gm, pve_csf):
return {"stop_file": [pve_wm, pve_gm, pve_csf]}
@pimms.calc("streamlines_file")
@as_file('_tractography.trk', include_track=True)
def streamlines(subses_dict, data_imap, seed_file, stop_file,
tracking_params):
this_tracking_params = tracking_params.copy()
# get odf_model
odf_model = this_tracking_params["odf_model"]
if odf_model == "DTI":
params_file = data_imap["dti_params_file"]
elif odf_model == "CSD" or odf_model == "MSMT":
params_file = data_imap["csd_params_file"]
elif odf_model == "DKI":
params_file = data_imap["dki_params_file"]
else:
raise TypeError((
f"The ODF model you gave ({odf_model}) was not recognized"))
# get masks
this_tracking_params['seed_mask'] = nib.load(seed_file).get_fdata()
if isinstance(stop_file, str):
this_tracking_params['stop_mask'] = nib.load(stop_file).get_fdata()
else:
this_tracking_params['stop_mask'] = stop_file
# perform tractography
start_time = time()
sft = aft.track(params_file, **this_tracking_params)
sft.to_vox()
meta_directions = {
"det": "deterministic",
"prob": "probabilistic"}
meta = dict(
TractographyClass="local",
TractographyMethod=meta_directions[
tracking_params["directions"]],
Count=len(sft.streamlines),
Seeding=dict(
ROI=seed_file,
n_seeds=tracking_params["n_seeds"],
random_seeds=tracking_params["random_seeds"]),
Constraints=dict(ROI=stop_file),
Parameters=dict(
Units="mm",
StepSize=tracking_params["step_size"],
MinimumLength=tracking_params["min_length"],
MaximumLength=tracking_params["max_length"],
Unidirectional=False),
Timing=time() - start_time)
return sft, meta
@pimms.calc("streamlines_file")
def custom_tractography(custom_tract_file):
return custom_tract_file
def get_tractography_plan(custom_tract_file, tracking_params):
tractography_tasks = with_name([
export_seed_mask, export_stop_mask, streamlines])
if custom_tract_file is not None:
tractography_tasks["streamlines_res"] = custom_tractography
stop_mask = tracking_params['stop_mask']
if tracking_params["tracker"] == "pft":
probseg_funcs = stop_mask.get_mask_getter()
tractography_tasks["wm_res"] = pimms.calc("pve_wm")(probseg_funcs[0])
tractography_tasks["gm_res"] = pimms.calc("pve_gm")(probseg_funcs[1])
tractography_tasks["csf_res"] = pimms.calc("pve_csf")(probseg_funcs[2])
tractography_tasks["export_stop_mask_res"] = \
export_stop_mask_pft
else:
if isinstance(stop_mask, Definition):
tractography_tasks["export_stop_mask_res"] =\
pimms.calc("stop_file")(as_file('_stop_mask.nii.gz')(
stop_mask.get_mask_getter()))
if isinstance(tracking_params['seed_mask'], Definition):
tractography_tasks["export_seed_mask_res"] = pimms.calc("seed_file")(
as_file('_seed_mask.nii.gz')(
tracking_params['seed_mask'].get_mask_getter()))
return pimms.plan(**tractography_tasks)
``` |
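The plan returned by `get_tractography_plan` is built with `pimms`, so each `@pimms.calc` function becomes a lazily evaluated node keyed by its output name. A minimal, self-contained sketch of that pattern (toy values, not real tractography data) is:
```python
import pimms

@pimms.calc("seed_file")
def make_seed(subses_dict):
    # In the real task this writes a NIfTI file and returns its path.
    return subses_dict["subject"] + "_seed.nii.gz"

@pimms.calc("streamlines_file")
def make_streamlines(seed_file):
    # Downstream nodes receive upstream outputs by parameter name.
    return seed_file.replace("_seed.nii.gz", "_tractography.trk")

plan = pimms.plan(seed=make_seed, streamlines=make_streamlines)
results = plan(subses_dict={"subject": "sub-01"})
print(results["streamlines_file"])  # computed lazily on first access
```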
{
"source": "36000/qsiprep",
"score": 2
} |
#### File: workflows/recon/dsi_studio.py
```python
import nipype.pipeline.engine as pe
from nipype.interfaces import afni, utility as niu
from qsiprep.interfaces.dsi_studio import (DSIStudioCreateSrc, DSIStudioGQIReconstruction,
DSIStudioAtlasGraph, DSIStudioExport,
DSIStudioTracking,
FixDSIStudioExportHeader)
import logging
from qsiprep.interfaces.bids import ReconDerivativesDataSink
from .interchange import recon_workflow_input_fields
from ...engine import Workflow
from ...interfaces.reports import CLIReconPeaksReport, ConnectivityReport
LOGGER = logging.getLogger('nipype.interface')
def init_dsi_studio_recon_wf(omp_nthreads, available_anatomical_data, name="dsi_studio_recon",
output_suffix="", params={}):
"""Reconstructs diffusion data using DSI Studio.
This workflow creates a ``.src.gz`` file from the input dwi, bvals and bvecs,
then reconstructs ODFs using GQI.
Inputs
*Default qsiprep inputs*
Outputs
fibgz
A DSI Studio fib file containing GQI ODFs, peaks and scalar values.
Params
ratio_of_mean_diffusion_distance: float
Default 1.25. Distance to sample EAP at.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=recon_workflow_input_fields + ['odf_rois']),
name="inputnode")
outputnode = pe.Node(
niu.IdentityInterface(
fields=['fibgz']),
name="outputnode")
workflow = Workflow(name=name)
plot_reports = params.pop("plot_reports", True)
desc = """DSI Studio Reconstruction
: """
create_src = pe.Node(DSIStudioCreateSrc(), name="create_src")
romdd = params.get("ratio_of_mean_diffusion_distance", 1.25)
gqi_recon = pe.Node(
DSIStudioGQIReconstruction(ratio_of_mean_diffusion_distance=romdd),
name="gqi_recon")
desc += """\
Diffusion orientation distribution functions (ODFs) were reconstructed using
generalized q-sampling imaging (GQI, @yeh2010gqi) with a ratio of mean diffusion
distance of %02f.""" % romdd
# Make a visual report of the model
plot_peaks = pe.Node(CLIReconPeaksReport(subtract_iso=True), name='plot_peaks')
ds_report_peaks = pe.Node(
ReconDerivativesDataSink(extension='.png',
desc="GQIODF",
suffix='peaks'),
name='ds_report_peaks',
run_without_submitting=True)
# Plot targeted regions
if available_anatomical_data['has_qsiprep_t1w_transforms'] and plot_reports:
ds_report_odfs = pe.Node(
ReconDerivativesDataSink(extension='.png',
desc="GQIODF",
suffix='odfs'),
name='ds_report_odfs',
run_without_submitting=True)
workflow.connect(plot_peaks, 'odf_report', ds_report_odfs, 'in_file')
workflow.connect([
(inputnode, create_src, [('dwi_file', 'input_nifti_file'),
('bval_file', 'input_bvals_file'),
('bvec_file', 'input_bvecs_file')]),
(create_src, gqi_recon, [('output_src', 'input_src_file')]),
(inputnode, gqi_recon, [('dwi_mask', 'mask')]),
(gqi_recon, outputnode, [('output_fib', 'fibgz')])])
if plot_reports:
workflow.connect([
(gqi_recon, plot_peaks, [('output_fib', 'fib_file')]),
(inputnode, plot_peaks, [('dwi_ref', 'background_image'),
('odf_rois', 'odf_rois'),
('dwi_mask', 'mask_file')]),
(plot_peaks, ds_report_peaks, [('peak_report', 'in_file')])])
if output_suffix:
# Save the output in the outputs directory
ds_gqi_fibgz = pe.Node(
ReconDerivativesDataSink(
extension='.fib.gz',
suffix=output_suffix,
compress=True),
name='ds_gqi_fibgz',
run_without_submitting=True)
workflow.connect(gqi_recon, 'output_fib', ds_gqi_fibgz, 'in_file')
workflow.__desc__ = desc
return workflow
def init_dsi_studio_tractography_wf(omp_nthreads, available_anatomical_data, name="dsi_studio_tractography",
params={}, output_suffix=""):
"""Calculate streamline-based connectivity matrices using DSI Studio.
DSI Studio has a deterministic tractography algorithm that can be used to
estimate pairwise regional connectivity. It calculates multiple connectivity
measures.
Inputs
fibgz
A DSI Studio fib file produced by DSI Studio reconstruction.
trk_file
a DSI Studio trk.gz file
Outputs
trk_file
A DSI-Studio format trk file
fibgz
The input fib file, as it is needed by downstream nodes in addition to
the trk file.
Params
fiber_count
number of streamlines to generate. Cannot also specify seed_count
seed_count
Number of seeds to track from. Does not guarantee a fixed number of
streamlines and cannot be used with the fiber_count option.
method
0: streamline (Euler) 4: Runge Kutta
seed_plan
        0 or 1; passed straight through to DSI Studio's --seed_plan option.
initial_dir
Seeds begin oriented as 0: the primary orientation of the ODF 1: a random orientation
or 2: all orientations
connectivity_type
"pass" to count streamlines passing through a region. "end" to force
streamlines to terminate in regions they count as connecting.
connectivity_value
"count", "ncount", "fa" used to quantify connection strength.
random_seed
Setting to True generates truly random (not-reproducible) seeding.
fa_threshold
If not specified, will use the DSI Studio Otsu threshold. Otherwise
        specifies the minimum qa value per fixel to be used for tracking.
step_size
Streamline propagation step size in millimeters.
turning_angle
        Maximum turning angle in degrees for streamline propagation.
smoothing
DSI Studio smoothing factor
min_length
Minimum streamline length in millimeters.
max_length
Maximum streamline length in millimeters.
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=recon_workflow_input_fields + ['fibgz']),
name="inputnode")
outputnode = pe.Node(niu.IdentityInterface(fields=['trk_file', 'fibgz']),
name="outputnode")
plot_reports = params.pop("plot_reports", True)
workflow = Workflow(name=name)
tracking = pe.Node(DSIStudioTracking(nthreads=omp_nthreads, **params),
name='tracking')
workflow.connect([
(inputnode, tracking, [('fibgz', 'input_fib')]),
(tracking, outputnode, [('output_trk', 'trk_file')]),
(inputnode, outputnode, [('fibgz', 'fibgz')])
])
if output_suffix:
# Save the output in the outputs directory
ds_tracking = pe.Node(ReconDerivativesDataSink(suffix=output_suffix),
name='ds_' + name,
run_without_submitting=True)
workflow.connect(tracking, 'output_trk', ds_tracking, 'in_file')
return workflow
def init_dsi_studio_connectivity_wf(omp_nthreads, available_anatomical_data, name="dsi_studio_connectivity",
params={}, output_suffix=""):
"""Calculate streamline-based connectivity matrices using DSI Studio.
DSI Studio has a deterministic tractography algorithm that can be used to
estimate pairwise regional connectivity. It calculates multiple connectivity
measures.
Inputs
fibgz
A DSI Studio fib file produced by DSI Studio reconstruction.
trk_file
a DSI Studio trk.gz file
Outputs
matfile
A MATLAB-format file with numerous connectivity matrices for each
atlas.
Params
fiber_count
number of streamlines to generate. Cannot also specify seed_count
seed_count
Number of seeds to track from. Does not guarantee a fixed number of
streamlines and cannot be used with the fiber_count option.
method
0: streamline (Euler) 4: Runge Kutta
seed_plan
        0 or 1; passed straight through to DSI Studio's --seed_plan option.
initial_dir
Seeds begin oriented as 0: the primary orientation of the ODF 1: a random orientation
or 2: all orientations
connectivity_type
"pass" to count streamlines passing through a region. "end" to force
streamlines to terminate in regions they count as connecting.
connectivity_value
"count", "ncount", "fa" used to quantify connection strength.
random_seed
Setting to True generates truly random (not-reproducible) seeding.
fa_threshold
If not specified, will use the DSI Studio Otsu threshold. Otherwise
        specifies the minimum qa value per fixel to be used for tracking.
step_size
Streamline propagation step size in millimeters.
turning_angle
        Maximum turning angle in degrees for streamline propagation.
smoothing
DSI Studio smoothing factor
min_length
Minimum streamline length in millimeters.
max_length
Maximum streamline length in millimeters.
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=recon_workflow_input_fields + ['fibgz', 'trk_file', 'atlas_configs']),
name="inputnode")
outputnode = pe.Node(niu.IdentityInterface(fields=['matfile']),
name="outputnode")
plot_reports = params.pop("plot_reports", True)
workflow = pe.Workflow(name=name)
calc_connectivity = pe.Node(DSIStudioAtlasGraph(nthreads=omp_nthreads, **params),
name='calc_connectivity')
plot_connectivity = pe.Node(ConnectivityReport(), name='plot_connectivity')
ds_report_connectivity = pe.Node(
ReconDerivativesDataSink(extension='.svg',
desc="DSIStudioConnectivity",
suffix='matrices'),
name='ds_report_connectivity',
run_without_submitting=True)
workflow.connect([
(inputnode, calc_connectivity, [('atlas_configs', 'atlas_configs'),
('fibgz', 'input_fib'),
('trk_file', 'trk_file')]),
(calc_connectivity, plot_connectivity, [
('connectivity_matfile', 'connectivity_matfile')]),
(plot_connectivity, ds_report_connectivity, [('out_report', 'in_file')]),
(calc_connectivity, outputnode, [('connectivity_matfile', 'matfile')])
])
if output_suffix:
# Save the output in the outputs directory
ds_connectivity = pe.Node(ReconDerivativesDataSink(suffix=output_suffix),
name='ds_' + name,
run_without_submitting=True)
workflow.connect(calc_connectivity, 'connectivity_matfile', ds_connectivity, 'in_file')
return workflow
def init_dsi_studio_export_wf(omp_nthreads, available_anatomical_data, name="dsi_studio_export",
params={}, output_suffix=""):
"""Export scalar maps from a DSI Studio fib file into NIfTI files with correct headers.
This workflow exports gfa, fa0, fa1, fa2 and iso.
Inputs
fibgz
A DSI Studio fib file
Outputs
gfa
NIfTI file containing generalized fractional anisotropy (GFA).
fa0
Quantitative Anisotropy for the largest fixel in each voxel.
fa1
Quantitative Anisotropy for the second-largest fixel in each voxel.
fa2
Quantitative Anisotropy for the third-largest fixel in each voxel.
iso
Isotropic component of the ODF in each voxel.
"""
inputnode = pe.Node(
niu.IdentityInterface(
fields=recon_workflow_input_fields + ['fibgz']),
name="inputnode")
plot_reports = params.pop("plot_reports", True)
scalar_names = ['gfa', 'fa0', 'fa1', 'fa2', 'iso', 'dti_fa', 'md', 'rd', 'ad']
outputnode = pe.Node(
niu.IdentityInterface(fields=[name + "_file" for name in scalar_names]),
name="outputnode")
workflow = pe.Workflow(name=name)
export = pe.Node(DSIStudioExport(to_export=",".join(scalar_names)), name='export')
fixhdr_nodes = {}
for scalar_name in scalar_names:
output_name = scalar_name + '_file'
fixhdr_nodes[scalar_name] = pe.Node(FixDSIStudioExportHeader(), name='fix_'+scalar_name)
connections = [(export, fixhdr_nodes[scalar_name], [(output_name, 'dsi_studio_nifti')]),
(inputnode, fixhdr_nodes[scalar_name], [('dwi_file',
'correct_header_nifti')]),
(fixhdr_nodes[scalar_name], outputnode, [('out_file', scalar_name)])]
if output_suffix:
connections += [(fixhdr_nodes[scalar_name],
pe.Node(
ReconDerivativesDataSink(desc=scalar_name,
suffix=output_suffix),
name='ds_%s_%s' % (name, scalar_name)),
[('out_file', 'in_file')])]
workflow.connect(connections)
workflow.connect([(inputnode, export, [('fibgz', 'input_file')])])
return workflow
``` |
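A sketch of instantiating the GQI reconstruction workflow on its own; the anatomical-data flags and file names are assumptions, since inside qsiprep the surrounding recon pipeline supplies them.
```python
# Minimal stand-alone wiring of the workflow defined above.
available_anatomical_data = {"has_qsiprep_t1w_transforms": False}

wf = init_dsi_studio_recon_wf(
    omp_nthreads=1,
    available_anatomical_data=available_anatomical_data,
    name="dsi_studio_recon",
    output_suffix="gqi",
    params={"ratio_of_mean_diffusion_distance": 1.25, "plot_reports": False})

# The inputnode expects the usual preprocessed qsiprep outputs.
wf.inputs.inputnode.dwi_file = "sub-01_desc-preproc_dwi.nii.gz"
wf.inputs.inputnode.bval_file = "sub-01_desc-preproc_dwi.bval"
wf.inputs.inputnode.bvec_file = "sub-01_desc-preproc_dwi.bvec"
```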
{
"source": "360abhimanyu/Azure_Project_2",
"score": 2
} |
#### File: 360abhimanyu/Azure_Project_2/test_index_page.py
```python
from flask import Flask, request, jsonify, json, current_app
from app import app, predict
import pytest
@pytest.fixture
def client():
with app.test_client() as client:
with app.app_context():
assert current_app.config["ENV"] == "production"
yield client
def test_index_page(client):
response = client.get('/')
assert response.status_code == 200
assert b'Sklearn Prediction Home' in response.data
``` |
{
"source": "360cid/curlylint",
"score": 3
} |
#### File: curlylint/curlylint/cli_test.py
```python
import unittest
from curlylint.tests.utils import BlackRunner
from curlylint.cli import main
class TestParser(unittest.TestCase):
def test_flag_help(self):
runner = BlackRunner()
result = runner.invoke(main, ["--help"])
self.assertIn(
"Prototype linter for Jinja and Django templates",
runner.stdout_bytes.decode(),
)
self.assertEqual(runner.stderr_bytes.decode(), "")
self.assertEqual(result.exit_code, 0)
``` |
{
"source": "360jinrong/GBST",
"score": 3
} |
#### File: gbst_package/gbst/metrics.py
```python
import numpy as np
from sklearn.metrics import roc_auc_score
def evalauc(preds, deval):
"""
Average AUC metric as default, but written in python.
This serves as an example of implementing new metrics.
Parameters
----------
preds: the predicted f() of deval, calculated by GBST model.
deval: the eval dataset.
Returns
-------
metric_name : str
metric_value: float
"""
labels = deval.get_label().astype(int)
y_arr = np.zeros([preds.shape[0], preds.shape[1]])
for i, label in enumerate(labels):
y_arr[i, :label] = 1
y_arr[i, label:] = 0
hazards = 1./(1.+np.exp(-preds))
mults = np.ones(hazards.shape[0])
auc_total = []
for timestep in range(0, hazards.shape[1]):
mults = mults * (1 - hazards[:, timestep])
try:
auc = roc_auc_score(y_true=y_arr[:, timestep], y_score=mults)
auc_total.append(auc)
except Exception as e:
# If all candidates are alive/default, then roc_auc_score will throw an exception.
# Such cases are excluded from aggregation.
pass
return 'AUC', float(np.sum(auc_total)) / len(auc_total)
``` |
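The metric above turns per-interval hazards into survival probabilities with a running product of `(1 - hazard)` before scoring each time step. A tiny numeric illustration of that conversion:
```python
import numpy as np

# Toy margins f() for 3 samples over 4 time steps; hazards = sigmoid(f).
preds = np.array([[ 2.0,  1.0,  0.0, -1.0],
                  [-2.0, -1.5, -1.0, -0.5],
                  [ 0.5,  0.5,  0.5,  0.5]])
hazards = 1.0 / (1.0 + np.exp(-preds))

# Survival up to each step is the cumulative product of (1 - hazard),
# exactly the quantity evalauc scores against the 0/1 labels per time step.
survival = np.cumprod(1.0 - hazards, axis=1)
print(np.round(survival, 3))
```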
{
"source": "360ls/360ls-stitcher",
"score": 2
} |
#### File: app/profile/profile.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import cProfile
from app.util.feed import CameraFeed
from app.util.feed import VideoFeed
from app.stitcher.core.feedhandler import MultiFeedHandler
from app.util.configure import get_configuration
def main():
"""
Responsible for profiling stitch handler.
"""
args = parse_args()
pr = cProfile.Profile()
pr.enable()
if args.profile_single_stitch:
stitch_single()
else:
stitch_double()
pr.disable()
pr.print_stats(sort='time')
def stitch_single():
"""
Responsible for handling stitch of one camera.
"""
config = get_configuration("config/profiles/singlecamerastitch.yml")
camera_feed = CameraFeed(config['camera-index'], config['width'], config['height'])
handler = MultiFeedHandler([camera_feed])
handler.stitch_feeds(
config['should-stream'], config['output-path'],
config['width'], config['height'], config['rtmp_url'])
def stitch_double():
"""
Responsible for handling stitch of two videos.
"""
config = get_configuration("config/profiles/twovideostitch.yml")
left_feed = VideoFeed(config['left-video-path'], config['width'], config['height'])
right_feed = VideoFeed(config['right-video-path'], config['width'], config['height'])
handler = MultiFeedHandler([left_feed, right_feed])
handler.stitch_feeds()
def parse_args():
"""
Returns parsed arguments
"""
parser = argparse.ArgumentParser(description="Stitcher profiler")
parser.add_argument('--single', dest='profile_single_stitch',
action='store_true', default=False)
parser.add_argument('--double', dest='profile_double_stitch',
action='store_true', default=False)
return parser.parse_args()
if __name__ == "__main__":
main()
```
#### File: stitcher/flex/flexor.py
```python
from __future__ import absolute_import, division, print_function
import cv2
import numpy as np
from app.util.feed import VideoFeed
from app.util.textformatter import TextFormatter
def main():
"""
Runs an example of checking the main color of a video feed.
"""
video = VideoFeed("app/storage/flex/naiveflex.mp4")
if video.is_valid():
while video.has_next():
frame = video.get_next(True, False)
average_frame_color = get_average_color(frame)
zero_array = np.array([0, 0, 0])
if np.array_equal(average_frame_color, zero_array):
TextFormatter.print_error("Invalid frame.")
else:
# TextFormatter.print_info(get_average_color(frame))
TextFormatter.print_info("Valid frame.")
title = "Flex Video Feed"
cv2.imshow(title, frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
TextFormatter.print_info("Cleaning up video feed.")
video.close()
cv2.destroyAllWindows()
cv2.waitKey(1)
def get_average_color(frame):
""" Returns the average color of the provided frame. """
# Computes average color for each row in frame.
average_row_colors = np.average(frame, axis=0)
# Computes the average of the average row colors
average_frame_color_raw = np.average(average_row_colors, axis=0)
# Converts average frame color to uint8 form for true color match
average_frame_color = np.uint8(average_frame_color_raw)
return average_frame_color
if __name__ == "__main__":
main()
```
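`get_average_color` averages rows first and then averages those row means; for equal-sized rows this equals the overall per-channel mean, which the short check below demonstrates with a toy frame (plain NumPy, no OpenCV needed):
```python
import numpy as np

frame = np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8)  # toy BGR frame

two_stage = np.average(np.average(frame, axis=0), axis=0)  # rows, then columns
direct = frame.reshape(-1, 3).mean(axis=0)                 # all pixels at once
assert np.allclose(two_stage, direct)
print(np.uint8(two_stage))
```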
#### File: test/nocv/test_inputscanner.py
```python
from __future__ import absolute_import, division, print_function
from app.util.inputscanner import InputScanner
def test_read_int():
""" Checks to make sure output of read_int is the same as input. """
assert 1 == 1
```
#### File: app/util/calibrate.py
```python
from __future__ import print_function
import os
import sys
import getopt
from glob import glob
import numpy as np
import cv2
import imutils
# pylint: disable=R0914
def main():
"""
Responsible for run of calibration from the command line.
"""
calibrate()
def calibrate():
"""
Calibration routine
"""
args, img_mask = getopt.getopt(sys.argv[1:], '', ['debug=', 'square_size='])
args = dict(args)
args.setdefault('--debug', './output/')
args.setdefault('--square_size', 1.0)
if not img_mask:
img_mask = 'app/storage/calibration_inputs/*'
else:
img_mask = img_mask[0]
img_names = glob(img_mask)
debug_dir = args.get('--debug')
if not os.path.isdir(debug_dir):
os.mkdir(debug_dir)
square_size = float(args.get('--square_size'))
pattern_size = (9, 6)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
height, width = 0, 0
for filename in img_names:
print('processing %s... ' % filename, end='')
img = cv2.imread(filename, 0)
if img is None:
print("Failed to load", filename)
continue
height, width = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, pattern_size)
if found:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if not found:
print('Chessboard not found.')
continue
img_points.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print('ok')
# calculate camera distortion
rms, camera_matrix, dist_coefs, _, _ = cv2.calibrateCamera(obj_points,
img_points,
(width, height),
None, None)
print("\nRMS:", rms)
print("camera matrix:\n", camera_matrix)
print("distortion coefficients: ", dist_coefs.ravel())
dist_coefs = dist_coefs.ravel()
    # zero out the higher-order distortion coefficients (indices 2-4)
    for i in range(2, 5):
        dist_coefs[i] = 0
print(dist_coefs)
verify_calibration(camera_matrix, dist_coefs)
cv2.destroyAllWindows()
def splitfilename(filename):
"""
splits file name into parent directory, name and extension
"""
path, filename = os.path.split(filename)
name, ext = os.path.splitext(filename)
return path, name, ext
def verify_calibration(camera_matrix, distortion_coefficients):
"""
Verifies calibration of a test image
based on an incoming camera_matrix and a
set of distortion_coefficients pre-determined during calibration.
"""
# Read in the image for correction
src = cv2.imread("app/storage/calibration_inputs/104_0009.JPG")
height, width = src.shape[:2]
# Correct the radial distortion
newcamera, _ = cv2.getOptimalNewCameraMatrix(camera_matrix,
distortion_coefficients,
(width, height), 0)
newimg = cv2.undistort(src, camera_matrix, distortion_coefficients, None, newcamera)
# Display a comparison between the original image and the corrected image
cv2.imshow("original", imutils.resize(src, width=720))
cv2.imshow("corrected", imutils.resize(newimg, width=720))
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
``` |
{
"source": "360medics/has-keyword-extractor",
"score": 3
} |
#### File: has-keyword-extractor/has_keyword_extractor/hwo.py
```python
from typing import Dict
def compute_hwo(word: str, monogram_frequency: Dict, cumul_monogram: Dict):
if word not in monogram_frequency:
return -100
key_max = max(list(cumul_monogram.keys()))
return cumul_monogram[monogram_frequency[word]] / cumul_monogram[key_max]
```
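A toy call to `compute_hwo` with made-up dictionaries: the intended semantics (word -> corpus count, and count -> cumulative total) are inferred from the lookups above, so treat the numbers as illustrative only.
```python
# Hypothetical inputs for illustration.
monogram_frequency = {"cardiology": 3, "the": 40}
cumul_monogram = {3: 120, 40: 980, 50: 1000}

print(compute_hwo("cardiology", monogram_frequency, cumul_monogram))  # 120 / 1000 = 0.12
print(compute_hwo("the", monogram_frequency, cumul_monogram))         # 980 / 1000 = 0.98
print(compute_hwo("unknown", monogram_frequency, cumul_monogram))     # -100 sentinel
```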
#### File: has-keyword-extractor/has_keyword_extractor/_preprocessing_text.py
```python
import re
def preprocessing_text(text: str, nlp, sep="[PUNCT]") -> list:
    # normalise the raw text first, otherwise the cleaned string is never used
    text = (
        re.sub(r"\d+", "", text)
        .lower()
        .replace("\\n", "")
        .replace("\\t", "")
        .replace("\\xa", "")
        .replace("\n", "")
        .replace("'", "")
        .strip()
    )
    # tokenize the cleaned text
    doc = nlp(text)
    # replace stopwords and punctuation with the separator token
    words_list = []
for token in doc:
if token.is_punct or token.is_stop:
words_list.append(sep)
elif not token.is_space:
words_list.append(token.text)
return words_list
```
#### File: has-keyword-extractor/has_keyword_extractor/_runner.py
```python
from concurrent import futures
from concurrent.futures.thread import ThreadPoolExecutor
from typing import Dict
from alive_progress import alive_bar
from has_keyword_extractor._preprocessing_text import preprocessing_text
from has_keyword_extractor._statistics_keywords_extraction import StatisticsKeywordsExtraction
def st_process_doc(text: str, nlp, alpha: float, threshold: float):
return StatisticsKeywordsExtraction(
preprocessing_text(text, nlp), alpha, threshold
).extract_keywords()
def st_process_multiple_doc(
documents: Dict, nlp, alpha: float, threshold: float, workers=20
):
print("🔎 Begin keyword extraction : ")
result = {}
with alive_bar(
len(documents), title="number of processed documents", length=100
) as progress_bar:
with ThreadPoolExecutor(workers) as executor:
future_to_url = {
executor.submit(st_process_doc, content, nlp, alpha, threshold): title
for title, content in documents.items()
}
for future in futures.as_completed(future_to_url):
result[future_to_url[future]] = future.result()
progress_bar()
print("keyword extraction is ended")
return result
``` |
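A rough usage sketch for `st_process_multiple_doc`, assuming a spaCy pipeline and arbitrary `alpha`/`threshold` values; the model name and documents are placeholders.
```python
import spacy

nlp = spacy.load("fr_core_news_sm")  # hypothetical model choice

documents = {
    "doc-1": "Le traitement de l'hypertension repose sur plusieurs classes de medicaments.",
    "doc-2": "La prise en charge du diabete de type 2 commence par des mesures hygieno-dietetiques.",
}
keywords = st_process_multiple_doc(documents, nlp, alpha=0.5, threshold=0.3, workers=4)
print(keywords["doc-1"])
```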
{
"source": "360modder/current-music-trends",
"score": 3
} |
#### File: 360modder/current-music-trends/update.py
```python
import argparse
import traceback
import utils
import requests
from typing import List
from tqdm import tqdm
from youtube_v3_api import YoutubeService, Playlists
pl_desc = f"""Last Updated {utils.timestamp()}
A playlist which lists outs the trending music videos from youtube, across its all regions. This playlist updates occasionally in a week.
Website: https://clitic.github.io/current-music-trends/home
Source Code: https://github.com/clitic/current-music-trends
"""
def video_ids_from_json(json_file: str, fetch: bool) -> List[str]:
"""get video ids to be added from json file"""
link = "https://raw.githubusercontent.com/clitic/current-music-trends/gh-pages/data.json"
data = requests.get(link).json() if fetch else utils.load_json(json_file)
return [video_id for _, _, video_id in data]
def main(args: argparse.Namespace):
add_video_ids = video_ids_from_json(args.input, args.fetch)
youtube = YoutubeService().create_oauth_service(
"safe/client_secrets.json", ["https://www.googleapis.com/auth/youtube"],
token_file="safe/token_youtube_v3.pkl", relogin=args.relogin
)
pl = Playlists(args.playlist, youtube, progress_bars=True)
pl.update(pl.title, pl_desc)
if not args.no_clear:
pl.clear(skip_ids=list(set(pl.video_ids).intersection(add_video_ids)))
videos_not_added = 0
add_video = True
for video_id in tqdm(add_video_ids, desc="adding videos"):
try:
if add_video:
pl.add_video(video_id)
else:
videos_not_added += 1
except:
traceback.print_exc()
videos_not_added += 1
add_video = False
if videos_not_added > 0:
time_left, clock_time = utils.time_left_for_pacific_midnight()
print(f"total {videos_not_added} videos not added")
        print(f"re-run this script after {time_left} @ {clock_time}, when quota is renewed")
else:
print("all videos added to playlist")
print(f"script run-up costs {pl.cost} api units")
print(f"visit here: {pl.link}")
if __name__ == "__main__":
parser = argparse.ArgumentParser("update", description="updates youtube playlist")
parser.add_argument("-p", "--playlist", dest="playlist", default="PLv5KLCzERve6UI32k8kvAcUY077i3VWE6",
help="youtube playlist id (default: PLv5KLCzERve6UI32k8kvAcUY077i3VWE6)")
parser.add_argument("-i", "--input", dest="input", default="docs/data.json",
help="path of data.json file (default: docs/data.json)")
parser.add_argument("-f", "--fetch", dest="fetch", default=False, action="store_true",
help="fetch data.json from github instead of local file (default: false)")
parser.add_argument("--relogin", dest="relogin", default=False, action="store_true",
help="relogin to cloud project (default: false)")
parser.add_argument("--no-clear", dest="no_clear", default=False, action="store_true",
help="skip clearing youtube playlist (default: false)")
args = parser.parse_args()
print("sometimes some videos are not removed or added to playlist, try re-running the script")
main(args)
```
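`video_ids_from_json` unpacks each row of `data.json` into three fields and keeps only the last one, so the file is assumed to look roughly like the sketch below (field names are guesses; only the third element matters here).
```python
# Assumed shape of docs/data.json after json.load():
data = [
    ["some title", "some channel or region", "abc123def45"],
    ["another title", "another channel or region", "zyx987wvu65"],
]
video_ids = [video_id for _, _, video_id in data]
print(video_ids)  # ['abc123def45', 'zyx987wvu65']
```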
#### File: current-music-trends/youtube_v3_api/playlists.py
```python
import pickle
from typing import Any, Dict, List, Optional, Tuple, Union
from tqdm import tqdm
from .create import Create
class _Messages:
"""custom class for messages
"""
add_msg = "adding videos"
rm_msg = "removing videos"
class Playlists:
"""Playlists class interacts with youtube playlists
"""
def __init__(self, playlist_id: str, youtube: Any, snippet: Optional[bool] = False, progress_bars: Optional[bool] = False) -> None:
"""Playlists class interacts with youtube playlists
cost = 1 for playlist information + 1 per page for 50 max results
min cost = 2, max cost = 101
Args:
playlist_id (str): playlist id
youtube (Any): a resource object with methods for interacting with the service.
snippet (Optional[bool], optional): request playlist items with snippet part for more info. Defaults to False.
progress_bars (Optional[bool], optional): display task status with progress bars. Defaults to False.
Note:
most of the methods require OAuth client access
Examples:
>>> pl = Playlists("PLQeIlACGt47P3nQEVGWmaU3669iw6q7mQ", youtube)
>>> pl.responses
>>> pl.video_ids
"""
self._youtube, self._snippet, self._progress_bars = youtube, snippet, progress_bars
self.playlist_id = playlist_id
self.link = f"https://www.youtube.com/playlist?list={playlist_id}"
self.cost = 0
self.title, self.desc, self.status = self._playlist_info()
self.responses = self._pl_responses(snippet=self._snippet)
self._playlist_items = self._playlist_item_ids()
self.video_ids = list(self._playlist_items.keys())
@classmethod
def load(cls, filepath: str, youtube: Any, playlist_id: Optional[Union[str, None]] = None,
remove: Optional[bool] = False, **kwargs):
"""Construct Playlist class from saved pickled file
This constructor creates a new playlist if playlist_id is not provided.
If you use playlist_id and want a complete sync with pickled file, then set remove=True.
Args:
filepath (str): pickled file path
youtube (Any): a resource object with methods for interacting with the service.
playlist_id (Optional[Union[str, None]], optional): playlist id. Defaults to None.
            remove (Optional[bool], optional): remove unnecessary videos from playlist. Defaults to False.
Returns:
Playlist: instance of Playlists class
"""
progress_bars = bool("progress_bars" in kwargs.keys() and kwargs["progress_bars"])
# loading pickled instance of ResurrectPlaylist class
loaded_pl = ResurrectPlaylist.load(filepath)
if playlist_id is None:
# create a new playlist
create_item = Create(youtube)
new_pl_id = create_item.playlist(loaded_pl.title, loaded_pl.desc, loaded_pl.status)
# load newly created playlist
new_pl = cls(new_pl_id, youtube, **kwargs)
new_pl.cost += create_item.cost
else:
new_pl = cls(playlist_id, youtube, **kwargs) # load the given playlist
new_pl.update(loaded_pl.title, loaded_pl.desc, loaded_pl.status)
# adding videos
video_ids = loaded_pl.video_ids
if progress_bars:
video_ids = tqdm(video_ids, desc=_Messages.add_msg)
for video_id in video_ids:
new_pl.add_video(video_id)
# removing videos
if playlist_id is not None and remove:
video_ids = new_pl.video_ids
if progress_bars:
                video_ids = tqdm(video_ids, desc=_Messages.rm_msg)
for video_id in video_ids:
if video_id not in loaded_pl.video_ids:
new_pl.remove_video(video_id)
new_pl.refresh()
return new_pl
def __len__(self) -> int:
return self.responses[0]["pageInfo"]["totalResults"]
def _playlist_info(self) -> Tuple[str, str, str]:
request = self._youtube.playlists().list(part="id,snippet,status", id=self.playlist_id)
response = request.execute()
self.cost += 1
title = response["items"][0]["snippet"]["title"]
desc = response["items"][0]["snippet"]["description"]
status = response["items"][0]["status"]["privacyStatus"]
return title, desc, status
def _pl_responses(self, playlist_id: Optional[Union[str, None]] = None, snippet: Optional[bool] = False):
if playlist_id is None:
playlist_id = self.playlist_id
part = "id,snippet,contentDetails" if snippet else "id,contentDetails"
responses = []
playlist_api_queries = {"part": part, "playlistId": playlist_id, "maxResults": 50}
request = self._youtube.playlistItems().list(**playlist_api_queries)
response = request.execute()
self.cost += 1
responses.append(response)
next_page_token = response.get("nextPageToken")
while next_page_token:
request = self._youtube.playlistItems().list(**playlist_api_queries, pageToken=next_page_token)
response = request.execute()
self.cost += 1
responses.append(response)
next_page_token = response.get("nextPageToken")
return responses
def _playlist_item_ids(self) -> Dict[str, List[str]]:
video_ids_dict = {}
for response in self.responses:
for item in response["items"]:
video_id = item["contentDetails"]["videoId"]
if video_id in video_ids_dict:
                    # append in place; list.append returns None
                    video_ids_dict[video_id].append(item["id"])
else:
video_ids_dict[video_id] = [item["id"]]
return video_ids_dict
def refresh(self) -> None:
"""resfresh playlist responses
cost = 1 per page for 50 max results
"""
self.responses = self._pl_responses(snippet=self._snippet)
self._playlist_items = self._playlist_item_ids()
self.video_ids = list(self._playlist_items.keys())
def update(self, title: str, desc: Optional[Union[str, None]] = None, status: Optional[Union[str, None]] = None) -> dict:
"""update playlist title, description and privacy status
cost = 50
Args:
title (str): title for playlist
desc (Optional[str], optional): description for playlist. Defaults to "".
status (Optional[str], optional): privacy status for playlist. Defaults to "private".
Returns:
dict: response
"""
request_body = {
"id": self.playlist_id,
"kind": "youtube#playlist",
"snippet": {
"title": title,
}
}
if desc is not None:
request_body["snippet"]["description"] = desc
if status is not None:
request_body["status"] = {
"privacyStatus": status
}
request = self._youtube.playlists().update(part="id,snippet,status", body=request_body)
response = request.execute()
self.cost += 50
title = response["snippet"]["title"]
desc = response["snippet"]["description"]
status = response["status"]["privacyStatus"]
self.title, self.desc, self.status = title, desc, status
return response
def delete(self) -> None:
"""delete the intialized playlist from youtube forever
cost = 50
"""
request = self._youtube.playlists().delete(id=self.playlist_id)
request.execute()
self.cost += 50
def add_video(self, video_id: str) -> Union[dict, None]:
"""add videos to intialized playlist by using video id only if not present
cost = 50
Args:
video_id (str): video id
Returns:
Union[dict, None]: returns response if video id is added to playlist else None
"""
if video_id in self.video_ids:
return None
request_body = {
"snippet": {
"playlistId": self.playlist_id,
"resourceId": {
"kind": "youtube#video",
"videoId": video_id
}
}
}
request = self._youtube.playlistItems().insert(part="snippet", body=request_body)
response = request.execute()
self.cost += 50
self.video_ids.append(video_id)
return response
def copy_from(self, playlist_id: str) -> None:
"""copy videos from a given playlist to intialized playlist
Args:
playlist_id (str): playlist id
"""
copy_videos_ids = []
        # _pl_responses returns a list of paged responses, so walk every page
        for response in self._pl_responses(playlist_id):
            for item in response["items"]:
                video_id = item["contentDetails"]["videoId"]
                if video_id not in copy_videos_ids and video_id not in self.video_ids:
                    copy_videos_ids.append(video_id)
if self._progress_bars:
copy_videos_ids = tqdm(copy_videos_ids, desc=_Messages.add_msg)
for video_id in copy_videos_ids:
self.add_video(video_id)
def remove_video(self, video_id: str, recursive: Optional[bool] = True) -> Union[dict, None]:
"""remove video from intialized playlist by using video id only if it's present
cost = 50 per removal of video
Args:
video_id (str): video id to remove
recursive (Optional[bool], optional): remove all videos with same video id. Defaults to True.
Returns:
Union[dict, None]: returns last response if removed else None
"""
if video_id not in self.video_ids:
return None
for playlist_item_id in self._playlist_items[video_id]:
request = self._youtube.playlistItems().delete(id=playlist_item_id)
response = request.execute()
self.cost += 50
self.video_ids.remove(video_id)
if not recursive:
break
return response
def clear(self, skip_ids: Optional[List[str]] = []) -> None:
"""clear/remove all videos from intialized playlist
Args:
skip_ids (Optional[List[str]], optional): list video ids to skip. Defaults to [].
"""
remove_video_ids = [video_id for video_id in self.video_ids if video_id not in skip_ids]
if self._progress_bars:
remove_video_ids = tqdm(remove_video_ids, desc=_Messages.rm_msg)
for video_id in remove_video_ids:
self.remove_video(video_id)
def remove_duplicate(self) -> None:
"""remove duplicate videos from intialized playlist
"""
remove_video_ids = [
video_id
for video_id, playlist_item_id in self._playlist_items.items()
if len(playlist_item_id) > 1
]
if self._progress_bars:
remove_video_ids = tqdm(remove_video_ids, desc=_Messages.rm_msg)
for video_id in remove_video_ids:
self.remove_video(video_id)
def save(self, filepath: str):
"""save the intialized playlist to a pickle file
Args:
filepath (str): pickle file path
Examples:
>>> pl.save("my_music_playlist.pkl")
>>> from youtube_v3_api import ResurrectPlaylist
>>> pl_data = ResurrectPlaylist.load("my_music_playlist.pkl")
>>> pl_data.video_ids
['h329290', 'hj2832']
"""
pl = ResurrectPlaylist(self.title, self.desc, self.status, self.video_ids)
pl.save(filepath)
class ResurrectPlaylist:
"""ResurrectPlaylist class saves and loads its instance in and from a pickled file
"""
def __init__(self, title: str, desc: str, status: str, video_ids: List[str]) -> None:
"""ResurrectPlaylist class saves and loads its instance in a pickled file
"""
self.title, self.desc, self.status = title, desc, status
self.video_ids = video_ids
@classmethod
def load(cls, filepath: str):
"""Construct ResurrectPlaylist class from a pickled file
Args:
filepath (str): pickled file path
Returns:
ResurrectPlaylist: instance of ResurrectPlaylist
"""
with open(filepath, "rb") as f:
pl: ResurrectPlaylist = pickle.load(f)
return cls(pl.title, pl.desc, pl.status, pl.video_ids)
def save(self, filepath: str):
"""save instance of class in a pickle file
Args:
filepath (str): pickle file path
"""
with open(filepath, "wb") as f:
pickle.dump(self, f)
``` |
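A sketch of cloning a playlist between accounts using the pickle round-trip described in the docstrings above; `youtube_a` and `youtube_b` are assumed to be authorised service objects built elsewhere (for example with `YoutubeService`), and the playlist id is a placeholder.
```python
# Back up one account's playlist and recreate it on another.
source_pl = Playlists("PLxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", youtube_a, progress_bars=True)
source_pl.save("backup.pkl")

# load() with playlist_id=None creates a fresh playlist and re-adds every video.
clone_pl = Playlists.load("backup.pkl", youtube_b, progress_bars=True)
print(clone_pl.link)
```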
{
"source": "360modder/vsdownload",
"score": 2
} |
#### File: 360modder/vsdownload/setup.py
```python
import os
import sys
import re
import pathlib
from setuptools import setup
def get_version() -> str:
"""Get __version__ from vsdownload.py file."""
version_file = os.path.join(os.path.dirname(__file__), "vsdownload", "vsdownload.py")
version_file_data = open(version_file, "rt", encoding="utf-8").read()
version_regex = r"(?<=^__version__ = ['\"])[^'\"]+(?=['\"]$)"
try:
return re.findall(version_regex, version_file_data, re.M)[0]
except IndexError:
raise ValueError(f"Unable to find version string in {version_file}.")
assert sys.version_info >= (3, 6, 0), "vsdownload requires Python 3.6+"
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
with open("requirements.txt") as f:
REQUIREMENTS = [req.replace("\n", "") for req in f.readlines()]
setup(
name="vsdownload",
version=get_version(),
description="command line program to download hls video streams from websites, m3u8 files and urls",
long_description=README,
long_description_content_type="text/markdown",
keywords=["m3u8", "ts", "video", "stream", "downloader", "m3u8downloader"],
url="https://github.com/360modder/vsdownload.git",
author="360modder",
author_email="<EMAIL>",
license="MIT",
python_requires=">=3.6",
packages=["vsdownload", "vsdownload/commands"],
include_package_data=True,
install_requires=REQUIREMENTS,
entry_points={
"console_scripts": [
"vsdownload=vsdownload.vsdownload:app",
"vsdownload-gui=vsdownload.vsdownload_gui_wrapper:console_script",
]},
project_urls={
"Bug Tracker": "https://github.com/360modder/vsdownload/issues",
"Source": "https://github.com/360modder/vsdownload",
}
)
``` |
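The version string is pulled out with a fixed-width lookbehind/lookahead pair; a tiny demonstration of the same pattern on a toy line:
```python
import re

version_regex = r"(?<=^__version__ = ['\"])[^'\"]+(?=['\"]$)"
sample = '__version__ = "1.2.3"\n'
print(re.findall(version_regex, sample, re.M))  # ['1.2.3']
```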
{
"source": "360netlab/DGA",
"score": 3
} |
#### File: code/enviserv/dga.py
```python
import argparse
import hashlib
import struct
def dga(seed, nr, tlds):
for i in range(nr):
seed_str = seed + str(i)
#print seed_str
s = hashlib.md5()
s.update(seed_str.encode('latin1'))
x = s.digest()
domain = ""
for j in range(5):
domain += "%02x" %(x[j])
domain += '.' + tlds[i % 6]
print(domain)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--nr", help="nr of domains", type=int, default=500)
parser.add_argument("-s", "--seed", help="random string", default="papa_koli")
parser.add_argument("-T", "--tlds", help="TLD", default="com-net-org-info-biz-in")
args = parser.parse_args()
tlds = args.tlds.split('-')
dga(args.seed, args.nr, tlds)
```
#### File: code/mydoom/dga.py
```python
import argparse
from datetime import datetime
def dga(date, seed, nr, tlds):
_sld = ['e', 'v', 'l', 'k', 'r', 'd', 'o', 'h', 'l', 'p']
magic = 'nj'
len_sld = len(_sld)
for i in range(len_sld):
for j in range(len(magic)):
_sld[i] = chr(ord(_sld[i]) ^ ((ord(magic[j]) + i * j) & 0xff))
_seed = seed + date.year + date.month + date.day
for i in range(nr):
if i == nr - 1:
_seed = seed
_seed = ((_seed * 0x19660d) + 0x3c6ef35f) & 0xffffffff
sld = ''
tld = ''
m = _seed
for j in range(len_sld):
idx = m % len_sld
sld += _sld[idx]
if j == 0:
if idx < 7:
tld = tlds[idx]
else:
tld = tlds[-1]
            m = m // len_sld  # integer division keeps the next index an int
print(sld + '.' + tld)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--time', help="Seconds since January 1, 1970 UTC")
parser.add_argument("-n", "--nr", help="nr of domains", type=int, default=51)
parser.add_argument("-s", "--seed", help="RAND_MAX", default="0xfa8")
parser.add_argument("-T", "--tlds", help="TLD", default="com-biz-us-net-org-ws-info-in")
args = parser.parse_args()
d = datetime.utcfromtimestamp(int(args.time))
tlds = args.tlds.split('-')
dga(d, int(args.seed, 16), args.nr, tlds)
```
#### File: code/xshellghost/dga.py
```python
import argparse
from datetime import datetime
from ctypes import c_uint
def dga(year, month, nr, tlds):
_year = c_uint(year)
_month = c_uint(month)
seed = c_uint(0)
print(_year.value)
print(_month.value)
seed.value = 0x90422a3a * _month.value
print("%x" %(seed.value))
seed.value -= 0x39d06f76 * _year.value
print("%x" %(seed.value))
seed.value -= 0x67b7fc6f
print("%x" %(seed.value))
sld_len = seed.value % 6 + 10
sld = ''
for i in range(sld_len):
sld += chr(seed.value % 0x1a + ord('a'))
seed.value = 29 * seed.value + 19
domain = sld + '.' + tlds[0]
print(domain)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--time', help="Seconds since January 1, 1970 UTC")
parser.add_argument("-n", "--nr", help="nr of domains to generate")
args = parser.parse_args()
tlds = ['com']
d = datetime.utcfromtimestamp(int(args.time))
dga(d.year, d.month, int(args.nr), tlds)
``` |
{
"source": "360youlun/cmsplugin-text-ng",
"score": 2
} |
#### File: cmsplugin-text-ng/cmsplugin_text_ng/apps.py
```python
from django.apps import AppConfig
from cmsplugin_text_ng.type_registry import register_type
class CmsPluginTextNgConfig(AppConfig):
name = 'cmsplugin_text_ng'
verbose_name = "Django Cms Plugin Text-NG"
def ready(self):
from cmsplugin_text_ng.models import TextNGVariableText
register_type('text', TextNGVariableText)
```
#### File: cmsplugin-text-ng/cmsplugin_text_ng/models.py
```python
from distutils.version import StrictVersion
import django
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cmsplugin_text_ng.compat import AbstractText
from cmsplugin_text_ng.type_registry import register_type, get_type_list
class TextNGTemplateCategory(models.Model):
title = models.CharField(max_length=128)
def __unicode__(self):
return self.title
class Meta:
verbose_name = _('template category')
verbose_name_plural = _('template categories')
ordering = ['title']
class TextNGTemplate(models.Model):
category = models.ForeignKey(TextNGTemplateCategory, blank=True, null=True)
title = models.CharField(max_length=128)
path = models.CharField(max_length=128)
def __unicode__(self):
if self.category:
return u"%s (%s)" % (self.title, self.category)
return self.title
class Meta:
verbose_name = _('template')
verbose_name_plural = _('templates')
ordering = ['title']
class TextNG(AbstractText):
template = models.ForeignKey(TextNGTemplate)
def copy_relations(self, old_instance):
for model in get_type_list():
for instance in model.objects.filter(text_ng=old_instance):
instance.pk = None
instance.text_ng = self
instance.save()
class Meta:
verbose_name = _('text')
verbose_name_plural = _('texts')
class TextNGVariableBase(models.Model):
select_related = []
text_ng = models.ForeignKey(TextNG, related_name='+')
label = models.CharField(_('label'), max_length=20, validators=[RegexValidator(regex='[_a-z]+', message=_('Only lower case characters.'))])
def __unicode__(self):
return self.label
class Meta:
abstract = True
unique_together = ('text_ng', 'label')
class TextNGVariableText(TextNGVariableBase):
value = models.TextField(_('value'), null=True, blank=True)
def __unicode__(self):
return self.label + (' (%s)' % self.value if self.value else '')
class Meta:
verbose_name = _('text')
verbose_name_plural = _('texts')
if StrictVersion(django.get_version()) < StrictVersion('1.7'):
register_type('text', TextNGVariableText)
```
#### File: cmsplugin-text-ng/cmsplugin_text_ng/type_registry.py
```python
from django.db.models import FieldDoesNotExist
from cmsplugin_text_ng import exceptions
_registry = {}
def register_type(type_name, model_class):
from cmsplugin_text_ng.models import TextNGVariableBase
if type_name in _registry:
if _registry[type_name] == model_class:
# already registered
return
else:
raise exceptions.VariableTypeAlreadyRegistered(
'The type "%s" is already registered by %s' % (type_name, _registry[type_name].__name__)
)
if not issubclass(model_class, TextNGVariableBase):
raise exceptions.InvalidType('%s is not a subclass of TextNGVariableBase' % model_class.__name__)
try:
field = model_class._meta.get_field_by_name('value')[0]
except FieldDoesNotExist:
raise exceptions.InvalidType('%s does not define a "value" field' % model_class.__name__)
if not field.null:
raise exceptions.InvalidType('"value" field of %s is not nullable' % model_class.__name__)
_registry[type_name] = model_class
def get_type(type_name):
return _registry[type_name]
def get_type_list():
return _registry.values()
``` |
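A sketch of registering a custom variable type that satisfies the checks in `register_type` above: it subclasses `TextNGVariableBase` and defines a nullable `value` field. The model and type names are illustrative, not part of the package.
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _

from cmsplugin_text_ng.models import TextNGVariableBase
from cmsplugin_text_ng.type_registry import register_type


class TextNGVariableURL(TextNGVariableBase):
    # A nullable "value" field is required by register_type's validation.
    value = models.URLField(_('value'), null=True, blank=True)

    class Meta:
        verbose_name = _('link')
        verbose_name_plural = _('links')


register_type('url', TextNGVariableURL)
```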
{
"source": "360youlun/django-cms",
"score": 2
} |
#### File: django-cms/cms/context_processors.py
```python
from cms.utils.conf import get_cms_setting
from cms.utils import get_template_from_request
import warnings
def cms_settings(request):
"""
Adds cms-related variables to the context.
"""
return {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'CMS_TEMPLATE': lambda: get_template_from_request(request),
}
def media(request):
warnings.warn('cms.context_processors.media has been deprecated in favor of '
'cms.context_processors.cms_settings. Please update your '
'configuration', DeprecationWarning)
return cms_settings(request)
``` |
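For the processor above to run, it has to be listed in the template context processors; a minimal settings sketch in the old-style configuration this code targets (the exact processor list will vary per project):
```python
# settings.py (Django < 1.8 style, matching this module's era)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'cms.context_processors.cms_settings',
)
```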
{
"source": "360youlun/python-varnish",
"score": 3
} |
#### File: 360youlun/python-varnish/varnish.py
```python
from telnetlib import Telnet
from threading import Thread
from httplib import HTTPConnection
from urlparse import urlparse
from hashlib import sha256
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
)
def http_purge_url(url):
"""
Do an HTTP PURGE of the given asset.
The URL is run through urlparse and must point to the varnish instance not the varnishadm
"""
url = urlparse(url)
connection = HTTPConnection(url.hostname, url.port or 80)
connection.request('PURGE', '%s?%s' % (url.path or '/', url.query), '',
{'Host': url.hostname})
response = connection.getresponse()
if response.status != 200:
logging.error('Purge failed with status: %s' % response.status)
return response
class VarnishHandler(Telnet):
def __init__(self, host_port_timeout, secret=None, **kwargs):
if isinstance(host_port_timeout, basestring):
host_port_timeout = host_port_timeout.split(':')
Telnet.__init__(self, *host_port_timeout)
(status, length), content = self._read()
if status == 107 and secret is not None:
self.auth(secret, content)
elif status != 200:
logging.error('Connecting failed with status: %i' % status)
def _read(self):
(status, length), content = map(int, self.read_until('\n').split()), ''
while len(content) < length:
content += self.read_some()
return (status, length), content[:-1]
def fetch(self, command):
"""
Run a command on the Varnish backend and return the result
return value is a tuple of ((status, length), content)
"""
logging.debug('SENT: %s: %s' % (self.host, command))
self.write('%s\n' % command)
while 1:
buffer = self.read_until('\n').strip()
if len(buffer):
break
status, length = map(int, buffer.split())
content = ''
assert status == 200, 'Bad response code: {status} {text} ({command})'.format(status=status,
text=self.read_until(
'\n').strip(),
command=command)
while len(content) < length:
content += self.read_until('\n')
logging.debug('RECV: %s: %dB %s' % (status, length, content[:30]))
self.read_eager()
return (status, length), content
# Service control methods
def start(self):
"""start Start the Varnish cache process if it is not already running."""
return self.fetch('start')
def stop(self):
"""stop Stop the Varnish cache process."""
return self.fetch('stop')
def quit(self):
"""quit Close the connection to the varnish admin port."""
return self.close()
def auth(self, secret, content):
challenge = content[:32]
response = sha256('%s\n%s\n%s\n' % (challenge, secret, challenge))
response_str = 'auth %s' % response.hexdigest()
self.fetch(response_str)
# Information methods
def ping(self, timestamp=None):
"""
ping [timestamp]
Ping the Varnish cache process, keeping the connection alive.
"""
cmd = 'ping'
if timestamp: cmd += ' %s' % timestamp
return tuple(map(float, self.fetch(cmd)[1].split()[1:]))
def status(self):
"""status Check the status of the Varnish cache process."""
return self.fetch('status')[1]
def help(self, command=None):
"""
help [command]
Display a list of available commands.
If the command is specified, display help for this command.
"""
cmd = 'help'
if command: cmd += ' %s' % command
return self.fetch(cmd)[1]
# VCL methods
def vcl_load(self, configname, filename):
"""
vcl.load configname filename
Create a new configuration named configname with the contents of the specified file.
"""
return self.fetch('vcl.load %s %s' % (configname, filename))
def vcl_inline(self, configname, vclcontent):
"""
vcl.inline configname vcl
Create a new configuration named configname with the VCL code specified by vcl, which must be a
quoted string.
"""
return self.fetch('vcl.inline %s %s' % (configname, vclcontent))
def vcl_show(self, configname):
"""
vcl.show configname
Display the source code for the specified configuration.
"""
return self.fetch('vcl.show %s' % configname)
def vcl_use(self, configname):
"""
vcl.use configname
Start using the configuration specified by configname for all new requests. Existing requests
will continue using whichever configuration was in use when they arrived.
"""
return self.fetch('vcl.use %s' % configname)
def vcl_discard(self, configname):
"""
vcl.discard configname
Discard the configuration specified by configname. This will have no effect if the specified
configuration has a non-zero reference count.
"""
return self.fetch('vcl.discard %s' % configname)
def vcl_list(self):
"""
vcl.list
List available configurations and their respective reference counts. The active configuration
is indicated with an asterisk ("*").
"""
vcls = {}
for line in self.fetch('vcl.list')[1].splitlines():
a = line.split()
vcls[a[2]] = tuple(a[:-1])
return vcls
# Param methods
def param_show(self, param, l=False):
"""
param.show [-l] [param]
Display a list of run-time parameters and their values.
If the -l option is specified, the list includes a brief explanation of each parameter.
If a param is specified, display only the value and explanation for this parameter.
"""
cmd = 'param.show '
if l: cmd += '-l '
return self.fetch(cmd + param)
def param_set(self, param, value):
"""
param.set param value
Set the parameter specified by param to the specified value. See Run-Time Parameters for a list
of parameters.
"""
self.fetch('param.set %s %s' % (param, value))
# Ban methods
def ban(self, expression):
"""
ban field operator argument [&& field operator argument [...]]
Immediately invalidate all documents matching the ban expression. See Ban Expressions for more
documentation and examples.
"""
return self.fetch('ban %s' % expression)[1]
def ban_url(self, regex):
"""
ban.url regexp
Immediately invalidate all documents whose URL matches the specified regular expression. Please
note that the Host part of the URL is ignored, so if you have several virtual hosts all of them
will be banned. Use ban to specify a complete ban if you need to narrow it down.
"""
return self.fetch('ban req.url ~ %s' % regex)[1]
def ban_list(self):
"""
ban.list
All requests for objects from the cache are matched against items on the ban list. If an object
in the cache is older than a matching ban list item, it is considered "banned", and will be
fetched from the backend instead.
When a ban expression is older than all the objects in the cache, it is removed from the list.
ban.list displays the ban list. The output looks something like this (broken into two lines):
0x7fea4fcb0580 1303835108.618863 131G req.http.host ~ www.myhost.com && req.url ~ /some/url
The first field is the address of the ban.
The second is the time of entry into the list, given as a high precision timestamp.
The third field describes how many objects point to this ban. When an object is compared to a ban
the object is marked with a reference to the newest ban it was tested against. This isn't really
useful unless you're debugging.
A "G" marks that the ban is "Gone", meaning it has been marked as a duplicate or it is no longer
valid. It stays in the list for efficiency reasons.
Then follows the actual ban itself.
"""
return self.fetch('ban.list')[1]
def purge_url(self, url):
"""
Wrapper for http_purge_url
"""
return http_purge_url(url)
class ThreadedRunner(Thread):
"""
Runs commands on a particular varnish server in a separate thread
"""
def __init__(self, addr, *commands, **kwargs):
self.addr = addr
self.commands = commands
self.kwargs = kwargs
super(ThreadedRunner, self).__init__()
def run(self):
handler = VarnishHandler(self.addr, **self.kwargs)
for cmd in self.commands:
if isinstance(cmd, tuple) and len(cmd) > 1:
getattr(handler, cmd[0].replace('.', '_'))(*cmd[1:])
else:
getattr(handler, cmd.replace('.', '_'))()
handler.close()
def run(addr, *commands, **kwargs):
"""
Non-threaded batch command runner returning output results
"""
results = []
handler = VarnishHandler(addr, **kwargs)
for cmd in commands:
if isinstance(cmd, tuple) and len(cmd) > 1:
results.extend([getattr(handler, c[0].replace('.', '_'))(*c[1:]) for c in cmd])
else:
results.append(getattr(handler, cmd.replace('.', '_'))(*commands[1:]))
break
handler.close()
return results
class VarnishManager(object):
def __init__(self, servers):
if not len(servers):
logging.warn('No servers found, please declare some')
self.servers = servers
def run(self, *commands, **kwargs):
threaded = kwargs.pop('threaded', False)
for server in self.servers:
if threaded:
[ThreadedRunner(server, *commands, **kwargs).start()
for server in self.servers]
else:
return [run(server, *commands, **kwargs)
for server in self.servers]
def help(self, *args):
return run(self.servers[0], *('help',) + args)[0]
def close(self):
self.run('close', threaded=True)
self.servers = ()
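# Illustrative usage sketch (hosts, ports and the regex are placeholders, not
# part of this module):
#
#   manager = VarnishManager(('varnish-1:6082', 'varnish-2:6082'))
#   manager.run('ping')                                   # run on every server
#   manager.run(('ban.url', r'^/articles/'), threaded=True)
#   manager.close()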
``` |
{
"source": "3623/InfiniteRecharge2021",
"score": 2
} |
#### File: sim/codebase/Listener.py
```python
import subprocess
import threading
from networktables import NetworkTables
#!/usr/bin/env python3
#
# This is a NetworkTables client (eg, the DriverStation/coprocessor side).
# You need to tell it the IP address of the NetworkTables server (the
# robot or simulator).
#
# When running, this will create an automatically updated value, and print
# out the value.
#
import sys
import time
from networktables import NetworkTables
# To see messages from networktables, you must setup logging
import logging
class Listener():
def __init__(self):
logging.basicConfig(level=logging.DEBUG)
NetworkTables.initialize(server='10.36.23.2')
self.sd = NetworkTables.getTable("SmartDashboard")
self.auto_value = self.sd.getAutoUpdateValue("robotTime", 0)
self.p = subprocess.Popen(["java", "MyClass2"], stdout=subprocess.PIPE)
def fetchNetworkTable(self):
print("robotTime:", self.auto_value.value)
return self.auto_value.value
def fetchStdout(self):
line = self.p.stdout.readline()
print(line)
return line
def updateState(self, robot):
if (NetworkTables.isConnected()):
robot.setPosition(self.fetchNetworkTable())
else:
robot.setPosition(self.fetchStdout())
if __name__ == "__main__":
ey = Listener()
ey.fetchStdout()
```
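A minimal, hedged sketch of driving `Listener.updateState` from a polling loop; the `FakeRobot` stand-in and the loop below are illustrative assumptions and not part of the original simulator code.
```python
import time

from Listener import Listener


class FakeRobot:
    """Stand-in for the simulator's robot model (assumption)."""
    def setPosition(self, value):
        print("robot position <-", value)


if __name__ == "__main__":
    listener = Listener()      # connects to NetworkTables and the java subprocess
    robot = FakeRobot()
    for _ in range(10):
        # uses NetworkTables when connected, otherwise falls back to the subprocess stdout
        listener.updateState(robot)
        time.sleep(0.5)
```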
#### File: sim/codebase/main.py
```python
from cmu_112_graphics import *
from tkinter import *
from robot import RobotModel
import Utils
import math
import threading
import time
from logger import Logger
from graph import StackedTimeGraph
from Listener import Listener
class SimulationApp(App):
def appStarted(self):
# Citation: this is a modified version of image from user BENL at link
# https://www.chiefdelphi.com/t/top-down-orthographic-view-of-frcs-2019-playing-field/335397/9
self._fieldImage = self.loadImage("Skills-field.png")
self.fieldImageScaled = ImageTk.PhotoImage(self._fieldImage)
self._robotImage = self.scaleImage(self.loadImage("robot-blue2.png"), 0.57)
self._WAYPOINT_RADIUS = 30
self.setAppDims()
self.resetUserInputs()
self.FIELD_REAL_WIDTH = 9.2 # meters
self.FIELD_REAL_HEIGHT = 4.6 # 16.46
self.timerDelay = 30 # milliseconds
self.waypoints = []
self.robot = RobotModel(1.0, 1.0, 0.0)
self.time = 0.0
self.listener = Listener()
self.UPDATE_RATE = 100
# odometryThread = threading.Thread(
# target=self.odometryPeriodic, daemon=True)
# odometryThread.start()
self.logger = Logger()
self.logger.registerLoggerDict(self.robot.logDict, "robot")
# self.logger.registerLoggerDict(self.controls.logDict, "controls")
yAxes = [self.logger.dict["robot.heading"],
self.logger.dict["robot.vel"]]
self.graph = StackedTimeGraph(self.logger.time, yAxes,
(self.fieldImageWidth, self.width), (self.height, 0))
def resetUserInputs(self):
self._appTime = 0
self.releaseDelay = 0.1
self.autoDriving = False
self.autoDrivingStart = False
self.selectedWaypoint = None
self.rotatingWaypoint = False
self.lastClickTime = 0
self._DOUBLE_CLICK_TIME = 0.2
def timerFired(self):
deltaTime = self.timerDelay/1000.0
self._appTime += deltaTime
if self.autoDriving:
self.logger.log(self.simTime)
def redrawAll(self, canvas):
canvas.create_image(self.fieldImageWidth/2, self.height/2,
image=self.fieldImageScaled)
robotAppX, robotAppY = self.realWorldToAppCoords(
self.robot.x, self.robot.y)
rotatedRobot = self._robotImage.rotate(-self.robot.heading)
canvas.create_image(robotAppX, robotAppY,
image=ImageTk.PhotoImage(rotatedRobot))
for i, waypoint in enumerate(self.waypoints):
self.drawNode(canvas, waypoint, i)
paths = self.robot.getPath(self.waypoints, 0)
for x, path in enumerate(paths):
if x == 0:
color = "blue"
else:
color = "blue"
for i in range(len(path)-1): # len(path)-1):
point = path[i]
pX1, pY1 = self.realWorldToAppCoords(point[0], point[1])
nextPoint = path[i+1]
pX2, pY2 = self.realWorldToAppCoords(
nextPoint[0], nextPoint[1])
canvas.create_line(pX1, pY1, pX2, pY2, fill=color)
self.graph.draw(canvas)
def keyPressed(self, event):
key = event.key
if key == "h":
self.superhelp()
elif key == "Delete":
if self.selectedWaypoint is not None:
self.waypoints.remove(self.selectedWaypoint)
self.selectedWaypoint = None
elif key == "w":
self.incrementWaypointSpeed(0.05)
elif key == "s":
self.incrementWaypointSpeed(-0.05)
elif key == "p":
print(self.logger.dict["robot.vel"])
elif key == "d":
for waypoint in self.waypoints:
print(waypoint.toString())
else:
None
def keyReleased(self, event):
None
def mousePressed(self, event):
if event.x < self.fieldImageWidth:
self.fieldClick(event)
def fieldClick(self, event):
self.cursorX, self.cursorY = event.x, event.y
self.selectedWaypoint = None
for waypoint in self.waypoints:
appWaypointX, appWaypointY = self.realWorldToAppCoords(
waypoint.x, waypoint.y)
if Utils.distance(appWaypointX, appWaypointY, event.x, event.y) < self._WAYPOINT_RADIUS:
self.selectedWaypoint = waypoint
newAngle = math.degrees(-math.atan2(appWaypointX - event.x,
appWaypointY - event.y))
if self._appTime - self.lastClickTime < self._DOUBLE_CLICK_TIME:
waypoint.isCritical = not waypoint.isCritical
if abs(newAngle - self.selectedWaypoint.heading) < 40.0:
self.rotatingWaypoint = True
else:
self.rotatingWaypoint = False
if self.selectedWaypoint is None: # New waypoint
x, y = self.appToRealWorldCoords(event.x, event.y)
newWaypoint = Waypoint(x, y, 0.0, 0.6)
self.waypoints.append(newWaypoint)
self.selectedWaypoint = newWaypoint
self.lastClickTime = self._appTime
def fieldDragged(self, event):
dX = event.x - self.cursorX
dY = event.y - self.cursorY
if self.selectedWaypoint is not None:
appWaypointX, appWaypointY = self.realWorldToAppCoords(
self.selectedWaypoint.x, self.selectedWaypoint.y)
if self.rotatingWaypoint:
newAngle = math.degrees(-math.atan2(appWaypointX - event.x,
appWaypointY - event.y))
self.selectedWaypoint.setPosition(heading=newAngle)
else:
appWaypointX += dX
appWaypointY += dY
waypointX, waypointY = self.appToRealWorldCoords(
appWaypointX, appWaypointY)
self.selectedWaypoint.x, self.selectedWaypoint.y = waypointX, waypointY
self.cursorX, self.cursorY = event.x, event.y
def mouseDragged(self, event):
if event.x < self.fieldImageWidth:
self.fieldDragged(event)
def mouseReleased(self, event):
None
def mouseMoved(self, event):
self.graph.updateHover(event.x)
def realWorldToAppCoords(self, x, y):
newX = (self.fieldImageWidth/2) + (self.fieldImageWidth/self.FIELD_REAL_WIDTH*x)
newY = (self.height) - (self.height/self.FIELD_REAL_HEIGHT*y)
return int(newX), int(newY)
def appToRealWorldCoords(self, x, y):
newX = (x - self.fieldImageWidth/2) / (self.fieldImageWidth/self.FIELD_REAL_WIDTH)
newY = (self.height - y) / (self.height/self.FIELD_REAL_HEIGHT)
return newX, newY
def setAppDims(self):
root = self._root
screenWidth = root.winfo_screenwidth()
screenHeight = root.winfo_screenheight()
imageWidth, imageHeight = self._fieldImage.size
if screenHeight/imageHeight < screenWidth/imageWidth:
scaleFactor = screenHeight/imageHeight*0.9
else:
scaleFactor = screenWidth/imageWidth*0.9
self.height = int(imageHeight * scaleFactor)
self.width = int(imageWidth * scaleFactor + screenWidth * 0.3)
self.setSize(self.width, self.height)
scaledFieldImage = self.scaleImage(self._fieldImage, scaleFactor)
self.fieldImageWidth = scaledFieldImage.size[0]
self.fieldImageScaled = ImageTk.PhotoImage(scaledFieldImage)
def drawNode(self, canvas, node, i):
r = self._WAYPOINT_RADIUS
x, y = self.realWorldToAppCoords(node.x, node.y)
color = self.numberToColor(node.kSpeed)
r2 = r
if node.isCritical:
canvas.create_oval(x+r, y+r,
x-r, y-r,
fill="white")
r2 = 0.7*r
canvas.create_oval(x+r2, y+r2,
x-r2, y-r2,
fill=color,
outline="white")
x1 = x + (r * 1.3 * math.sin(node.r))
x2 = x + (r * 0.3 * math.sin(node.r))
y1 = y - (r * 1.3 * math.cos(node.r))
y2 = y - (r * 0.3 * math.cos(node.r))
canvas.create_line(x2, y2, x1, y1, width=r/4, fill="gold")
canvas.create_text(x, y, anchor="c", text=f"{i}")
def numberToColor(self, x):
scaled = 255 - abs(int(x * 255))
red, green, blue = scaled, scaled, scaled
if x < 0.0:
red = 255
elif x > 0.0:
green = 255
# set your favourite rgb color
color = '#%02x%02x%02x' % (red, green, blue)
return color
def incrementWaypointSpeed(self, delta):
if self.selectedWaypoint is not None:
speed = self.selectedWaypoint.kSpeed
speed += delta
speed = Utils.limit(speed, 1.0, -1.0)
self.selectedWaypoint.kSpeed = speed
def superhelp(self):
print("Arrow keys to move.\n"
+ "Click on the screen to create a waypoint, and click\n"
+ " on any already created waypoint to select it.\n"
+ " Double clicking on the waypoint to make it critical \n"
+ " (the robot will slow down and stop there, else \n"
+ " it will just drive through it). The yellow tick \n"
+ " indicates the direction of the waypoint, and can be\n"
+ " dragged to change the direction. The speed is\n"
+ " indicated by the color and controlled by 'w' and 's'.\n"
+ " The waypoint can be deleted by 'del'.\n"
+ " Press d to print waypoint locations'.\n")
class Waypoint(Utils.Twist):
def __init__(self, x, y, heading, kSpeed, isCritical=False):
super().__init__(x, y, heading)
self.kSpeed = kSpeed
self.isCritical = isCritical
def toString(self):
return f"x: {round(self.x,2)}, y: {round(self.y,2)}, " + \
f"heading: {round(self.heading,2)}, speed: {self.kSpeed}," + \
f"critical: {self.isCritical}"
def __repr__(self):
return self.toString()
if __name__ == "__main__":
SimulationApp()
``` |
{
"source": "365midlar/airbrake-flask",
"score": 3
} |
#### File: 365midlar/airbrake-flask/setup.py
```python
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
""".splitlines()
from setuptools import setup
from airbrake import __version__, __app_url__, __app_name__
doc = __doc__.splitlines()
setup(
name=__app_name__,
version=__version__,
packages=['airbrake'],
zip_safe=False,
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url=__app_url__,
license='MIT',
description=doc[0],
long_description='\n'.join(doc[2:]),
install_requires=['requests==2.0.1'],
extras_require={
'test': [
'nose',
'coverage',
'mock',
'blinker',
'Flask',
'gevent'
]
},
test_suite="tests",
keywords='error airbrake flask exception',
classifiers=classifiers
)
``` |
{
"source": "365moods/uwsgi",
"score": 2
} |
#### File: uwsgi_sloth/commands/start.py
```python
import os
import signal
import argparse
import datetime
from configobj import ConfigObj
from uwsgi_sloth.analyzer import format_data, RealtimeLogAnalyzer, URLClassifier
from uwsgi_sloth.tailer import Tailer, no_new_line
from uwsgi_sloth.template import render_template
from uwsgi_sloth.utils import makedir_if_none_exists, total_seconds, parse_url_rules, smart_str
from uwsgi_sloth.models import merge_requests_data_to, RequestsData, SavePoint
from uwsgi_sloth.settings import REALTIME_UPDATE_INTERVAL, DEFAULT_MIN_MSECS
import logging
logger = logging.getLogger('uwsgi_sloth')
class HTMLRender(object):
"""helper for render HTML"""
def __init__(self, html_dir, domain=None):
self.html_dir = html_dir
self.domain = domain
def render_requests_data_to_html(self, data, file_name, context={}):
"""Render to HTML file"""
file_path = os.path.join(self.html_dir, file_name)
logger.info('Rendering HTML file %s...' % file_path)
data = format_data(data)
data.update(context)
data.update(domain=self.domain)
with open(file_path, 'w') as fp:
fp.write(smart_str(render_template('realtime.html', data)))
def update_html_symlink(html_dir):
""""Maintail symlink: "today.html", "yesterday.html" """
today = datetime.date.today()
yesterday = datetime.date.today() - datetime.timedelta(days=1)
for from_date, alias_name in (
(today, 'today.html'), (yesterday, 'yesterday.html')):
from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
symlink_path = os.path.join(html_dir, alias_name)
try:
os.unlink(symlink_path)
except OSError:
pass
os.symlink(from_date_file_path, symlink_path)
def start(args):
# Load config file
config = ConfigObj(infile=args.config.name)
data_dir = config['data_dir']
uwsgi_log_path = config['uwsgi_log_path']
min_msecs = int(config.get('min_msecs', DEFAULT_MIN_MSECS))
url_file = config.get('url_file')
# Load custom url rules
url_rules = []
if url_file:
with open(url_file, 'r') as fp:
url_rules = parse_url_rules(fp)
html_dir = os.path.join(data_dir, 'html')
db_dir = os.path.join(data_dir, 'data')
makedir_if_none_exists(html_dir)
makedir_if_none_exists(db_dir)
save_point = SavePoint(db_dir)
last_log_datetime = save_point.get_last_datetime() or \
(datetime.datetime.now() - datetime.timedelta(seconds=REALTIME_UPDATE_INTERVAL))
logger.info('Start from last savepoint, last_log_datetime: %s' % last_log_datetime)
last_update_datetime = None
url_classifier = URLClassifier(user_defined_rules=url_rules)
analyzer = RealtimeLogAnalyzer(url_classifier=url_classifier, min_msecs=min_msecs,
start_from_datetime=last_log_datetime)
file_tailer = Tailer(uwsgi_log_path)
html_render = HTMLRender(html_dir, domain=config.get('domain'))
# Listen INT/TERM signal
def gracefully_exit(*args):
logger.info('Signal received, exit.')
file_tailer.stop_follow()
signal.signal(signal.SIGINT, gracefully_exit)
for line in file_tailer:
# Analyze line
if line != no_new_line:
analyzer.analyze_line(line)
now = datetime.datetime.now()
if not file_tailer.trailing:
continue
if last_update_datetime and \
total_seconds(now - last_update_datetime) < REALTIME_UPDATE_INTERVAL:
continue
# Render HTML file when:
# - file_tailer reaches end of file.
# - last_update_datetime is over one `interval` from now
# Render latest interval HTML file
html_render.render_requests_data_to_html(analyzer.get_data('last_interval'),
'latest_5mins.html', context={'datetime_range': 'Last 5 minutes'})
analyzer.clean_data_by_key('last_interval')
for date in analyzer.data.keys():
day_requests_data = RequestsData(date, db_dir)
merge_requests_data_to(day_requests_data.data, analyzer.get_data(date))
# Render to HTML file
html_render.render_requests_data_to_html(day_requests_data.data,
'day_%s.html' % date, context={'datetime_range': date})
# Save data to pickle file
day_requests_data.save()
# Reset Everything
analyzer.clean_data_by_key(date)
update_html_symlink(html_dir)
last_update_datetime = now
if analyzer.last_analyzed_datetime:
save_point.set_last_datetime(analyzer.last_analyzed_datetime)
save_point.save()
def load_subcommand(subparsers):
"""Load this subcommand"""
parser_start = subparsers.add_parser('start', help='Start uwsgi-sloth process for realtime analyzing.')
parser_start.add_argument('-c', '--config', type=argparse.FileType('r'), dest='config',
help='uwsgi-sloth config file, use "uwsgi-sloth echo_conf" for a default one', required=True)
parser_start.set_defaults(func=start)
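# Illustrative wiring sketch (assumption; the real CLI entry point lives in
# uwsgi-sloth's main script, not in this file):
#
#   parser = argparse.ArgumentParser(prog='uwsgi-sloth')
#   subparsers = parser.add_subparsers()
#   load_subcommand(subparsers)
#   args = parser.parse_args()
#   args.func(args)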
``` |
{
"source": "3662/cmput404-project",
"score": 2
} |
#### File: service/tests/helper.py
```python
from social_distribution.models import Author, Post, Comment, Like
def create_dummy_authors(n):
'''Creates n dummy authors.'''
for i in range(n):
Author.objects.create_user(username=f'test{i}',
password=f'<PASSWORD>',
first_name = f'Test{i}',
last_name = 'Example',
host='http://localhost:8000/',
github=f'https://github.com/test{i}',
profile_image='https://avatars.githubusercontent.com/u/55664235?v=4')
def create_dummy_post(author, visibility='PUBLIC', content_type='text/plain'):
'''Creates a dummy post for the given author'''
if visibility not in map(lambda p:p[0], Post.VISIBILITY_CHOICES):
raise ValueError('Invalid visibility')
if content_type not in map(lambda p:p[0], Post.CONTENT_TYPE_CHOICES):
raise ValueError('Invalid content type')
Post.objects.create(author=author,
visibility=visibility,
title='Test Post',
source='',
origin='',
description='Test post description',
content_type=content_type,
content='Test post content',
categories='test,cmput404')
def create_dummy_posts(n, author, visibility='PUBLIC', content_type='text/plain'):
'''Creates n dummy posts for the given author'''
if visibility not in map(lambda p:p[0], Post.VISIBILITY_CHOICES):
raise ValueError('Invalid visibility')
if content_type not in map(lambda p:p[0], Post.CONTENT_TYPE_CHOICES):
raise ValueError('Invalid content type')
for i in range(n):
Post.objects.create(author=author,
visibility=visibility,
title=f'Test Post{i}',
source='',
origin='',
description=f'Test post{i} description',
content_type=content_type,
content=f'Test post{i} content',
categories='test,cmput404')
def create_dummy_author_with_followers(num_followers):
'''Creates a dummy author with num_followers followers.'''
author = Author.objects.create_user(username='test',
password='<PASSWORD>',
first_name = 'Test',
last_name = 'Example',
host='http://localhost:8000/',
github=f'https://github.com/test',
profile_image='https://avatars.githubusercontent.com/u/55664235?v=4')
for i in range(num_followers):
follower = Author.objects.create_user(username=f'test{i}',
password=f'<PASSWORD>',
first_name = f'Test{i}',
last_name = 'Example',
host='http://localhost:8000/',
github=f'https://github.com/test{i}',
profile_image='https://avatars.githubusercontent.com/u/55664235?v=4')
author.followers.add(follower)
def create_dummy_comments(n, author, post):
'''Creates n dummy comments to the post written by the author'''
for i in range(n):
Comment.objects.create(author=author,
post=post,
content_type='text/plain',
content=f'Test Comment{i}')
def create_dummy_likes_to_post(like_authors, post):
'''Creates likes from like_authors to the post'''
for like_author in like_authors:
Like.objects.create(author=like_author,
author_url = like_author.get_id_url(),
object_type='POST',
object_url=post.get_id_url())
def create_dummy_likes_to_comment(like_authors, comment):
'''Creates likes from like_authors to the comment'''
for like_author in like_authors:
Like.objects.create(author=like_author,
author_url = like_author.get_id_url(),
object_type='COMMENT',
object_url=comment.get_id_url())
```
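A minimal sketch of combining the helpers above inside a test case; the test class itself is an illustration and not part of the original suite.
```python
from django.test import TestCase

from social_distribution.models import Author, Post
from .helper import create_dummy_authors, create_dummy_posts


class HelperUsageExample(TestCase):
    """Illustrative only: exercises the helper functions above."""

    def test_helpers_create_objects(self):
        create_dummy_authors(2)
        author = Author.objects.get(username='test0')
        create_dummy_posts(3, author, visibility='PUBLIC', content_type='text/plain')
        self.assertEqual(Post.objects.filter(author=author).count(), 3)
```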
#### File: service/tests/tests_author.py
```python
from django.test import TestCase, Client
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from social_distribution.models import Author
from service.models import ServerNode
from .helper import create_dummy_authors
class AuthorsDetailViewTestCase(TestCase):
NUM_AUTHORS = 5
def setUp(self):
ServerNode.objects.create(host='testserver', is_local=True)
create_dummy_authors(self.NUM_AUTHORS)
def test_get(self):
c = Client()
num_authors = len(Author.objects.all())
response = c.get(f'/service/authors?page=1&size={num_authors}')
data = response.json()
self.assertEqual(data['type'], 'authors')
self.assertEqual(len(data['items']), num_authors)
first = data['items'][0]
self.assertTrue('type' in first.keys())
self.assertTrue('id' in first.keys())
self.assertTrue('url' in first.keys())
self.assertTrue('host' in first.keys())
self.assertTrue('displayName' in first.keys())
self.assertTrue('github' in first.keys())
self.assertTrue('profileImage' in first.keys())
response = c.get(f'/service/authors?page=2&size={num_authors}')
self.assertEqual(response.status_code, 404)
def test_head(self):
c = Client()
num_authors = len(Author.objects.all())
response = c.head(f'/service/authors?page=1&size={num_authors}')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'')
class AuthorDetailViewTestCase(TestCase):
def setUp(self):
ServerNode.objects.create(host='testserver', is_local=True)
create_dummy_authors(1)
def test_get(self):
c = Client()
author = Author.objects.get(username='test0')
response = c.get(f'/service/authors/{author.id}')
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertTrue('type' in data.keys())
self.assertTrue('id' in data.keys())
self.assertTrue('url' in data.keys())
self.assertTrue('host' in data.keys())
self.assertTrue('displayName' in data.keys())
self.assertTrue('github' in data.keys())
self.assertTrue('profileImage' in data.keys())
self.assertEqual(data['type'], 'author')
self.assertEqual(data['displayName'], author.get_full_name())
self.assertEqual(data['profileImage'], author.profile_image)
try:
validate = URLValidator()
validate(data['id'])
validate(data['url'])
validate(data['github'])
except ValidationError as e:
self.assertTrue(False, "This field must be a valid url")
else:
self.assertEqual(data['id'], author.get_id_url())
self.assertEqual(data['url'], author.get_profile_url())
self.assertEqual(data['github'], author.github)
def test_head(self):
c = Client()
author = Author.objects.get(username='test0')
response = c.head(f'/service/authors/{author.id}')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'')
def test_post(self):
c = Client()
author = Author.objects.get(username='test0')
# test without signed in
response = c.post(f'/service/authors/{author.id}')
self.assertEqual(response.status_code, 403)
c.login(username=author.username, password='<PASSWORD>')
# post with invalid form
data = {
'github': 'invalid url',
}
response = c.post(f'/service/authors/{author.id}', data, follow=True)
self.assertEqual(response.status_code, 400)
# post with valid form
data = {
'first_name': 'Updated_first_name',
'last_name': 'Updated_last_name',
'profile_image': 'https://avatars.githubusercontent.com/u/71972141?s=200&v=4',
'github': 'https://github.com/updated',
}
response = c.post(f'/service/authors/{author.id}', data, follow=True)
self.assertEqual(response.status_code, 200)
# check if the author is updated
author = Author.objects.get(username='test0')
self.assertEqual(author.first_name, data['first_name'])
self.assertEqual(author.last_name, data['last_name'])
self.assertEqual(author.profile_image, data['profile_image'])
self.assertEqual(author.github, data['github'])
```
#### File: service/tests/tests_inbox.py
```python
import uuid
import json
from django.test import TestCase, Client
from django.core.exceptions import ObjectDoesNotExist
from social_distribution.models import Author, Post, Inbox, InboxItem, FollowRequest, Like, Comment
from service.models import ServerNode
from .helper import create_dummy_authors, create_dummy_post, create_dummy_posts, create_dummy_comments
class InboxViewTestCase(TestCase):
def setUp(self):
ServerNode.objects.create(host='testserver', is_local=True)
create_dummy_authors(2)
def test_send_posts(self):
c = Client()
sender = Author.objects.get(username='test0')
receiver = Author.objects.get(username='test1')
# dummy posts
num_posts = 5
create_dummy_posts(num_posts, sender, 'PUBLIC', 'text/plain')
posts = Post.objects.filter(author=sender).order_by('id')
self.assertEqual(len(posts), num_posts)
# sender sends dummy posts to receiver's inbox
for post in posts:
response = c.post(f'/service/authors/{receiver.id}/inbox',
json.dumps(post.get_detail_dict()),
content_type='application/json')
self.assertEqual(response.status_code, 201)
# assert InboxItems are created
receiver_inbox = Inbox.objects.get(author=receiver)
self.assertEqual(len(InboxItem.objects.filter(inbox=receiver_inbox)), num_posts)
# assert their api objects
items = InboxItem.objects.filter(inbox=receiver_inbox).order_by('object_id')
for i in range(len(items)):
self.assertDictEqual(items[i].get_detail_dict(), posts[i].get_detail_dict())
# clear inbox
response = c.delete(f'/service/authors/{receiver.id}/inbox')
self.assertEqual(response.status_code, 204)
self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())
def test_send_follow_request(self):
c = Client()
sender = Author.objects.get(username='test0')
receiver = Author.objects.get(username='test1')
# valid follow object
data = {
'type': 'Follow',
'summary': 'Test0 wants to follow Test1',
'actor': sender.get_detail_dict(),
'object': receiver.get_detail_dict(),
}
response = c.post(f'/service/authors/{receiver.id}/inbox',
json.dumps(data),
content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(FollowRequest.objects.filter(from_author=sender, to_author=receiver).exists())
fr = FollowRequest.objects.get(from_author=sender, to_author=receiver)
# assert InboxItem is created
receiver_inbox = Inbox.objects.get(author=receiver)
self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=fr.id).exists())
# send Follow object again (should fail)
response = c.post(f'/service/authors/{receiver.id}/inbox',
json.dumps(data),
content_type='application/json')
self.assertEqual(response.status_code, 400)
# clear inbox
response = c.delete(f'/service/authors/{receiver.id}/inbox')
self.assertEqual(response.status_code, 204)
self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())
def test_send_like(self):
c = Client()
sender = Author.objects.get(username='test0')
receiver = Author.objects.get(username='test1')
create_dummy_post(receiver)
post = Post.objects.get(author=receiver)
# valid like object to post
data = {
'@context': 'https://www.w3.org/ns/activitystreams',
'summary': 'Test0 Likes your post',
'type': 'Like',
'author': sender.get_detail_dict(),
'object': post.get_id_url()
}
response = c.post(f'/service/authors/{receiver.id}/inbox',
json.dumps(data),
content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(Like.objects.filter(author=sender,
author_url=sender.get_id_url(),
object_type='POST',
object_url=post.get_id_url()).exists())
like = Like.objects.get(author=sender,
author_url=sender.get_id_url(),
object_type='POST',
object_url=post.get_id_url())
receiver_inbox = Inbox.objects.get(author=receiver)
self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=like.id).exists())
# clear inbox
response = c.delete(f'/service/authors/{receiver.id}/inbox')
self.assertEqual(response.status_code, 204)
self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())
def test_send_comment(self):
c = Client()
sender = Author.objects.get(username='test0')
receiver = Author.objects.get(username='test1')
create_dummy_post(receiver)
post = Post.objects.get(author=receiver)
create_dummy_comments(1, sender, post)
comment = Comment.objects.get(author=sender, post=post)
response = c.post(f'/service/authors/{receiver.id}/inbox',
json.dumps(comment.get_detail_dict()),
content_type='application/json')
self.assertEqual(response.status_code, 201)
receiver_inbox = Inbox.objects.get(author=receiver)
self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=comment.get_id_url(), object_id=comment.id).exists())
# clear inbox
response = c.delete(f'/service/authors/{receiver.id}/inbox')
self.assertEqual(response.status_code, 204)
self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())
```
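For reference, a sketch of the `Like` payload that `test_send_like` above posts to the inbox; the values are placeholders taken from the test.
```python
# Shape of the Like object posted to /service/authors/<AUTHOR_ID>/inbox
# (placeholder values; the structure mirrors test_send_like above).
example_like = {
    '@context': 'https://www.w3.org/ns/activitystreams',
    'summary': 'Test0 Likes your post',
    'type': 'Like',
    'author': {},        # sender.get_detail_dict() in the tests
    'object': 'http://localhost:8000/authors/<author_id>/posts/<post_id>',
}
```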
#### File: service/views/views_inbox.py
```python
import json
from django.shortcuts import get_object_or_404
from django.views import View
from django.http import JsonResponse, HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage
from service.server_authorization import is_server_authorized, is_local_server, get_401_response
from social_distribution.models import Author, Post, Like, Comment, Inbox, InboxItem, FollowRequest
class InboxView(View):
http_method_names = ['get', 'head', 'options', 'post', 'delete']
DEFAULT_PAGE = 1
DEFAULT_SIZE = 10
def get(self, request, *args, **kwargs):
'''
GET [local]: if authenticated, get a list of posts sent to AUTHOR_ID (paginated)
Default page = 1, size = 10
Returns:
- 200: if successful
- 401: if server is not authorized
- 403: if the author is not authenticated
- 404: if author or page does not exist
'''
if not is_local_server(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
return JsonResponse(self._get_inbox_items(request, author_id))
def head(self, request, *args, **kwargs):
'''
Handles HEAD request of the same GET request.
Returns:
- 200: if successful
- 401: if server is not authorized
- 403: if the author is not authenticated, or host is not local
- 404: if author or page does not exist
'''
if not is_local_server(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
data_json = json.dumps(self._get_inbox_items(request, author_id))
response = HttpResponse()
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Length'] = str(len(bytes(data_json, 'utf-8')))
return response
def post(self, request, *args, **kwargs):
'''
POST [local, remote]: send an object to the author
- If the type is “post” then add that post to AUTHOR_ID's inbox
- If the type is “follow” then add that follow to AUTHOR_ID's inbox to approve later
- If the type is “like” then add that like to AUTHOR_ID's inbox
- If the type is “comment” then add that comment to AUTHOR_ID's inbox
Returns:
- 201: if successful
- 400: if the object is invalid.
- 401: if server is not authorized
- 404: if the author does not exist.
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
author = get_object_or_404(Author, id=author_id)
try:
inbox = Inbox.objects.get(author=author)
except ObjectDoesNotExist:
# create an inbox for this author if it doesn't exist
inbox = Inbox.objects.create(author=author)
valid_object_types = ['post', 'follow', 'like', 'comment']
data = json.loads(request.body.decode('utf-8'))
try:
t = data['type'].strip().lower()
if t not in valid_object_types:
raise ValueError('The type of the object is invalid')
if t == 'post':
object_type = 0
post_id = data['id'].split('/')[-1]
if Post.objects.filter(id=post_id).exists():
post = Post.objects.get(id=post_id)
object_id = post.id
object_url = post.get_id_url()
else:
object_id = None
object_url = data['id']
elif t == 'comment':
object_type = 1
comment_id = data['id'].split('/')[-1]
if Comment.objects.filter(id=comment_id).exists():
comment = Comment.objects.get(id=comment_id)
object_id = comment.id
object_url = comment.get_id_url()
else:
object_id = None
object_url = data['id']
elif t == 'follow':
object_type = 2
object = self._create_follow_request(data, author)
object_id = object.id
object_url = None
else:
object_type = 3
object = self._create_like(data)
object_id = object.id
object_url = None
if not InboxItem.objects.filter(object_id=object_id).exists():
# Only create if the object is not already in the inbox
# If the object exists in the inbox, it has been already updated at this point.
InboxItem.objects.create(inbox=inbox,
object_type=InboxItem.OBJECT_TYPE_CHOICES[object_type][0],
object_id=object_id,
object_url=object_url)
except (KeyError, ValueError) as e:
status_code = 400
return HttpResponse(e if str(e) != '' else 'The object is invalid', status=status_code)
else:
return HttpResponse("An object is successfully sent to the inbox", status=201)
def delete(self, request, *args, **kwargs):
'''
DELETE [local]: clears the inbox
Returns:
- 204: if successfully cleared
- 401: if server is not authorized
- 404: if the author does not exist
'''
if not is_local_server(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
author = get_object_or_404(Author, id=author_id)
try:
inbox = Inbox.objects.get(author=author)
except ObjectDoesNotExist:
# create an inbox for this author if it doesn't exist
inbox = Inbox.objects.create(author=author)
InboxItem.objects.filter(inbox=inbox).delete()
return HttpResponse("The inbox is cleared", status=204)
def _get_inbox_items(self, request, author_id) -> dict:
'''
Returns a dict containing a list of posts in the author_id's inbox.
'''
if not request.user.is_authenticated:
status_code = 403
message = "You do not have permission to access this author's inbox."
return HttpResponse(message, status=status_code)
page = int(request.GET.get('page', self.DEFAULT_PAGE))
size = int(request.GET.get('size', self.DEFAULT_SIZE))
author = get_object_or_404(Author, id=author_id)
try:
inbox = Inbox.objects.get(author=author)
except ObjectDoesNotExist:
# create an inbox for this author if it doesn't exist
inbox = Inbox.objects.create(author=author)
try:
q = InboxItem.objects.all().filter(inbox=inbox)
q = q.order_by('-date_created')
inbox_items = Paginator(q, size).page(page)
except EmptyPage:
raise Http404('Page does not exist')
data = {}
data['type'] = 'inbox'
data['items'] = [item.get_detail_dict() for item in inbox_items]
return data
def _create_like(self, data_dict) -> Like:
'''
Create a Like from data_dict.
Raises ValueError if
- @context is invalid, or
- object id in data_dict is not associated with the author
'''
context = data_dict['@context']
if context != Like.context:
raise ValueError('Invalid context: %s' % context)
like_author_id = data_dict['author']['id'].split('/')[-1]
like_author = None # can be local or remote author
if Author.objects.filter(id=like_author_id).exists():
# is a local author
like_author = Author.objects.get(id=like_author_id)
like_author_url = data_dict['author']['id']
object_url = data_dict['object']
# object id must exist in our database
object_id = object_url.split('/')[-1]
if Post.objects.filter(id=object_id).exists():
object_type = Like.OBJECT_TYPE_CHOICES[0][0]
elif Comment.objects.filter(id=object_id).exists():
object_type = Like.OBJECT_TYPE_CHOICES[1][0]
else:
raise ValueError('object id: %s is not associated with this author' % object_id)
return Like.objects.create(author=like_author,
author_url=like_author_url,
object_type=object_type,
object_url=object_url)
def _create_follow_request(self, data_dict, author:Author) -> FollowRequest:
'''
Creates a FollowRequest between the two authors in data_dict.
Raises a ValueError if
- FollowRequest between the two authors already exists, or
- author in the request and author in the data_dict are not equal.
'''
# from_author can be from remote server
from_author_id = data_dict['actor']['id'].split('/')[-1]
if Author.objects.filter(id=from_author_id).exists():
from_author = Author.objects.get(id=from_author_id)
else:
from_author = None
from_author_url = data_dict['actor']['url']
to_author_id = data_dict['object']['id'].split('/')[-1]
to_author = get_object_or_404(Author, id=to_author_id)
if author != to_author:
# assert target author is the author in the request
raise ValueError('Target author and to_author in follow object must be equal')
if FollowRequest.objects.filter(from_author_url=from_author_url, to_author=author).exists():
# raise an exception if follow request between the two author already exists
raise ValueError('Follow request is already sent')
return FollowRequest.objects.create(from_author=from_author,
from_author_url=from_author_url,
to_author=author,
to_author_url=author.get_id_url())
```
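A hedged client-side sketch of exercising the inbox endpoint described above; the host, ids, and payload are placeholders, not values from this repository.
```python
import json

import requests

BASE = 'http://localhost:8000/service'   # placeholder host
author_id = '<receiver-uuid>'             # placeholder

# Send an object (e.g. a dict produced by Post.get_detail_dict()) to the inbox.
post_object = {'type': 'post', 'id': f'{BASE}/authors/<sender-uuid>/posts/<post-uuid>'}
resp = requests.post(f'{BASE}/authors/{author_id}/inbox',
                     data=json.dumps(post_object),
                     headers={'Content-Type': 'application/json'})
print(resp.status_code)                   # 201 on success, 400/401/404 otherwise

# Clear the inbox (local, authenticated use only).
requests.delete(f'{BASE}/authors/{author_id}/inbox')
```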
#### File: service/views/views_like.py
```python
import json
from django.shortcuts import get_object_or_404
from django.views import View
from django.http import JsonResponse, HttpResponse, Http404
from service.server_authorization import is_server_authorized, get_401_response
from social_distribution.models import Author, Post, Like, Comment
class PostLikesView(View):
http_method_names = ['get', 'head', 'options']
def get(self, request, *args, **kwargs):
'''
GET [local, remote] returns a list of likes from other authors on AUTHOR_ID's post POST_ID
Returns:
- 200 if successful
- 401: if server is not authorized
- 404 if post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
return JsonResponse(self._get_likes(author_id, post_id))
def head(self, request, *args, **kwargs):
'''
Handles HEAD request of the same GET request.
Returns:
- 200: if successful
- 401: if server is not authorized
- 404: if post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
data_json = json.dumps(self._get_likes(author_id, post_id))
response = HttpResponse()
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Length'] = str(len(bytes(data_json, 'utf-8')))
return response
def _get_likes(self, author_id, post_id):
'''
Returns a dict that contains a list of likes.
'''
post_author = get_object_or_404(Author, pk=author_id)
post = get_object_or_404(Post, pk=post_id, author=post_author)
likes = Like.objects.filter(object_type=Like.OBJECT_TYPE_CHOICES[0][0], object_url=post.get_id_url()).all()
data = {}
data['type'] = 'liked'
data['count'] = likes.count()
data['items'] = [l.get_detail_dict() for l in likes]
return data
class CommentLikesView(View):
http_method_names = ['get', 'head', 'options']
def get(self, request, *args, **kwargs):
'''
GET [local, remote] returns a list of likes from other authors on AUTHOR_ID's post POST_ID comment COMMENT_ID.
Returns:
- 200 if successful
- 401: if server is not authorized
- 404 if post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
comment_id = kwargs.get('comment_id', '')
return JsonResponse(self._get_likes(author_id, post_id, comment_id))
def head(self, request, *args, **kwargs):
'''
Handles HEAD request of the same GET request.
Returns:
- 200: if successful
- 401: if server is not authorized
- 404: if post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
comment_id = kwargs.get('comment_id', '')
data_json = json.dumps(self._get_likes(author_id, post_id, comment_id))
response = HttpResponse()
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Length'] = str(len(bytes(data_json, 'utf-8')))
return response
def _get_likes(self, author_id, post_id, comment_id):
'''
Returns a dict that contains a list of likes.
'''
post_author = get_object_or_404(Author, pk=author_id)
post = get_object_or_404(Post, pk=post_id, author=post_author)
comment = get_object_or_404(Comment, pk=comment_id, post=post)
likes = Like.objects.filter(object_type=Like.OBJECT_TYPE_CHOICES[1][0], object_url=comment.get_id_url()).all()
data = {}
data['type'] = 'liked'
data['count'] = likes.count()
data['items'] = [l.get_detail_dict() for l in likes]
return data
```
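Both likes views above return a `liked` collection; a sketch of the response shape follows (each item comes from `Like.get_detail_dict()`, whose exact fields are defined by the model and not shown here).
```python
# Shape of the JSON returned by PostLikesView / CommentLikesView above.
example_liked = {
    'type': 'liked',
    'count': 1,
    'items': [
        # Like.get_detail_dict() entries go here
    ],
}
```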
#### File: service/views/views_post.py
```python
import json
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator, EmptyPage
from django.http import JsonResponse, HttpResponse, Http404
from django.views import View
from posts.forms import PostForm
from django.core.exceptions import ValidationError
from service.server_authorization import is_server_authorized, is_local_server, get_401_response
from social_distribution.models import Author, Post
class PostView(View):
http_method_names = ['get', 'head', 'options', 'post', 'delete', 'put']
def get(self, request, *args, **kwargs):
'''
GET [local, remote]: Returns a JSON response with status code of 200
that contains the public post whose id is post_id.
Returns:
- 200: if successful
- 401: if server is not authorized
- 404: if author or post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
author = get_object_or_404(Author, pk=author_id)
post = get_object_or_404(Post, pk=post_id, author_id=author_id, visibility="PUBLIC")
return JsonResponse(post.get_detail_dict())
def head(self, request, *args, **kwargs):
'''
Handles HEAD request of the same GET request.
Returns:
- 200: if the request is successful
- 401: if server is not authorized
- 404: if author or post does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
author = get_object_or_404(Author, pk=author_id)
post = get_object_or_404(Post, pk=post_id, author_id=author_id, visibility="PUBLIC")
data_json = json.dumps(post.get_detail_dict())
response = HttpResponse()
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Length'] = str(len(bytes(data_json, 'utf-8')))
return response
def post(self, request, *args, **kwargs):
'''
POST [local]: Updates the post whose id is post_id.
Returns:
- 200: if the update is successful
- 400: if the data is invalid
- 401: if server is not authorized
- 403: if the user is not authenticated
- 404: if author or post does not exist
'''
if not is_local_server(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
post = get_object_or_404(Post, pk=post_id, author_id=author_id)
if not request.user.is_authenticated:
status_code = 403
message = "You do not have permission to update this author's post."
return HttpResponse(message, status=status_code)
form = PostForm(request.POST)
if form.is_valid():
self.update_post(post, form)
return HttpResponse("Post is successfully updated.")
status_code = 400
return HttpResponse('The form is not valid.', status=status_code)
def delete(self, request, *args, **kwargs):
'''
DELETE [local]: removes the post whose id is post_id.
Returns:
- 204: if the deletion was successful
- 401: if server is not authorized
- 403: if the user is not authenticated
- 404: if author or post does not exist
'''
if not is_local_server(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
post_id = kwargs.get('post_id', '')
author = get_object_or_404(Author, pk=author_id)
post = get_object_or_404(Post, pk=post_id, author=author)
post.delete()
return HttpResponse('Post successfully deleted', status=204)
def put(self, request, *args, **kwargs):
'''
PUT [local]: creates a post where its id is post_id, if the given form data is valid.
Note: if the post already exists, it will update the post with the new form data,
but the user must be authenticated.
Returns:
- 200: if the post is successfully updated
- 201: if the post is successfully created
- 400: if the data is invalid
- 401: if server is not authorized
- 403: if the user is not authenticated
- 404: if author does not exist
'''
if not is_local_server(request):
return get_401_response()
status_code = 201
author_id = kwargs.pop('author_id', '')
post_id = kwargs.pop('post_id', '')
author = get_object_or_404(Author, pk=author_id)
try:
data = json.loads(request.body)
except json.JSONDecodeError:
status_code = 400
return HttpResponse('Data is not a valid json', status=status_code)
if Post.objects.filter(id=post_id, author=author).exists():
if not request.user.is_authenticated:
status_code = 403
message = "You do not have permission to update this author's post."
return HttpResponse(message, status=status_code)
# update post with the given data
post = Post.objects.get(id=post_id, author=author)
form = PostForm(data)
if form.is_valid():
self.update_post(post, form)
return HttpResponse("Post is successfully updated.")
status_code = 400
return HttpResponse('The form is not valid.', status=status_code)
try:
post = Post.objects.create(pk=post_id, author=author, **data)
except ValidationError as e:
status_code = 400
return HttpResponse('The form data is not valid.', status=status_code)
return HttpResponse('Post successfully created', status=status_code)
def update_post(self, post, form):
'''Updates the fields of the post with the given valid form'''
post.title = form.cleaned_data['title']
post.description = form.cleaned_data['description']
# TODO image
# post.image = form.cleaned_data['image']
post.content_type = form.cleaned_data['content_type']
post.content = form.cleaned_data['content']
post.categories = form.cleaned_data['categories']
post.visibility = form.cleaned_data['visibility']
post.save(update_fields=['title', 'description', 'content_type', 'content', 'image', 'categories', 'visibility'])
post.save() # update modified date
class PostsView(View):
DEFAULT_PAGE = 1
DEFAULT_SIZE = 15
http_method_names = ['get', 'head', 'options', 'post']
def get(self, request, *args, **kwargs):
'''
GET [local, remote]: Returns a JSON response that contains a list of the
recent posts from author_id. (paginated)
Default page = 1, size = 15
Returns:
- 200: if successful
- 401: if server is not authorized
- 404: if author or page does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
return JsonResponse(self._get_posts(request, author_id))
def head(self, request, *args, **kwargs):
'''
Handles HEAD request of the same GET request.
Returns:
- 200: if successful
- 401: if server is not authorized
- 404: if author or page does not exist
'''
if not is_server_authorized(request):
return get_401_response()
author_id = kwargs.get('author_id', '')
data_json = json.dumps(self._get_posts(request, author_id))
response = HttpResponse()
response.headers['Content-Type'] = 'application/json'
response.headers['Content-Length'] = str(len(bytes(data_json, 'utf-8')))
return response
def post(self, request, *args, **kwargs):
'''
POST [local]: Creates a new post, but generates a new id.
Returns:
- 201: if the post is successfully created
- 400: if the data is invalid
- 401: if server is not authorized
- 403: if the user is not authenticated
- 404: if author does not exist
'''
if not is_local_server(request):
return get_401_response()
status_code = 201
author_id = kwargs.get('author_id', '')
author = get_object_or_404(Author, id=author_id)
# form = PostForm(request.POST)
form = PostForm(json.loads(request.body))
if not form.is_valid():
status_code = 400
return HttpResponse('The form data is not valid.', status=status_code)
post = Post.objects.create(author=author, **form.cleaned_data)
body = json.dumps(post.get_detail_dict())
return HttpResponse(body, status=status_code)
def _get_posts(self, request, author_id) -> dict:
'''
Returns a dict that contains a list of posts.
'''
page = int(request.GET.get('page', self.DEFAULT_PAGE))
size = int(request.GET.get('size', self.DEFAULT_SIZE))
author = get_object_or_404(Author, pk=author_id)
try:
q = Post.objects.all().filter(author=author)
count = q.count()
q = q.filter(visibility='PUBLIC')
q = q.order_by('-modified')
posts = Paginator(q, size).page(page)
except EmptyPage:
raise Http404('Page does not exist')
data = {}
data['type'] = 'posts'
data['count'] = count
data['items'] = [p.get_detail_dict() for p in posts]
return data
``` |
{
"source": "370rokas/anonleecher",
"score": 3
} |
#### File: 370rokas/anonleecher/anonscraper.py
```python
from colorama import Fore
import requests
import getopt
import sys
import re
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
}
def google(q):
s = requests.Session()
q = '+'.join(q.split())
url = 'https://www.google.com/search?&q=site%3Aanonfile.com+' + q + '&ie=utf-8&oe=utf-8'
r = s.get(url, headers=headers)
output = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', r.text)
return output
def main():
print(Fore.MAGENTA + '''
______________ __
|__ /__ / __ \_________ / /______ ______
/_ < / / / / / ___/ __ \/ //_/ __ `/ ___/
___/ / / / /_/ / / / /_/ / ,< / /_/ (__ )
/____/ /_/\____/_/ \____/_/|_|\__,_/____/
AnonScraper
Author: 370rokas <https://github.com/370rokas/anonscraper>
''' + Fore.RESET)
argv = sys.argv[1:]
options = "hq:f:"
l_options = ["help", "query", "filename"]
query = ""
filename = ""
def display_help():
print(Fore.WHITE + "anonscraper.py -q <query> -f #" + Fore.RESET)
sys.exit()
try:
args, vals = getopt.getopt(argv, options, l_options)
if len(args) == 0:
display_help()
for c_arg, c_val in args:
if c_arg in ('-h', '--help'):
display_help()
if c_arg in ('-q', '--query'):
query = c_val
if c_arg in ('-f', '--filename'):
filename = c_val
except getopt.error as err:
print(str(err))
urls = google(query)
filtered = []
for url in urls:
if ("anonfile.com/" in url and url != "https://anonfile.com" and not "google." in url):
filtered.append(url)
for i in range(len(filtered)):
print(Fore.CYAN + str(i+1) + ". " + filtered[i] + Fore.RESET)
if filename != "":
open(filename, 'a').write(filtered[i] + "\n")
if filename != "":
print(Fore.GREEN + "[i] Saved results into " + filename)
print(Fore.MAGENTA + "[i] Finished. Got " + str(len(filtered)) + " results." + Fore.RESET)
if __name__ == '__main__':
main()
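# Example invocation (illustrative):
#   python anonscraper.py -q "database backup" -f results.txt
# searches Google for anonfile.com links matching the query and appends each
# result to results.txt.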
``` |
{
"source": "370rokas/harv3st",
"score": 3
} |
#### File: 370rokas/harv3st/main.py
```python
import argparse
parser = argparse.ArgumentParser(description="Harvests various websites for information about an username or an email.")
parser.add_argument('--username', metavar="u", type=str, help="username to harvest", nargs='+')
parser.add_argument('--email', metavar="e", type=str, help="email to harvest", nargs='+')
parser.add_argument('--verbose', '-v', action='count', default=0)
from harvester import run_username, run_email
from misc import banner
from colorama import Fore
def main():
args = parser.parse_args()
print(Fore.RED + banner + "\n370rokas (c) 2022\n" + Fore.RESET)
if not args.username and not args.email:
parser.print_help()
return
verbose_level = args.verbose
if args.username:
# Harvest by usernames
for username in args.username:
print(Fore.CYAN + f"[{username}] Harvesting started." + Fore.RESET)
run_username(username, verbose_level)
print(Fore.CYAN + f"[{username}] Harvesting finished." + Fore.RESET)
if args.email:
# Harvest by emails
for email in args.email:
print(Fore.CYAN + f"[{email}] Harvesting started." + Fore.RESET)
run_email(email, verbose_level)
print(Fore.CYAN + f"[{email}] Harvesting finished." + Fore.RESET)
if __name__ == '__main__':
main()
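# Example invocations (illustrative):
#   python main.py --username some_handle -v
#   python main.py --email user@example.com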
``` |
{
"source": "372046933/discovery-client",
"score": 3
} |
#### File: discovery-client/discovery/util.py
```python
from urllib.parse import urlencode
def sort_urlencode(data):
"""
Encode a dict into a URL query string.
sort by key
:param dict data: data
:rtype: str
"""
return urlencode(sorted(data.items(), key=lambda v: v[0]), doseq=True)
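
# A minimal usage sketch (the sample dict is illustrative): sorting by key makes
# the encoded query string deterministic regardless of dict insertion order.
if __name__ == "__main__":
    print(sort_urlencode({'b': 2, 'a': [1, 3]}))  # -> a=1&a=3&b=2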
``` |
{
"source": "373137461/Meizu_Hygrothermo",
"score": 2
} |
#### File: custom_components/meizu_hygrothermo/sensor.py
```python
import re
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from datetime import timedelta
from homeassistant.const import (ATTR_BATTERY_LEVEL,CONF_HOST, CONF_NAME, CONF_MAC, CONF_SCAN_INTERVAL, TEMP_CELSIUS)
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.event import track_time_interval
from homeassistant.util.dt import utcnow
import socket
import json
REQUIREMENTS = []
_LOGGER = logging.getLogger(__name__)
BT_MAC = vol.All(
cv.string,
vol.Length(min=17, max=17)
)
SCAN_INTERVAL = timedelta(seconds=30)
NAME = "Meijia BT Hygrothermograph"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MAC, default=None): vol.Any(BT_MAC, None),
vol.Required(CONF_HOST, default=None): cv.string,
vol.Optional(CONF_NAME, default=NAME): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
})
SENSOR_TYPES = {
'Temperature': [TEMP_CELSIUS, 'mdi:thermometer'],
'Humidity': ['%', 'mdi:water-percent']
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensor platform."""
device = MeizuHygroThermo(hass, config.get(CONF_NAME), config.get(CONF_HOST),config.get(CONF_MAC))
add_devices(device.entities)
track_time_interval(hass, device.get_data, config.get(CONF_SCAN_INTERVAL))
class MeizuHygroThermoDelegate(object):
def __init__(self):
self.temperature = None
self.humidity = None
self.received = False
def handleNotification(self, cHandle, data):
if cHandle == 14:
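            # Payload is an ASCII byte sequence like "T=23.4 H=56.7" (illustrative values).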
m = re.search('T=([\d\.]*)\s+?H=([\d\.]*)', ''.join(map(chr, data)))
self.temperature = m.group(1)
self.humidity = m.group(2)
self.received = True
class MeizuHygroThermo(object):
def __init__(self, hass, name, address,mac):
self.address = mac
self.host = address
self.battery = None
self.temperature = None
self.humidity = None
self.last_battery = None
self.entities = [
MeizuHygroThermoEntity(hass, name, 'Temperature'),
MeizuHygroThermoEntity(hass, name, 'Humidity')
]
self.get_data()
def get_data(self, now = None):
try:
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
HOST=self.host
PORT=21567
BUFSIZ=1024
ADDR=(HOST,PORT)
c.connect(ADDR)
c.send(self.address.encode())
data = c.recv(BUFSIZ)
print(data.decode('utf-8'))
c.close()
json_str = json.loads(data.decode('utf-8'))
print(json_str)
self.temperature = json_str['temperature']
self.humidity = json_str['humidity']
self.battery = json_str['battery']
self.last_battery = "1"
ok = True
except Exception as ex:
_LOGGER.error("Unexpected error: {}".format(ex))
ok = False
for i in [0, 1]:
changed = self.entities[i].set_state(ok, self.battery, self.temperature if i == 0 else self.humidity)
if (not now is None) and changed:
self.entities[i].async_schedule_update_ha_state()
class MeizuHygroThermoEntity(Entity):
def __init__(self, hass, name, device_type):
self.hass = hass
self._name = '{} {}'.format(name, device_type)
self._state = None
self._is_available = True
self._type = device_type
self._device_state_attributes = {}
self.__errcnt = 0
self.__laststate = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@property
def icon(self):
"""Return the icon to use in the frontend."""
try:
return SENSOR_TYPES.get(self._type)[1]
except TypeError:
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
try:
return SENSOR_TYPES.get(self._type)[0]
except TypeError:
return None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def set_state(self, is_available, battery, state_value):
changed = False
if is_available:
if not battery is None:
self._device_state_attributes[ATTR_BATTERY_LEVEL] = battery
changed = True
self._state = state_value
changed = changed or self.__laststate != state_value
self.__laststate = state_value
self.__errcnt = 0
self._is_available = True
else:
self.__errcnt += 1
if self.__errcnt > 3:
self._is_available = False
changed = True
return changed
``` |
{
"source": "3778/icd-prediction-mimic",
"score": 2
} |
#### File: 3778/icd-prediction-mimic/utils.py
```python
import numpy as np
import pickle
import pandas as pd
from tensorflow.keras.callbacks import LearningRateScheduler
from constants import SAVE_DIR, W2V_DIR, W2V_SIZE, MAX_LENGTH
import model_functions as fun
import models
def make_icds_histogram(df):
return df.ICD9_CODE.explode().value_counts()
def load_list_from_txt(filepath):
with open(filepath, 'r') as f:
return f.read().split()
def preprocessor(text_series):
return (text_series
.str.replace('<[^>]*>', '')
.str.lower()
.str.replace('[\W]+', ' ')
.str.split())
def preprocessor_tfidf(text_series):
return (text_series
.str.replace('\[\*\*[^\]]*\*\*\]','')
.str.replace('<[^>]*>', '')
.str.replace('[\W]+', ' ')
.str.lower()
.str.replace(' \d+', ' '))
def preprocessor_word2vec(text_series):
return (text_series
.str.replace('\[\*\*[^\]]*\*\*\]','')
.str.replace('<[^>]*>', '')
.str.replace('[\W]+', ' ')
.str.lower()
.str.replace(' \d+', ' ')
.str.split())
def convert_data_to_index(string_data, row_dict):
return [row_dict.get(word, row_dict['_unknown_']) for word in string_data]
def lr_schedule_callback(args):
# Create scheduler function
def scheduler(epoch):
if epoch < args.epoch_drop:
return args.initial_lr
else:
return args.final_lr
return LearningRateScheduler(scheduler, verbose=1)
def get_model(args=None, load_path=None):
if args.MODEL_NAME == 'cte':
return models.CTE_Model(args, load_path)
elif args.MODEL_NAME == 'lr':
return models.LR_Model(args, load_path)
elif args.MODEL_NAME == 'cnn':
return models.CNN_Model(args, load_path)
elif args.MODEL_NAME == 'gru':
return models.GRU_Model(args, load_path)
elif args.MODEL_NAME == 'cnn_att':
return models.CNNAtt_Model(args, load_path)
``` |
{
"source": "3778/ml-challenge",
"score": 3
} |
#### File: challenge/_internal/evaluate.py
```python
from . import DATA_DIR
import pandas as pd
import numpy as np
import sklearn.metrics as sklearn_metrics
def evaluate_regression(y_pred):
"""Evaluates predictions with multiple metrics
For details on metrics see
https://scikit-learn.org/stable/modules/model_evaluation.html
Args:
y_pred (array): Predictions from regression model.
Returns:
dict: Evaluation results on multiple metrics.
"""
path = DATA_DIR / 'answers.csv'
y_true = pd.read_csv(path, dtype={'value': float})['value']
metrics_to_evaluate = ['explained_variance_score',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'r2_score']
return {m: getattr(sklearn_metrics, m)(y_true, y_pred)
for m in metrics_to_evaluate}
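# A hedged usage sketch: y_pred must be array-like with one prediction per row of
# answers.csv (the variable name is illustrative).
#   scores = evaluate_regression(y_pred)
#   print(scores['mean_absolute_error'], scores['r2_score'])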
``` |
{
"source": "378978764/gputasker",
"score": 2
} |
#### File: gputasker/base/utils.py
```python
from django.contrib.auth.models import User
from .models import SystemConfig
def get_admin_config():
admin_users = User.objects.filter(is_superuser=True)
system_config = SystemConfig.objects.all()
if admin_users.count() == 0:
raise RuntimeError('Please create a superuser!')
if system_config.count() == 0:
raise RuntimeError('Please login admin site and set system config!')
elif system_config.count() != 1:
raise RuntimeError('Please login admin site and delete other system config!')
if system_config[0].user.config is None:
raise RuntimeError(
'Please login admin site and create a config for user {}!'.format(system_config[0].user.username)
)
return system_config[0].user.config.server_username, \
system_config[0].user.config.server_private_key_path, \
system_config[0].gpustat_path
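# A minimal usage sketch: the returned triple configures SSH access for the task
# scheduler.
#   username, key_path, gpustat_path = get_admin_config()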
``` |
{
"source": "37b/django-DefectDojo",
"score": 2
} |
#### File: unittests/tools/test_intsights_parser.py
```python
from django.test import TestCase
from dojo.tools.intsights.parser import IntSightsParser
from dojo.models import Engagement, Product, Test
class TestIntSightsParser(TestCase):
def get_test(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
return test
def test_intsights_parser_without_file_has_no_findings(self):
parser = IntSightsParser()
findings = parser.get_findings(None, self.get_test())
self.assertEqual(0, len(findings))
def test_intsights_parser_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/intsights/intsights_zero_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
self.assertEqual(0, len(findings))
def test_intsights_parser_with_one_criticle_vuln_has_one_findings(self):
testfile = open("dojo/unittests/scans/intsights/intsights_one_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
self.assertEqual(1, len(findings))
self.assertEqual("handlebars", findings[0].component_name)
self.assertEqual("4.5.2", findings[0].component_version)
def test_intsights_parser_with_many_vuln_has_many_findings(self):
testfile = open("dojo/unittests/scans/intsights/intsights_many_vul.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
self.assertEqual(3, len(findings))
def test_intsights_parser_empty_with_error(self):
with self.assertRaises(ValueError) as context:
testfile = open("dojo/unittests/scans/intsights/empty_with_error.json")
parser = IntSightsParser()
findings = parser.get_findings(testfile, self.get_test())
testfile.close()
self.assertTrue(
"IntSights report contains errors:" in str(context.exception)
)
self.assertTrue("ECONNREFUSED" in str(context.exception))
``` |
{
"source": "37dev/xx-node-info",
"score": 2
} |
#### File: xx-node-info/nodeinfo/models.py
```python
from django.db import models
from nodeinfo.managers import NodeInfoManager
from tgbot.handlers import static_text
from tgbot.utils import reply
class NodeInfo(models.Model):
name = models.CharField(max_length=200)
node_id = models.CharField(max_length=200)
group = models.CharField(max_length=200)
application_id = models.CharField(max_length=200)
location = models.CharField(max_length=200)
round_failure_avg = models.DecimalField(max_digits=3, decimal_places=2, null=True)
status = models.CharField(max_length=200)
team = models.CharField(max_length=200)
uptime = models.DecimalField(max_digits=3, decimal_places=2, null=True)
application_url = models.CharField(max_length=200)
subscribed_users = models.ManyToManyField('tgbot.User', related_name="subscribed_nodes")
network = models.CharField(max_length=32, null=True)
objects = NodeInfoManager()
def __str__(self):
return self.node_id
@property
def has_subscribers(self):
has_subscribers = self.subscribed_users.exists()
return has_subscribers
def is_user_subscribed(self, user):
user_already_subscribed = user.subscribed_nodes.filter(pk=self.pk).exists()
return user_already_subscribed
@classmethod
def get_node_from_context(cls, update, context):
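        # Expects the Telegram command arguments as "<node_id> <network>", e.g. a
        # hypothetical "/subscribe 0xa1b2c3 mainnet"; the network name is
        # lower-cased before the lookup.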
try:
node_id = context.args[0]
network = context.args[1]
node = cls.objects.get(node_id=node_id, network=network.lower())
return node
except IndexError:
reply(update, static_text.invalid_subscription_format_text)
return None
except cls.DoesNotExist:
reply(update, static_text.node_does_not_exist_text)
return None
@staticmethod
def get_node_network():
pass
```
#### File: xx-node-info/tgbot/views.py
```python
import json
import logging
from django.views import View
from django.http import JsonResponse
from tgbot.tasks import process_telegram_event
logger = logging.getLogger(__name__)
class TelegramBotWebhookView(View):
def post(self, request, *args, **kwargs):
process_telegram_event.delay(json.loads(request.body))
return JsonResponse({"message": "ok"}, status=200)
def get(self, request):
return JsonResponse({"message": "ok"}, status=200)
``` |
{
"source": "3846chs/SNIP",
"score": 3
} |
#### File: SNIP/snip/train.py
```python
import os
import tensorflow.compat.v1 as tf
import time
import numpy as np
# np.random._bit_generator = np.random.bit_generator
from augment import augment
def train(args, model, sess, dataset):
print('|========= START TRAINING =========|')
if not os.path.isdir(args.path_summary): os.makedirs(args.path_summary)
if not os.path.isdir(args.path_model): os.makedirs(args.path_model)
saver = tf.train.Saver()
random_state = np.random.RandomState(9)
writer = {}
writer['train'] = tf.summary.FileWriter(args.path_summary + '/train', sess.graph)
writer['val'] = tf.summary.FileWriter(args.path_summary + '/val')
t_start = time.time()
best_val_loss = 100
for itr in range(args.train_iterations):
batch = dataset.get_next_batch('train', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: True, model.pruned: True})
input_tensors = [model.outputs] # always execute the graph outputs
if (itr+1) % args.check_interval == 0:
input_tensors.extend([model.summ_op, model.sparsity])
input_tensors.extend([model.train_op])
result = sess.run(input_tensors, feed_dict)
# Check on validation set.
if (itr+1) % args.check_interval == 0:
batch = dataset.get_next_batch('val', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: False, model.pruned: True})
input_tensors = [model.outputs, model.summ_op, model.sparsity]
result_val = sess.run(input_tensors, feed_dict)
# Check summary and print results
if (itr+1) % args.check_interval == 0:
writer['train'].add_summary(result[1], itr)
writer['val'].add_summary(result_val[1], itr)
pstr = '(train/val) los:{:.3f}/{:.3f} acc:{:.3f}/{:.3f} spa:{:.3f} lr:{:.7f}'.format(
result[0]['los'], result_val[0]['los'],
result[0]['acc'], result_val[0]['acc'],
result[2], result[0]['lr'],
)
print('itr{}: {} (t:{:.1f})'.format(itr+1, pstr, time.time() - t_start))
t_start = time.time()
# Save model
if best_val_loss > result_val[0]['los']:
print('save model, becase best_val_loss({:.3f}) > current_val_loss({:.3f})'.format(
best_val_loss, result_val[0]['los']
))
saver.save(sess, args.path_model + '/itr-' + str(itr))
best_val_loss = result_val[0]['los']
# # Save model
# if (itr+1) % args.save_interval == 0:
# saver.save(sess, args.path_model + '/itr-' + str(itr))
``` |
{
"source": "386jp/ytcomment_trends",
"score": 3
} |
#### File: ytcomment_trends/ytcomment_trends/entrypoint.py
```python
import argparse
import matplotlib.pyplot as plt
from .main import CommentAnalyzer
def entrypoint():
"""Entrypoint for the ytcomment_trends package
"""
parser = argparse.ArgumentParser(prog='ytcomment_trends', usage='ytcomment_trends -v pR2E2OatMTQ -k hogefuga', description='ytcomment_trends: YouTube comment trends analysis tool using oseti')
parser.add_argument('-v', '--video_id', help='YouTube video id', type=str, required=True)
parser.add_argument('-k', '--key', help='YouTube API key', type=str, required=True)
parser.add_argument('-s', '--summarized_in', help='Summarized in (W: week, D: day). Please refer to the Pandas documentation for more information. Default: W', type=str, required=False, default="W")
args = parser.parse_args()
ca = CommentAnalyzer(args.video_id, args.key)
ca_comments = ca.get_comments()
ca_analyzed = ca.get_analyzed_comments(ca_comments)
ca_summarized = ca.get_summarized_comments(ca_analyzed, summarized_in=args.summarized_in)
fig, ax1 = plt.subplots()
t = ca_summarized.keys()
color = 'tab:red'
ax1.set_xlabel('datetime comment posted')
ax1.set_ylabel('number of comments', color=color)
ax1.plot(t, [v['comments'] for v in ca_summarized.values()], color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
oseti_scores = [v['oseti_score'] for v in ca_summarized.values()]
color = 'tab:blue'
ax2.set_ylabel('negative / positive', color=color)
ax2.plot(t, oseti_scores, color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.ylim(-1.2, 1.2)
plt.title("YouTube Video Comment Trends for " + args.video_id)
plt.grid(True)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
entrypoint()
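# Example invocation (the video id matches the parser's usage string; the API key
# is a placeholder):
#   ytcomment_trends -v pR2E2OatMTQ -k YOUR_API_KEY -s D
# -s takes a pandas resampling alias; W (weekly) is the default, D gives daily bins.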
``` |
{
"source": "38b394ce01/grammarinator",
"score": 2
} |
#### File: grammarinator/grammarinator/cli.py
```python
import logging
import os
logger = logging.getLogger('grammarinator')
def init_logging():
logging.basicConfig(format='%(message)s')
def add_jobs_argument(parser):
parser.add_argument('-j', '--jobs', metavar='NUM', type=int, default=os.cpu_count(),
help='parallelization level (default: number of cpu cores (%(default)d)).')
def add_disable_cleanup_argument(parser):
parser.add_argument('--disable-cleanup', dest='cleanup', default=True, action='store_false',
help='disable the removal of intermediate files.')
```
#### File: tests/grammars/CustomSubclassGenerator.py
```python
from grammarinator.runtime import *
from CustomGenerator import CustomGenerator
class CustomSubclassGenerator(CustomGenerator):
def tagname(self, parent=None):
current = UnparserRule(name='tagname', parent=parent)
UnlexerRule(src='customtag', parent=current)
return current
def _custom_lexer_content(self, parent=None):
return UnlexerRule(src='custom content', parent=parent)
```
#### File: tests/grammars/SuperGenerator.py
```python
from grammarinator.runtime import *
class SuperGenerator(Generator):
def inheritedRule(self, parent=None):
return UnlexerRule(src='I was inherited.', parent=parent)
``` |