seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
618407604
|
from PIL import Image
import os, sys
import shutil

if len(sys.argv) == 1:
    print("ファイルが存在しないよ!")  # "No file was given!"
    input("ボタンを押してね!")  # "Press a key!"
    sys.exit()

count = 0
for i in sys.argv[1:]:
    count = count + 1
    print(str(count) + "枚目は「" + i + "」")  # "Image #N is <name>"
    file = i
    resize_dir = os.path.join(os.path.dirname(file), "Resize")
    if not os.path.exists(resize_dir):
        os.mkdir(resize_dir)
    img = Image.open(file)
    print("キロバイトになるまでリサイズしています・・・")  # "Resizing until the size is down to kilobytes..."
    tmp1 = file.replace(".png", "") + "_1.png"
    while True:
        # Halve the dimensions each pass and stop once the saved copy is under ~1 MB
        # (the original checked the first digits of the *source* file's size, so the
        # loop never tracked the resized copy).
        img = img.resize((int(img.width / 2), int(img.height / 2)))
        img.save(tmp1)
        if os.path.getsize(tmp1) <= 1000 * 1000:
            break
    print("クオリティーを下げています・・・")  # "Lowering the quality..."
    image = Image.open(tmp1)
    tmp2 = file.replace(".png", "") + "_2.png"
    image.save(tmp2, quality=60)
    print("JPEG化しています・・・")  # "Converting to JPEG..."
    image2 = Image.open(tmp2)
    image2.load()
    # Flatten the PNG's alpha channel onto a white background before saving as JPEG.
    background = Image.new("RGB", image2.size, (255, 255, 255))
    background.paste(image2, mask=image2.split()[3])
    name = file.replace(".png", "") + "_軽量化.jpg"
    background.save(name, "JPEG")
    os.remove(tmp1)
    os.remove(tmp2)
    dest = os.path.join(resize_dir, os.path.basename(name))
    print(dest)
    if os.path.exists(dest):
        os.remove(dest)
    shutil.move(name, resize_dir)
    print(str(count) + "枚目完了!\n")  # "Image #N done!"

input("完了!\nボタンを押してね!\n")  # "All done! Press a key!"
| null |
ワンクリック軽量.py
|
ワンクリック軽量.py
|
py
| 1,685 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.getsize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
}
] |
167508745
|
"""empty message
Revision ID: 26bec75584e5
Revises: bc5e7cba581f
Create Date: 2018-04-03 13:50:26.021794
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '26bec75584e5'
down_revision = 'bc5e7cba581f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('adminlog',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('admin_id', sa.Integer(), nullable=True),
sa.Column('ip', sa.String(length=100), nullable=True),
sa.Column('addtime', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['admin_id'], ['admin.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_adminlog_addtime'), 'adminlog', ['addtime'], unique=False)
op.create_table('userlog',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('ip', sa.String(length=100), nullable=True),
sa.Column('addtime', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userlog_addtime'), 'userlog', ['addtime'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_userlog_addtime'), table_name='userlog')
op.drop_table('userlog')
op.drop_index(op.f('ix_adminlog_addtime'), table_name='adminlog')
op.drop_table('adminlog')
# ### end Alembic commands ###
| null |
migrations/versions/26bec75584e5_.py
|
26bec75584e5_.py
|
py
| 1,608 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_table",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op.create_index",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "alembic.op.f",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 47,
"usage_type": "name"
}
] |
304524391
|
from django.shortcuts import render
from django.template.loader import get_template
from django.http import HttpResponse
from .models import Expentry, Expcategory
import logging

logger = logging.getLogger('application')


# Create your views here.
def expenses_index(request):
    entries = Expentry.objects.all()
    sorted_cats = get_sorted_cats()
    t = get_template('expenses/index.html')
    return HttpResponse(t.render({'expenses': entries,
                                  'cats': sorted_cats},
                                 request))


def create_category(entry):
    if entry["parent"] == "":
        c = Expcategory(cat_name=entry["cat_name"],
                        cat_parent=None)
        c.save()
    else:
        # filter() returns a QuerySet (never None), so take the first match explicitly.
        p = Expcategory.objects.filter(cat_name=entry["parent"]).first()
        if p is not None:
            c = Expcategory(cat_name=entry["cat_name"],
                            cat_parent=p)
            c.save()
        else:
            pc = Expcategory(cat_name=entry["parent"],
                             cat_parent=None)
            pc.save()
            c = Expcategory(cat_name=entry["cat_name"],
                            cat_parent=pc)
            c.save()


def get_sorted_cats():
    categories = list(Expcategory.objects.all())
    sorted_cats = {}
    for c in categories:
        if c.cat_name in sorted_cats:  # it is a top-level item and registered
            continue
        if c.cat_parent is None:  # it is a top-level item and not registered
            sorted_cats[c.cat_name] = {
                "cname": c.cat_name,
                "cid": c.id,
                "children": []
            }
        else:  # it is not a top-level item
            if c.cat_parent.cat_name in sorted_cats:  # its parent is registered
                sorted_cats[c.cat_parent.cat_name]["children"].append({
                    "cid": c.id,
                    "cname": c.cat_name
                })
            else:  # its parent is not registered: register the parent, then append the child
                sorted_cats[c.cat_parent.cat_name] = {
                    "cname": c.cat_parent.cat_name,
                    "cid": c.cat_parent.id,
                    "children": []
                }
                sorted_cats[c.cat_parent.cat_name]["children"].append({
                    "cid": c.id,
                    "cname": c.cat_name
                })
    return sorted_cats
| null |
expenses/views.py
|
views.py
|
py
| 2,365 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Expentry.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Expentry.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Expentry",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Expcategory",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Expcategory.objects.filter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Expcategory.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.Expcategory",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "models.Expcategory",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.Expcategory",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Expcategory",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.Expcategory.objects.all",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Expcategory.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "models.Expcategory",
"line_number": 40,
"usage_type": "name"
}
] |
524448802
|
# Run the code in a virtual environment.
# Scrapes weather info from the specified page and stores it in a database.
# database
import mysql.connector
import time
# spider
import requests
from bs4 import BeautifulSoup

mydb = mysql.connector.connect(
    host="localhost",  # Database IP address
    user="# Database username",
    passwd="# Database password",
    database="# Database name"
)

no = 1
data = []  # declare a list
temp = ""  # temperature
rain_chance = ""  # chance of rain
humidity = ""  # humidity
ray = ""  # UV index
air = ""  # air quality
wind_speed = ""  # wind speed


def get_web_content():
    # The government website cannot provide the info we want, so use this page instead.
    page = requests.get('https://weather.yam.com/%E6%A5%A0%E6%A2%93%E5%8D%80/%E9%AB%98%E9%9B%84%E5%B8%82')
    # print(page.status_code)  # check: if output == 200, the request succeeded
    return page.text  # HTML source code


def get_info_weather():
    soup = BeautifulSoup(get_web_content(), 'html.parser')
    # print(soup.text)  # prints the content we get, without HTML tags
    tmp = soup.find('div', class_='detail').find_all('p', '')  # type: bs4.element.ResultSet
    global data  # will modify the global variable
    global temp
    global rain_chance
    global humidity
    global ray
    global air
    global wind_speed
    data = []  # reset on every call so the indices below always read the fresh scrape
    for i in tmp:
        data.append(str(i))  # cannot write "global data.append()"
    # for d in data:
    #     print(d)
    temp = data[0][3:7] + ":" + data[0][10:-4]
    rain_chance = data[1][3:7] + ":" + data[1][10:-4]
    humidity = data[2][3:7] + ":" + data[2][10:-4]
    temp_ray = data[3][3:6] + ":" + data[3][9:-10] + "-" + data[3][15:-5]
    air = data[4][3:7] + ":" + data[4][10:-4]
    wind_speed = data[5][3:5] + ":" + data[5][8:-4]
    # print(temp)        -> 體感溫度 : 14℃ (feels-like temperature)
    # print(rain_chance) -> 降雨機率 : 20% (chance of rain)
    # print(humidity)    -> 相對濕度 : 86% (relative humidity)
    # print(ray)         -> 紫外線 : 0 (低量級) (UV index: low)
    # print(air)         -> 空氣品質 : 普通 (air quality: fair)
    # print(wind_speed)  -> 風速 : 5km/h (wind speed)
    ray = change_ray(temp_ray)


def change_ray(tmp):
    # Convert full-width characters in the UV string to half-width (ASCII) equivalents.
    r = ""
    for uchar in tmp:
        inside_code = ord(uchar)
        if inside_code == 12288:  # full-width space
            inside_code = 32
        elif 65281 <= inside_code <= 65374:  # other full-width characters
            inside_code -= 65248
        r += chr(inside_code)
    return r


# database
def connect_DB():
    mycursor = mydb.cursor()  # execute the instruction above
    # mycursor.execute("INSERT INTO data (no,temperature,rain_chance,humidity,ray,air,wind_speed) VALUES('" + str(no) + "','""','""','""','""','""','""')")


def input_DB():
    mycursor = mydb.cursor()  # execute the instruction above
    # Insert data into the table; a parameterized query avoids SQL injection.
    mycursor.execute(
        "UPDATE data SET temperature=%s, rain_chance=%s, humidity=%s, ray=%s, air=%s, wind_speed=%s WHERE no=1",
        (temp, rain_chance, humidity, ray, air, wind_speed))
    mydb.commit()  # the table contents changed, so a commit is required
    print("Insert data completed.\n")


connect_DB()
while True:
    get_info_weather()
    input_DB()
    time.sleep(60)
| null |
ES/Python/Crawler.py
|
Crawler.py
|
py
| 3,038 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mysql.connector.connector.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 100,
"usage_type": "call"
}
] |
519058921
|
#!/usr/bin/python3
'''
state view module
'''
from flask import jsonify, abort, request
from models import storage, classes
from api.v1.views import app_views


@app_views.route('/states', methods=['GET'])
def all_states():
    '''
    function returns all state objects
    '''
    return jsonify([v.to_dict() for k, v in storage.all('State').items()])


@app_views.route('/states/<state_id>', methods=['GET'])
def single_state(state_id):
    '''
    function returns state object given its id
    '''
    for k, v in storage.all('State').items():
        if v.id == state_id:
            return jsonify(v.to_dict())
    abort(404)


@app_views.route('/states/<state_id>', methods=['DELETE'])
def delete_state(state_id):
    '''
    function deletes an object given its id
    '''
    remove = storage.get("State", state_id)
    if remove is not None:
        storage.delete(remove)
        storage.save()
        return jsonify({}), 200
    abort(404)


@app_views.route('/states', methods=['POST'])
def post_state():
    '''
    function creates a new state object
    '''
    data = request.get_json()
    if data is None:
        return jsonify({'error': 'Not a JSON'}), 400
    elif 'name' not in data:
        return jsonify({'error': 'Missing name'}), 400
    else:
        new_inst = classes["State"]()
        setattr(new_inst, 'name', data['name'])
        new_inst.save()
        return jsonify(new_inst.to_dict()), 201


@app_views.route('/states/<state_id>', methods=['PUT'])
def put_state(state_id):
    '''
    function updates a state object given its id
    '''
    data = request.get_json()
    state = storage.get("State", state_id)
    if state is None:
        abort(404)
    if data is None:
        return jsonify({'error': 'Not a JSON'}), 400
    for k, v in data.items():
        if k != 'id' and k != 'created_at' and k != 'updated_at':
            setattr(state, k, v)
    state.save()
    return jsonify(state.to_dict()), 200
| null |
AirBnB_clone_v3/api/v1/views/states.py
|
states.py
|
py
| 1,968 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.jsonify",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.storage.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.storage.all",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.storage.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "models.storage.delete",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.storage.save",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.classes",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "models.storage.get",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views.route",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 60,
"usage_type": "name"
}
] |
323868325
|
import skvideo.io
from skvideo.io import FFmpegWriter
import cv2
import numpy as np

if __name__ == '__main__':
    model_A = "faster_rcnn_inception_v2_coco_2017_11_08"
    model_B = "rc1_180k"
    video_names = ["bike2.mp4", "bus_motorbike.mp4", "cat_dog.mp4", "suitcase_person.mp4", "truck_car.mp4"]
    for video_name in video_names[1:]:
        cap = cv2.VideoCapture  # unused; note this binds the class itself, not an opened capture
        input_path_A = "/root/data/testing_videos/res/{}_{}".format(model_A, video_name)
        input_path_B = "/root/data/testing_videos/res/{}_{}".format(model_B, video_name)
        input_video_A = cv2.VideoCapture(input_path_A)
        input_video_B = cv2.VideoCapture(input_path_B)
        OUTPUT_PATH = "/root/data/testing_videos/res/{}_{}".format("compare", video_name)
        output_video = FFmpegWriter(OUTPUT_PATH)
        while True:
            _, input_frame_A = input_video_A.read()
            if input_frame_A is None:
                break
            _, input_frame_B = input_video_B.read()
            # for i, input_frame_A in enumerate(input_video_A):
            #     input_frame_B = input_video_B[i]
            # Stack the two models' output frames vertically into one comparison frame.
            height, width, _ = input_frame_A.shape
            output_frame = np.zeros((height * 2, width, 3), dtype=np.uint8)
            output_frame[:height, :, :] = cv2.cvtColor(input_frame_A, cv2.COLOR_BGR2RGB)
            output_frame[height:, :, :] = cv2.cvtColor(input_frame_B, cv2.COLOR_BGR2RGB)
            output_video.writeFrame(output_frame)
        output_video.close()
        print(OUTPUT_PATH)
| null |
research/object_detection/video_merger.py
|
video_merger.py
|
py
| 1,566 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "skvideo.io.FFmpegWriter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 34,
"usage_type": "attribute"
}
] |
619422657
|
import cv2
import serial

print("Versão do OpenCV:", cv2.__version__)  # "OpenCV version:"
classificador = cv2.CascadeClassifier(
    "cascades/haarcascade_frontalface_default.xml")
webCam = cv2.VideoCapture(1)
porta = 'COM3'  # on Linux or macOS usually '/dev/ttyS0'
velocidadeBaud = 115200
# ligarArduino = False
ligarArduino = True
if ligarArduino:
    SerialArduino = serial.Serial(porta, velocidadeBaud)  # , timeout=0.2)

while True:
    conectou, imagem = webCam.read()
    imagem = cv2.flip(imagem, 1)  # mirror the image (optional)
    alturaImagem, larguraImagem = imagem.shape[:2]
    converteuCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
    encontrarFaces = classificador.detectMultiScale(converteuCinza,
                                                    scaleFactor=1.5,
                                                    minSize=(150, 150),
                                                    maxSize=(200, 200))
    cor = (0, 0, 255)
    for (origemX, origemY, largura, altura) in encontrarFaces:
        cv2.rectangle(imagem, (origemX, origemY),
                      (origemX + largura, origemY + altura),
                      cor, 2)
        # print("Largura", largura, "Altura", altura)  # width / height
        raio = 4
        centroRosto = (origemX + int(largura / 2), origemY + int(altura / 2))
        cv2.circle(imagem, centroRosto, raio, cor)
        # Normalize: maps the offset into the 0-to-1 range
        normalizarZeroAteUm = int(larguraImagem / 2)
        # Correction: scales the offset to roughly 1 to 10
        fatorDeCorrecao = 10
        erroCentro = (((centroRosto[0] - (larguraImagem / 2))
                       / normalizarZeroAteUm) * fatorDeCorrecao)
        print(erroCentro)
        # erroCentro = int(erroCentro)
        try:
            if ligarArduino:
                SerialArduino.write(('servo' + str(erroCentro) + '\n').encode())
        except Exception:
            print("não enviou")  # "did not send"
    # draw the center line
    cv2.line(imagem, (int(larguraImagem / 2), 0),
             (int(larguraImagem / 2), alturaImagem),
             cor, 2)
    cv2.imshow("Rosto", imagem)
    teclou = cv2.waitKey(1) & 0xFF
    if teclou == ord('q') or teclou == 27:  # if 'q' or ESC was pressed
        if ligarArduino:
            SerialArduino.close()
        break

webCam.release()
cv2.destroyAllWindows()
| null |
enxergar/POCs/video3_2_arduino/assistenteMilGrauVisaoDetectarArduino.py
|
assistenteMilGrauVisaoDetectarArduino.py
|
py
| 2,311 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.__version__",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 71,
"usage_type": "call"
}
] |
371462863
|
from discord.ext import commands, menus
import utils
import random, discord, os, importlib, mystbin, typing, aioimgur, functools, tweepy
import traceback, textwrap
from discord.ext.menus.views import ViewMenuPages


class Owner(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(brief="a command to send mail")
    async def mail(self, ctx, *, user: utils.BetterUserconverter = None):
        if user is None:
            await ctx.reply("User not found, returning Letter")
            user = ctx.author
        if user:
            await ctx.reply("Please give me a message to use.")
            message = await self.bot.wait_for("message", check=utils.check(ctx))
            embed_message = discord.Embed(title=message.content, timestamp=(message.created_at), color=random.randint(0, 16777215))
            embed_message.set_author(name=f"Mail from: {ctx.author}", icon_url=(ctx.author.display_avatar.url))
            embed_message.set_footer(text=f"{ctx.author.id}")
            embed_message.set_thumbnail(url="https://i.imgur.com/1XvDnqC.png")
            if user.dm_channel is None:
                await user.create_dm()
            try:
                await user.send(embed=embed_message)
            except:
                user = ctx.author
                await user.send(content="Message failed. sending", embed=embed_message)
            embed_message.add_field(name="Sent To:", value=str(user))
            await self.bot.get_channel(855217084710912050).send(embed=embed_message)

    @commands.command()
    async def load(self, ctx, *, cog=None):
        if cog:
            try:
                self.bot.load_extension(cog)
            except Exception as e:
                await ctx.send(e)
                traceback.print_exc()
            await ctx.send("Loaded cog (see if there are any errors)")
        if cog is None:
            await ctx.send("you can't ask to load no cogs.")

    @commands.command()
    async def reload(self, ctx, *, cog=None):
        cog = cog or "all"
        if cog == "all":
            for x in list(self.bot.extensions):
                try:
                    self.bot.reload_extension(x)
                except commands.errors.ExtensionError as e:
                    await ctx.send(e)
                    traceback.print_exc()
            await ctx.send("done reloading all cogs (check for any errors)")
        else:
            try:
                self.bot.reload_extension(cog)
            except commands.errors.ExtensionError as e:
                await ctx.send(e)
                traceback.print_exc()
            await ctx.send("Cog reloaded :D (check for any errors)")

    @commands.command()
    async def unload(self, ctx, *, cog=None):
        if cog:
            try:
                self.bot.unload_extension(cog)
            except commands.errors.ExtensionError as e:
                await ctx.send(e)
                traceback.print_exc()
            await ctx.send("Cog should be unloaded just fine :D (check for any errors)")
        if cog is None:
            await ctx.send("you can't ask to unload no cogs")

    @commands.command()
    async def shutdown(self, ctx):
        await ctx.send("shutdown/logout time happening.")
        await self.bot.close()

    async def cog_check(self, ctx):
        return await self.bot.is_owner(ctx.author)

    async def cog_command_error(self, ctx, error):
        if ctx.command or not ctx.command.has_error_handler():
            await ctx.send(error)
            traceback.print_exc()
        # I need to fix all cog_command_error

    @commands.command(brief="Changes Bot Status(Owner Only)")
    async def status(self, ctx, *, args=None):
        if await self.bot.is_owner(ctx.author):
            if args:
                await self.bot.change_presence(status=discord.Status.do_not_disturb, activity=discord.Activity(type=discord.ActivityType.watching, name=args))
            if args is None:
                await self.bot.change_presence(status=discord.Status.do_not_disturb)
        if await self.bot.is_owner(ctx.author) is False:
            await ctx.send("That's an owner only command")

    @commands.command(brief="Only owner command to change bot's nickname")
    async def change_nick(self, ctx, *, name=None):
        if await self.bot.is_owner(ctx.author):
            if isinstance(ctx.channel, discord.TextChannel):
                await ctx.send("Changing Nickname")
                try:
                    await ctx.guild.me.edit(nick=name)
                except discord.Forbidden:
                    await ctx.send("Appears not to have valid perms")
            if isinstance(ctx.channel, discord.DMChannel):
                await ctx.send("You can't use that in DMs.")
        if await self.bot.is_owner(ctx.author) is False:
            await ctx.send("You can't use that command")

    class ServersEmbed(menus.ListPageSource):
        async def format_page(self, menu, item):
            embed = discord.Embed(title="Servers:", description=item, color=random.randint(0, 16777215))
            return embed

    @commands.command(brief="a command to give a list of servers(owner only)", help="Gives a list of guilds(Bot Owners only)")
    async def servers(self, ctx):
        if await self.bot.is_owner(ctx.author):
            pag = commands.Paginator()
            for g in self.bot.guilds:
                pag.add_line(f"[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}")
            pages = [page.strip("`") for page in pag.pages]
            menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1), delete_message_after=True)
            if ctx.author.dm_channel is None:
                await ctx.author.create_dm()
            await menu.start(ctx, channel=ctx.author.dm_channel)
        if await self.bot.is_owner(ctx.author) is False:
            await ctx.send("You can't use that it's owner only")

    @commands.command(brief="only works with JDJG, but this command is meant to send updates to my webhook")
    async def webhook_update(self, ctx, *, args=None):
        if await self.bot.is_owner(ctx.author):
            if args:
                if isinstance(ctx.channel, discord.TextChannel):
                    try:
                        await ctx.message.delete()
                    except:
                        await ctx.send("It couldn't delete the message in this guild so, I kept it here.")
                webhook = discord.Webhook.from_url(os.environ["webhook1"], session=self.bot.session)
                embed = discord.Embed(title="Update", color=(35056), timestamp=(ctx.message.created_at))
                embed.add_field(name="Update Info:", value=args)
                embed.set_author(name="JDJG's Update", icon_url='https://i.imgur.com/pdQkCBv.png')
                embed.set_footer(text="JDJG's Updates")
                await webhook.send(embed=embed)
                webhook = discord.Webhook.from_url(os.environ["webhook99"], session=self.bot.session)
                embed = discord.Embed(title="Update", color=(35056), timestamp=(ctx.message.created_at))
                embed.add_field(name="Update Info:", value=args)
                embed.set_author(name="JDJG's Update", icon_url='https://i.imgur.com/pdQkCBv.png')
                embed.set_footer(text="JDJG's Updates")
                await webhook.send(embed=embed)
            if args is None:
                await ctx.send("You sadly can't use it like that.")
        if await self.bot.is_owner(ctx.author) is False:
            await ctx.send("You can't use that")

    @commands.command(brief="Commands to see what guilds a person is in.")
    async def mutualguilds(self, ctx, *, user: utils.BetterUserconverter = None):
        user = user or ctx.author
        pag = commands.Paginator()
        for g in user.mutual_guilds:
            pag.add_line(f"{g}")
        pages = [page.strip("`") for page in pag.pages]
        pages = pages or ["No shared servers"]
        menu = ViewMenuPages(utils.mutualGuildsEmbed(pages, per_page=1), delete_message_after=True)
        if ctx.author.dm_channel is None:
            await ctx.author.create_dm()
        await menu.start(ctx, channel=ctx.author.dm_channel)

    @commands.command(brief="A command to add sus_users with a reason")
    async def addsus(self, ctx, *, user: utils.BetterUserconverter = None):
        if user is None:
            await ctx.send("can't have a user be none.")
        if user:
            await ctx.reply("Please give me a reason why:")
            reason = await self.bot.wait_for("message", check=utils.check(ctx))
            cur = await self.bot.sus_users.cursor()
            await cur.execute("INSERT INTO sus_users VALUES (?, ?)", (user.id, reason.content))
            await self.bot.sus_users.commit()
            await cur.close()
            await ctx.send("added sus users, successfully")

    @commands.command(brief="a command to remove sus users.")
    async def removesus(self, ctx, *, user: utils.BetterUserconverter = None):
        if user is None:
            await ctx.send("You can't have a none user.")
        if user:
            cur = await self.bot.sus_users.cursor()
            await cur.execute("DELETE FROM sus_users WHERE user_id = ?", (user.id,))
            await self.bot.sus_users.commit()
            await cur.close()
            await ctx.send("Removed sus users.")

    class SusUsersEmbed(menus.ListPageSource):
        async def format_page(self, menu, item):
            embed = discord.Embed(title="Users Deemed Suspicious by JDJG Inc. Official", color=random.randint(0, 16777215))
            embed.add_field(name=f"User ID : {item[0]}", value=f"**Reason :** {item[1]}", inline=False)
            return embed

    @commands.command(brief="a command to grab all in the sus_users list")
    async def sus_users(self, ctx):
        cur = await self.bot.sus_users.cursor()
        cursor = await cur.execute("SELECT * FROM SUS_USERS;")
        sus_users = tuple(await cursor.fetchall())
        await cur.close()
        await self.bot.sus_users.commit()
        menu = ViewMenuPages(self.SusUsersEmbed(sus_users, per_page=1), delete_message_after=True)
        await menu.start(ctx)

    @sus_users.error
    async def sus_users_error(self, ctx, error):
        await ctx.send(error)

    class TestersEmbed(menus.ListPageSource):
        async def format_page(self, menu, item):
            embed = discord.Embed(title="Testing Users:", color=random.randint(0, 16777215))
            embed.add_field(name="User ID:", value=f"{item}", inline=False)
            return embed

    @commands.command(brief="a command that lists all the testers")
    async def testers(self, ctx):
        menu = ViewMenuPages(self.TestersEmbed(self.bot.testers, per_page=1), delete_message_after=True)
        await menu.start(ctx)

    @commands.command()
    async def update_sus(self, ctx):
        await self.bot.sus_users.commit()
        await ctx.send("Updated SQL boss.")

    @update_sus.error
    async def update_sus_error(self, ctx, error):
        await ctx.send(error)

    @commands.command(aliases=["bypass_command"])
    async def command_bypass(self, ctx, user: utils.BetterUserconverter = None, *, command=None):
        # make sure to swap to autoconverter if it gets added.
        user = user or ctx.author
        if command:
            command_wanted = self.bot.get_command(command)
            if command_wanted:
                await ctx.send(f"{command_wanted.name} now accessible for the {user} for one command usage!")
                self.bot.special_access[user.id] = command_wanted.name
            if command_wanted is None:
                await ctx.send("Please specify a valid command.")
        if command is None:
            await ctx.send("select a command :(")

    @commands.command(brief="resets cooldown for you.", aliases=["reset_cooldown"])
    async def resetcooldown(self, ctx, *, command=None):
        if not command:
            return await ctx.send("please specify a command")
        command_wanted = self.bot.get_command(command)
        if not command_wanted:
            return await ctx.send("please specify a command")
        if not command_wanted.is_on_cooldown(ctx):
            return await ctx.send("That doesn't have a cooldown/isn't on a cooldown.")
        command_wanted.reset_cooldown(ctx)
        await ctx.send(f"reset cooldown of {command_wanted}")

    @commands.command(brief="leaves a guild; only use when needed or really wanted. Otherwise no thanks.")
    async def leave_guild(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
        guild = guild or ctx.guild
        if guild is None:
            return await ctx.send("Guild is None, can't do anything.")
        await ctx.send("Bot leaving guild :(")
        try:
            await guild.leave()
        except Exception as e:
            await ctx.send(f"Somehow an error occurred: {e}")
            traceback.print_exc()

    @commands.command()
    async def aioinput_test(self, ctx, *, args=None):
        args = args or "Test"
        result = await self.bot.loop.run_in_executor(None, input, (f"{args}:"))
        await ctx.send(f"Result of the input was {result}")

    @commands.command(brief="a powerful owner tool to reload local files that aren't reloadable.")
    async def reload_basic(self, ctx, *, args=None):
        if args is None:
            await ctx.send("Can't reload module named None")
        if args:
            try:
                module = importlib.import_module(name=args)
            except Exception as e:
                traceback.print_exc()
                return await ctx.send(e)
            try:
                value = importlib.reload(module)
            except Exception as e:
                traceback.print_exc()
                return await ctx.send(e)
            await ctx.send(f"Successfully reloaded {value.__name__} \nMain Package: {value.__package__}")

    @commands.command(brief="backs up a channel and then sends it into a file or mystbin")
    async def channel_backup(self, ctx):
        messages = await ctx.channel.history(limit=None, oldest_first=True).flatten()
        new_line = "\n"
        page = "\n".join(f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {msg.content} {new_line}Attachments : {msg.attachments}" if msg.content else f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {new_line.join(f'{e.to_dict()}' for e in msg.embeds)} {new_line}Attachments : {msg.attachments}" for msg in messages)
        mystbin_client = mystbin.Client(session=self.bot.session)
        paste = await mystbin_client.post(page)
        await ctx.author.send(content=f"Added text file to mystbin: \n{paste.url}")

    @channel_backup.error
    async def channel_backup_error(self, ctx, error):
        etype = type(error)
        trace = error.__traceback__
        values = ''.join(map(str, traceback.format_exception(etype, error, trace)))
        pages = textwrap.wrap(values, width=1992)
        menu = ViewMenuPages(utils.ErrorEmbed(pages, per_page=1), delete_message_after=True)
        if ctx.author.dm_channel is None:
            await ctx.author.create_dm()
        await menu.start(ctx, channel=ctx.author.dm_channel)
        mystbin_client = mystbin.Client(session=self.bot.session)
        paste = await mystbin_client.post(values)
        await ctx.send(f"Traceback: {paste.url}")

    @commands.command(brief="adds packages and urls to rtfm DB", aliases=["add_rtfm"])
    async def addrtfm(self, ctx, name=None, *, url=None):
        if not name or not url:
            return await ctx.send("You need a name and also url.")
        cur = await self.bot.sus_users.cursor()
        await cur.execute("INSERT INTO RTFM_DICTIONARY VALUES (?, ?)", (name, url))
        await self.bot.sus_users.commit()
        await cur.close()
        await ctx.send(f"added {name} and {url} to the rtfm DB")

    @commands.command(brief="removes packages from the rtfm DB", aliases=["remove_rtfm"])
    async def removertfm(self, ctx, *, name=None):
        if name is None:
            return await ctx.send("You can't remove None")
        cur = await self.bot.sus_users.cursor()
        await cur.execute("DELETE FROM RTFM_DICTIONARY WHERE name = ?", (name,))
        await self.bot.sus_users.commit()
        await cur.close()
        await ctx.send(f"Removed the rtfm value {name}.")

    @commands.command(brief="a command to save images to imgur(for owner only lol)")
    async def save_image(self, ctx):
        if not ctx.message.attachments:
            return await ctx.send("You need to provide some attachments.")
        await ctx.send("JDJG doesn't take any responsibility for what you upload here :eyes: don't upload anything bad okay?")
        for x in ctx.message.attachments:
            try:
                discord.utils._get_mime_type_for_image(await x.read())
            except Exception as e:
                traceback.print_exc()
                return await ctx.send(e)
            imgur_client = aioimgur.ImgurClient(os.environ["imgur_id"], os.environ["imgur_secret"])
            imgur_url = await imgur_client.upload(await x.read())
            await ctx.send(f"{imgur_url['link']}")

    @commands.command(brief="A command to remove testers")
    async def remove_tester(self, ctx, *, user: utils.BetterUserconverter = None):
        if user is None:
            await ctx.send("You can't have a non existent user.")
        if user:
            cur = await self.bot.sus_users.cursor()
            await cur.execute("DELETE FROM testers_list WHERE user_id = ?", (user.id,))
            await self.bot.sus_users.commit()
            await cur.close()
            if user.id not in self.bot.testers:
                return await ctx.send(f"{user} isn't in the testers list.")
            else:
                self.bot.testers.remove(user.id)
                await ctx.send(f"Removed tester known as {user}")

    @commands.command(brief="A command to add testers")
    async def add_tester(self, ctx, *, user: utils.BetterUserconverter = None):
        if user is None:
            await ctx.send("You can't have a non existent user.")
        if user:
            cur = await self.bot.sus_users.cursor()
            await cur.execute("INSERT INTO testers_list VALUES (?)", (user.id,))
            await self.bot.sus_users.commit()
            await cur.close()
            if user.id not in self.bot.testers:
                self.bot.testers.append(user.id)
                await ctx.send(f"added tester known as {user}")
            else:
                return await ctx.send(f"{user} is in the testers list already!")

    def tweepy_post(self, post_text=None):
        consumer_key = os.getenv('tweet_key')
        consumer_secret = os.getenv('tweet_secret')
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        access_token = os.getenv('tweet_access')
        access_secret = os.getenv('tweet_token')
        auth.set_access_token(access_token, access_secret)
        twitter_api = tweepy.API(auth)
        return twitter_api.update_status(status=post_text)

    @commands.command(brief="sends tweet to JDBot Twitter")
    async def send_tweet(self, ctx, *, args=None):
        if not args:
            return await ctx.send("you can't send nothing to twitter.")
        try:
            tweet_time = functools.partial(self.tweepy_post, args)
            post = await self.bot.loop.run_in_executor(None, tweet_time)
        except Exception as e:
            traceback.print_exc()
            return await ctx.send(f"Exception occurred at {e}")
        await ctx.send(f"Url of sent tweet is: https://twitter.com/twitter/statuses/{post.id}")

    @commands.command(brief="chunks a guild for testing purposes (it's owner only, to be used in testing guilds only)")
    async def chunk_guild(self, ctx):
        if ctx.guild is None:
            return await ctx.send("You can't chunk a guild that doesn't exist or a channel that is a DM.")
        if ctx.guild.chunked:
            return await ctx.send("No need to chunk this guild, it appears to be chunked")
        await ctx.guild.chunk(cache=True)
        await ctx.send("Finished chunking..")

    @chunk_guild.error
    async def chunk_guild_error(self, ctx, error):
        await ctx.send(error)
        traceback.print_exc()

    @commands.command(brief="displays the guild status and user status immediately")
    async def stats_status(self, ctx):
        await ctx.send("changing status, check now....")
        await self.bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name=f"{len(self.bot.guilds)} servers | {len(self.bot.users)} users"))

    @stats_status.error
    async def stats_status_error(self, ctx, error):
        await ctx.send(error)

    @commands.command(brief="a command to give a list of servers(owner only)", help="Gives a list of guilds(Bot Owners only) but with join dates updated.")
    async def servers2(self, ctx):
        if await self.bot.is_owner(ctx.author):
            sorted_guilds = sorted(self.bot.guilds, key=lambda guild: guild.me.joined_at)
            pag = commands.Paginator()
            for g in sorted_guilds:
                pag.add_line(f"{discord.utils.format_dt(g.me.joined_at, style = 'd')} {discord.utils.format_dt(g.me.joined_at, style = 'T')} \n[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}\n")
            pages = [page.strip("`") for page in pag.pages]
            menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1), delete_message_after=True)
            if ctx.author.dm_channel is None:
                await ctx.author.create_dm()
            await menu.start(ctx, channel=ctx.author.dm_channel)
        if await self.bot.is_owner(ctx.author) is False:
            await ctx.send("You can't use that it's owner only")


def setup(bot):
    bot.add_cog(Owner(bot))
| null |
cogs/owner.py
|
owner.py
|
py
| 20,373 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "discord.ext.commands.Cog",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "utils.check",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.errors",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.errors",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.errors",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "discord.Status",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "discord.Activity",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "discord.ActivityType",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "discord.Status",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "discord.Forbidden",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "discord.DMChannel",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "discord.ext.menus.ListPageSource",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.menus",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Paginator",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "discord.Webhook.from_url",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "discord.Webhook",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Paginator",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "utils.mutualGuildsEmbed",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "utils.check",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "discord.ext.menus.ListPageSource",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.menus",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "discord.ext.menus.ListPageSource",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.menus",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "discord.Guild",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "mystbin.Client",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "traceback.format_exception",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "textwrap.wrap",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "utils.ErrorEmbed",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "mystbin.Client",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "discord.utils._get_mime_type_for_image",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "aioimgur.ImgurClient",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "utils.BetterUserconverter",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "os.getenv",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 464,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "discord.Status",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "discord.Activity",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "discord.ActivityType",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Paginator",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "discord.utils.format_dt",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 517,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.menus.views.ViewMenuPages",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 509,
"usage_type": "name"
}
] |
522490840
|
# coding: utf-8
import scrapy
from urllib.parse import urlparse, parse_qs
class AllStocks(scrapy.Spider):
name = "allStocks"
def start_requests(self):
        pages = 464
        urls = []
        for i in range(pages):
x = i + 1
url = 'http://stock.finance.sina.com.cn/usstock/api/jsonp.php//US_CategoryService.getList?page='+str(x)+'&num=20&sort=&asc=0&market=&id='
urls.append(url)
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
num = str(parse_qs(urlparse(response.url).query)['page'][0])
filename = 'allStocks-%s.json' % num
data = str(response.body, 'gbk')
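        # strip the JSONP wrapper characters so only the bare JSON payload is saved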
data = data[2:-3]
        with open(filename, 'w') as f:
            f.write(data)
# 60k url: https://stock.finance.sina.com.cn/usstock/api/jsonp_v2.php/var%20_MSFT_60_1548714405787=/US_MinKService.getMinK?symbol=MSFT&type=60&___qn=3
| null |
tutorial/tutorial/spiders/allStock.py
|
allStock.py
|
py
| 951 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scrapy.Spider",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 21,
"usage_type": "call"
}
] |
530887272
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
tempData = {
"data": [
{
"TOTAL_CAL": "825",
"MENU": [
{
"LUNCH_MENU_NM": "뚝배기어묵탕*어묵사리",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "가마"
},
{
"LUNCH_MENU_NM": "현미밥/잡곡밥",
"LUNCH_MENU_ESTIMATION": 4.5646,
"LUNCH_ROOM_NM": "가마"
},
{
"LUNCH_MENU_NM": "삼색계란말이",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "가마"
},
{
"LUNCH_MENU_NM": "들깨버섯볶음",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "가마"
},
{
"LUNCH_MENU_NM": "깻잎지무침",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "가마"
},
{
"LUNCH_MENU_NM": "열무김치",
"LUNCH_MENU_ESTIMATION": 3.5096,
"LUNCH_ROOM_NM": "가마"
}
],
"DATE_VAL": "20161115",
"LUNCH_TYPE": "1",
"DIVISION_INFO": "",
"DIVISION_CODE": "SEOCHO",
"TOTAL_PRICE": "4500",
"LUNCH_ROOM_NM": "가마",
"CONTENTS_ETC": ""
},
{
"TOTAL_CAL": "445",
"MENU": [
{
"LUNCH_MENU_NM": "닭가슴살샐러드세트",
"LUNCH_MENU_ESTIMATION": 6.2,
"LUNCH_ROOM_NM": "건강도시락"
},
{
"LUNCH_MENU_NM": "닭가슴살샐러드*유자D",
"LUNCH_MENU_ESTIMATION": 6.25,
"LUNCH_ROOM_NM": "건강도시락"
},
{
"LUNCH_MENU_NM": "단호박/메추리알/컬리플라워",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "건강도시락"
},
{
"LUNCH_MENU_NM": "잡곡빵/모듬과일/녹차",
"LUNCH_MENU_ESTIMATION": 6,
"LUNCH_ROOM_NM": "건강도시락"
}
],
"DATE_VAL": "20161115",
"LUNCH_TYPE": "1",
"DIVISION_INFO": "",
"DIVISION_CODE": "SEOCHO",
"TOTAL_PRICE": "4500",
"LUNCH_ROOM_NM": "건강도시락",
"CONTENTS_ETC": ""
},
{
"TOTAL_CAL": "746",
"MENU": [
{
"LUNCH_MENU_NM": "계란볶음밥*마파소스",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "인터쉐프"
},
{
"LUNCH_MENU_NM": "탕파국",
"LUNCH_MENU_ESTIMATION": 3.2874,
"LUNCH_ROOM_NM": "인터쉐프"
},
{
"LUNCH_MENU_NM": "견과류깐풍기",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "인터쉐프"
},
{
"LUNCH_MENU_NM": "단무지무침",
"LUNCH_MENU_ESTIMATION": 3.2,
"LUNCH_ROOM_NM": "인터쉐프"
},
{
"LUNCH_MENU_NM": "열무김치",
"LUNCH_MENU_ESTIMATION": 3.1667,
"LUNCH_ROOM_NM": "인터쉐프"
}
],
"DATE_VAL": "20161115",
"LUNCH_TYPE": "1",
"DIVISION_INFO": "",
"DIVISION_CODE": "SEOCHO",
"TOTAL_PRICE": "4500",
"LUNCH_ROOM_NM": "인터쉐프",
"CONTENTS_ETC": ""
},
{
"TOTAL_CAL": "25",
"MENU": [
{
"LUNCH_MENU_NM": "양상추샐러드*요거트D/오리엔탈D",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "입맛코너"
}
],
"DATE_VAL": "20161115",
"LUNCH_TYPE": "1",
"DIVISION_INFO": "",
"DIVISION_CODE": "SEOCHO",
"TOTAL_PRICE": "4500",
"LUNCH_ROOM_NM": "입맛코너",
"CONTENTS_ETC": ""
},
{
"TOTAL_CAL": "632",
"MENU": [
{
"LUNCH_MENU_NM": "잔치국수",
"LUNCH_MENU_ESTIMATION": 5.5,
"LUNCH_ROOM_NM": "해피존"
},
{
"LUNCH_MENU_NM": "충무김밥",
"LUNCH_MENU_ESTIMATION": 5.3333,
"LUNCH_ROOM_NM": "해피존"
},
{
"LUNCH_MENU_NM": "골뱅이무침",
"LUNCH_MENU_ESTIMATION": 0,
"LUNCH_ROOM_NM": "해피존"
},
{
"LUNCH_MENU_NM": "단무지무침",
"LUNCH_MENU_ESTIMATION": 5.6667,
"LUNCH_ROOM_NM": "해피존"
},
{
"LUNCH_MENU_NM": "열무김치",
"LUNCH_MENU_ESTIMATION": 2.3585,
"LUNCH_ROOM_NM": "해피존"
}
],
"DATE_VAL": "20161115",
"LUNCH_TYPE": "1",
"DIVISION_INFO": "",
"DIVISION_CODE": "SEOCHO",
"TOTAL_PRICE": "4500",
"LUNCH_ROOM_NM": "해피존",
"CONTENTS_ETC": ""
}
],
"result": "00",
"message": "Success"
}
def main():
#req = request.get_json(silent=True, force=True)
#data = json.loads(tempData)
res = makeWebhookResult(tempData)
res = json.dumps(res, indent=4)
# print(res)
#r = make_response(res)
#r.headers['Content-Type'] = 'application/json'
return res
def makeWebhookResult(data):
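    # Build "speech" (one line per cafeteria: calories plus its first menu item)
    # and "displayText" (calories plus every menu item per cafeteria).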
dataList = data.get('data')
if dataList is None:
return {}
speech = ""
displayText = ""
    for i, data in enumerate(dataList):
total_cal = data.get('TOTAL_CAL')
menuList = data.get('MENU')
lunch_room_nm = menuList[0].get('LUNCH_ROOM_NM')
lunch_menu_nm = menuList[0].get('LUNCH_MENU_NM').split("\n")[0] #some menu has '\n' in it
speech = speech + lunch_room_nm + "(" + total_cal + "cal)" + " : " + lunch_menu_nm
displayText = displayText + lunch_room_nm + "(" + total_cal + "cal)" + " : "
for menu in menuList:
lunch_menu_nm = menu.get('LUNCH_MENU_NM').split("\n")[0] #some menu has '\n' in it
displayText = displayText + lunch_menu_nm + " "
if i < len(dataList)-1:
speech += "\n"
displayText += "\n"
print("speech: ")
print(speech)
print("\n")
print("displayText: ")
print(displayText)
return {
"speech": speech,
"displayText": displayText,
# "data": data,
# "contextOut": [],
"source": "menu-at-seocho"
}
if __name__ == '__main__':
main()
| null |
reply_local.py
|
reply_local.py
|
py
| 6,283 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.dumps",
"line_number": 187,
"usage_type": "call"
}
] |
386195789
|
import csv
import io
import json
import pickle
from base64 import b64decode
from datetime import datetime
from flask import g, make_response
from crc import db, session
from crc.api.common import ApiError
from crc.models.approval import Approval, ApprovalModel, ApprovalSchema, ApprovalStatus
from crc.models.workflow import WorkflowModel
from crc.services.approval_service import ApprovalService
from crc.services.ldap_service import LdapService
# Returns counts of approvals in each status group assigned to the given user.
# The goal is to return results as quickly as possible.
def get_approval_counts(as_user=None):
uid = as_user or g.user.uid
db_user_approvals = db.session.query(ApprovalModel)\
.filter_by(approver_uid=uid)\
.filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
.all()
study_ids = [a.study_id for a in db_user_approvals]
db_other_approvals = db.session.query(ApprovalModel)\
.filter(ApprovalModel.study_id.in_(study_ids))\
.filter(ApprovalModel.approver_uid != uid)\
.filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
.all()
# Make a dict of the other approvals where the key is the study id and the value is the approval
# TODO: This won't work if there are more than 2 approvals with the same study_id
other_approvals = {}
for approval in db_other_approvals:
other_approvals[approval.study_id] = approval
counts = {}
for name, value in ApprovalStatus.__members__.items():
counts[name] = 0
for approval in db_user_approvals:
# Check if another approval has the same study id
if approval.study_id in other_approvals:
other_approval = other_approvals[approval.study_id]
# Other approval takes precedence over this one
if other_approval.id < approval.id:
if other_approval.status == ApprovalStatus.PENDING.name:
counts[ApprovalStatus.AWAITING.name] += 1
elif other_approval.status == ApprovalStatus.DECLINED.name:
counts[ApprovalStatus.DECLINED.name] += 1
elif other_approval.status == ApprovalStatus.CANCELED.name:
counts[ApprovalStatus.CANCELED.name] += 1
elif other_approval.status == ApprovalStatus.APPROVED.name:
counts[approval.status] += 1
else:
counts[approval.status] += 1
else:
counts[approval.status] += 1
return counts
def get_all_approvals(status=None):
approvals = ApprovalService.get_all_approvals(include_cancelled=status is True)
results = ApprovalSchema(many=True).dump(approvals)
return results
def get_approvals(status=None, as_user=None):
#status = ApprovalStatus.PENDING.value
user = g.user.uid
if as_user:
user = as_user
approvals = ApprovalService.get_approvals_per_user(user, status,
include_cancelled=False)
results = ApprovalSchema(many=True).dump(approvals)
return results
def get_approvals_for_study(study_id=None):
db_approvals = ApprovalService.get_approvals_for_study(study_id)
approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
results = ApprovalSchema(many=True).dump(approvals)
return results
def get_health_attesting_csv():
records = ApprovalService.get_health_attesting_records()
si = io.StringIO()
cw = csv.writer(si)
cw.writerows(records)
output = make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv"
output.headers["Content-type"] = "text/csv"
return output
# ----- Begin descent into madness ---- #
def get_csv():
"""A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a
man to do just about anything"""
content = ApprovalService.get_not_really_csv_content()
return content
# ----- come back to the world of the living ---- #
def update_approval(approval_id, body):
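    # Validate that the approval exists and belongs to the caller, persist the
    # status change, then let the service layer send notification emails.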
if approval_id is None:
raise ApiError('unknown_approval', 'Please provide a valid Approval ID.')
approval_model = session.query(ApprovalModel).get(approval_id)
if approval_model is None:
raise ApiError('unknown_approval', 'The approval "' + str(approval_id) + '" is not recognized.')
if approval_model.approver_uid != g.user.uid:
raise ApiError("not_your_approval", "You may not modify this approval. It belongs to another user.")
approval_model.status = body['status']
approval_model.message = body['message']
approval_model.date_approved = datetime.now()
session.add(approval_model)
session.commit()
# Called only to send emails
approver = body['approver']['uid']
ApprovalService.update_approval(approval_id, approver)
result = ApprovalSchema().dump(approval_model)
return result
| null |
crc/api/approval.py
|
approval.py
|
py
| 5,018 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.g.user",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "crc.db.session.query",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "crc.db.session",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "crc.db",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalModel.status",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.CANCELED",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "crc.db.session.query",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "crc.db.session",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "crc.db",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalModel.study_id.in_",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.ApprovalModel.study_id",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalModel.approver_uid",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalModel.status",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.CANCELED",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.__members__.items",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.ApprovalStatus.__members__",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.PENDING",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.AWAITING",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.DECLINED",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.DECLINED",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.CANCELED",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.CANCELED",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalStatus.APPROVED",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "crc.models.approval.ApprovalStatus",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "crc.services.approval_service.ApprovalService.get_all_approvals",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalSchema",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "crc.services.approval_service.ApprovalService.get_approvals_per_user",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalSchema",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService.get_approvals_for_study",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.Approval.from_model",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.Approval",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalSchema",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService.get_health_attesting_records",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "io.StringIO",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService.get_not_really_csv_content",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "crc.api.common.ApiError",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "crc.session.query",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "crc.models.approval.ApprovalModel",
"line_number": 119,
"usage_type": "argument"
},
{
"api_name": "crc.session",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "crc.api.common.ApiError",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "crc.api.common.ApiError",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "crc.session.add",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "crc.session",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "crc.session.commit",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "crc.session",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "crc.services.approval_service.ApprovalService.update_approval",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "crc.services.approval_service.ApprovalService",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "crc.models.approval.ApprovalSchema",
"line_number": 136,
"usage_type": "call"
}
] |
616464747
|
from typing import List
from http import HTTPStatus
from kentik_api.api_calls import custom_dimensions
from kentik_api.api_resources.base_api import BaseAPI
from kentik_api.public.types import ID
from kentik_api.public.custom_dimension import CustomDimension, Populator
from kentik_api.requests_payload import custom_dimensions_payload, populators_payload
from kentik_api.api_connection.api_connector_protocol import APIConnectorProtocol
from kentik_api.requests_payload.conversions import convert, permissive_enum_to_str
class PopulatorsAPI(BaseAPI):
"""Exposes Kentik API operations related to populators (belong to custom dimensions)"""
def create(self, populator: Populator) -> Populator:
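        # value, direction and dimension_id are required to build the create request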
assert populator.value is not None
assert populator.direction is not None
assert populator.dimension_id is not None
apicall = custom_dimensions.create_populator(populator.dimension_id)
payload = populators_payload.CreateRequest(
value=populator.value,
direction=populator.direction.value,
device_name=populator.device_name,
interface_name=populator.interface_name,
addr=populator.addr,
port=populator.port,
tcp_flags=populator.tcp_flags,
protocol=populator.protocol,
asn=populator.asn,
nexthop_asn=populator.nexthop_asn,
nexthop=populator.nexthop,
bgp_aspath=populator.bgp_aspath,
bgp_community=populator.bgp_community,
device_type=populator.device_type,
site=populator.site,
lasthop_as_name=populator.lasthop_as_name,
nexthop_as_name=populator.nexthop_as_name,
mac=populator.mac,
country=populator.country,
vlans=populator.vlans,
)
response = self.send(apicall, payload)
return populators_payload.CreateResponse.from_json(response.text).to_populator()
def update(self, populator: Populator) -> Populator:
assert populator.value is not None
assert populator.direction is not None
assert populator.dimension_id is not None
apicall = custom_dimensions.update_populator(populator.dimension_id, populator.id)
payload = populators_payload.UpdateRequest(
value=populator.value,
direction=convert(populator.direction, permissive_enum_to_str),
device_name=populator.device_name,
interface_name=populator.interface_name,
addr=populator.addr,
port=populator.port,
tcp_flags=populator.tcp_flags,
protocol=populator.protocol,
asn=populator.asn,
nexthop_asn=populator.nexthop_asn,
nexthop=populator.nexthop,
bgp_aspath=populator.bgp_aspath,
bgp_community=populator.bgp_community,
device_type=populator.device_type,
site=populator.site,
lasthop_as_name=populator.lasthop_as_name,
nexthop_as_name=populator.nexthop_as_name,
mac=populator.mac,
country=populator.country,
vlans=populator.vlans,
)
response = self.send(apicall, payload)
return populators_payload.UpdateResponse.from_json(response.text).to_populator()
def delete(self, custom_dimension_id: ID, populator_id: ID) -> bool:
apicall = custom_dimensions.delete_populator(custom_dimension_id, populator_id)
response = self.send(apicall)
return response.http_status_code == HTTPStatus.NO_CONTENT
class CustomDimensionsAPI(BaseAPI):
"""Exposes Kentik API operations related to custom dimensions"""
def __init__(self, api_connector: APIConnectorProtocol) -> None:
super(CustomDimensionsAPI, self).__init__(api_connector)
self._populators = PopulatorsAPI(api_connector)
def get(self, custom_dimension_id: ID) -> CustomDimension:
apicall = custom_dimensions.get_custom_dimension_info(custom_dimension_id)
response = self.send(apicall)
return custom_dimensions_payload.GetResponse.from_json(response.text).to_custom_dimension()
def get_all(self) -> List[CustomDimension]:
apicall = custom_dimensions.get_custom_dimensions()
response = self.send(apicall)
return custom_dimensions_payload.GetAllResponse.from_json(response.text).to_custom_dimensions()
def create(self, custom_dimension: CustomDimension) -> CustomDimension:
assert custom_dimension.name is not None
assert custom_dimension.display_name is not None
assert custom_dimension.type is not None
apicall = custom_dimensions.create_custom_dimension()
payload = custom_dimensions_payload.CreateRequest(
name=custom_dimension.name,
display_name=custom_dimension.display_name,
type=custom_dimension.type,
)
response = self.send(apicall, payload)
return custom_dimensions_payload.CreateResponse.from_json(response.text).to_custom_dimension()
def update(self, custom_dimension: CustomDimension) -> CustomDimension:
assert custom_dimension.display_name is not None
apicall = custom_dimensions.update_custom_dimension(custom_dimension.id)
payload = custom_dimensions_payload.UpdateRequest(
display_name=custom_dimension.display_name,
)
response = self.send(apicall, payload)
return custom_dimensions_payload.UpdateResponse.from_json(response.text).to_custom_dimension()
def delete(self, custom_dimension_id: ID) -> bool:
apicall = custom_dimensions.delete_custom_dimension(custom_dimension_id)
response = self.send(apicall)
return response.http_status_code == HTTPStatus.NO_CONTENT
@property
def populators(self) -> PopulatorsAPI:
return self._populators
| null |
kentik_api_library/kentik_api/api_resources/custom_dimensions_api.py
|
custom_dimensions_api.py
|
py
| 5,888 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "kentik_api.api_resources.base_api.BaseAPI",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.Populator",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.create_populator",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.CreateRequest",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.populators_payload",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.CreateResponse.from_json",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.CreateResponse",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.populators_payload",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.Populator",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.update_populator",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.UpdateRequest",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.populators_payload",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.conversions.convert",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.conversions.permissive_enum_to_str",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.UpdateResponse.from_json",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.populators_payload.UpdateResponse",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.populators_payload",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.types.ID",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.delete_populator",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "http.HTTPStatus.NO_CONTENT",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "http.HTTPStatus",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_resources.base_api.BaseAPI",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_connection.api_connector_protocol.APIConnectorProtocol",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.types.ID",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.get_custom_dimension_info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.GetResponse.from_json",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.GetResponse",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.CustomDimension",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.get_custom_dimensions",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.GetAllResponse.from_json",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.GetAllResponse",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.CustomDimension",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.CustomDimension",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.create_custom_dimension",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.CreateRequest",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.CreateResponse.from_json",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.CreateResponse",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.custom_dimension.CustomDimension",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.update_custom_dimension",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.UpdateRequest",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.UpdateResponse.from_json",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload.UpdateResponse",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "kentik_api.requests_payload.custom_dimensions_payload",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "kentik_api.public.types.ID",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions.delete_custom_dimension",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "kentik_api.api_calls.custom_dimensions",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "http.HTTPStatus.NO_CONTENT",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "http.HTTPStatus",
"line_number": 124,
"usage_type": "name"
}
] |
350127618
|
import os.path as osp
import collections as col
import numpy as np
import mastic.molecule as masticmol
import mastic.features as masticfeat
import mastic.config.features as masticfeatconfig
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
class RDKitMoleculeWrapper(object):
"""Wrapper class providing convenience access and conversion of
rdkit's molecule representations to mastic molecular representations.
Examples
--------
Load a small molecule from the test data using rdkit:
>>> from rdkit import Chem
>>> from mastic.interfaces.rdkit import RDKitMoleculeWrapper
>>> from mastic.tests.data import BEN_path
>>> sml_rdkit = Chem.MolFromPDBFile(BEN_path, remove_Hs=False)
Construct the wrapper:
>>> sml_wrapped = RDKitMoleculeWrapper(sml_rdkit)
Now we can easily (and pythonically) access attributes.
Most importantly we can convert to mastic.MoleculeType:
>>> BENType = sml_wrapped.make_molecule_type()
And use rdkit's chemical feature finding methods:
>>> features = sml_wrapped.find_features()
"""
def __init__(self, rdkit_molecule, mol_name=None):
self.rdkit_molecule = rdkit_molecule
self.mol_name = mol_name
@property
def atoms(self):
"""The rdkit atoms in the molecule."""
return [atom for atom in self.rdkit_molecule.GetAtoms()]
@property
def bonds(self):
"""The rdkit bonds in the molecule."""
return [bond for bond in self.rdkit_molecule.GetBonds()]
@property
def num_atoms(self):
"""The number of atoms in the molecule."""
return self.rdkit_molecule.GetNumAtoms()
@property
def num_bonds(self):
"""The number of bonds in the molecule."""
return self.rdkit_molecule.GetNumBonds()
@property
def num_conformers(self):
"""The number of coordinate sets for the whole molecule."""
return self.rdkit_molecule.GetNumConformers()
def atoms_data(self):
"""Return a list of the data for each atom in the molecule. Calls
atom_data.
"""
atoms_data = []
for atom in self.rdkit_molecule.GetAtoms():
atom_data = self.atom_data(atom.GetIdx())
atoms_data.append(atom_data)
return atoms_data
def atom_data(self, atom_idx):
"""Extracts useful rdkit information about an atom and returns it as a
dictionary.
"""
atom = self.atoms[atom_idx]
atom_dict = {}
atom_dict['atomic_num'] = atom.GetAtomicNum()
atom_dict['bond_degree_no_Hs'] = atom.GetDegree()
# same but want a convenience attribute
atom_dict['bond_degree'] = atom.GetDegree()
atom_dict['bond_degree_with_Hs'] = atom.GetTotalDegree()
# same but want a convenience attribute
atom_dict['total_bond_degree'] = atom.GetTotalDegree()
atom_dict['explicit_valence'] = atom.GetExplicitValence()
atom_dict['implicit_valence'] = atom.GetImplicitValence()
atom_dict['total_valence'] = atom.GetTotalValence()
atom_dict['formal_charge'] = atom.GetFormalCharge()
atom_dict['hybridization'] = atom.GetHybridization()
atom_dict['is_aromatic'] = atom.GetIsAromatic()
atom_dict['in_ring'] = atom.IsInRing()
atom_dict['isotope'] = atom.GetIsotope()
atom_dict['mass'] = atom.GetMass()
atom_dict['num_radical_electrons'] = atom.GetNumRadicalElectrons()
atom_dict['element'] = atom.GetSymbol()
atom_dict['num_Hs'] = atom.GetTotalNumHs()
monomer_info = atom.GetMonomerInfo()
if monomer_info:
atom_dict['monomer_type'] = monomer_info.GetMonomerType()
atom_dict['pdb_name'] = monomer_info.GetName().strip()
# atom_dict['pdb_chain_id'] = monomer_info.GetChainID()
atom_dict['pdb_insertion_code'] = monomer_info.GetInsertionCode()
# atom_dict['pdb_heteroatom'] = monomer_info.IsHeteroAtom()
atom_dict['pdb_occupancy'] = monomer_info.GetOccupancy()
atom_dict['pdb_residue_name'] = monomer_info.GetResidueName()
atom_dict['pdb_residue_number'] = monomer_info.GetResidueNumber()
atom_dict['pdb_serial_number'] = monomer_info.GetSerialNumber()
# atom_dict['pdb_segment_number'] = monomer_info.GetSegmentNumber()
atom_dict['pdb_temp_factor'] = monomer_info.GetTempFactor()
atom_dict['rdkit_mol_idx'] = atom.GetIdx()
atom_dict['name'] = atom_dict['rdkit_mol_idx']
return atom_dict
def bonds_data(self):
"""Return a list of the data for each bond in the molecule. Calls
bond_data.
"""
bonds_data = []
for bond_idx, bond in enumerate(self.bonds):
bond_data = self.bond_data(bond_idx)
bonds_data.append(bond_data)
return bonds_data
def bond_data(self, bond_idx):
"""Extracts useful rdkit information about an atom and returns it as a
dictionary.
"""
bond = self.bonds[bond_idx]
bond_dict = {}
bond_dict['bond_order'] = str(bond.GetBondTypeAsDouble())
bond_dict['is_aromatic'] = bond.GetIsAromatic()
bond_dict['in_ring'] = bond.IsInRing()
bond_dict['stereo'] = str(bond.GetStereo())
bond_dict['is_conjugated'] = bond.GetIsConjugated()
bond_dict['rdkit_bond_type'] = str(bond.GetBondType())
atom1_idx = bond.GetBeginAtomIdx()
atom2_idx = bond.GetEndAtomIdx()
bond_dict['rdkit_atom_idxs'] = (atom1_idx, atom2_idx)
bond_dict['rdkit_mol_idx'] = bond.GetIdx()
bond_dict['name'] = bond_dict['rdkit_mol_idx']
return bond_dict
def bonds_map(self):
"""Returns a dictionary mapping the indices of the bonds to the
indices of their atoms.
"""
bond_map_dict = {}
for bond_idx, bond in enumerate(self.bonds):
atom1_idx = bond.GetBeginAtomIdx()
atom2_idx = bond.GetEndAtomIdx()
bond_map_dict[bond_idx] = (atom1_idx, atom2_idx)
return bond_map_dict
def molecule_data(self):
"""Extracts useful rdkit information about an atom and returns it as a
dictionary.
"""
molecule_dict = {}
molecule_dict['name'] = self.mol_name
ring_info = self.rdkit_molecule.GetRingInfo()
try:
molecule_dict['num_rings'] = ring_info.NumRings()
except RuntimeError:
# something is wrong, just log it and move on
# LOGGING
molecule_dict['num_rings'] = None
molecule_dict['num_atoms'] = self.rdkit_molecule.GetNumAtoms()
molecule_dict['num_bonds'] = self.rdkit_molecule.GetNumBonds()
molecule_dict['num_heavy_atoms'] = self.rdkit_molecule.GetNumHeavyAtoms()
return molecule_dict
def make_atom_type(self, atom_idx, atom_type_name):
"""Converts a single atom to a mastic.AtomType."""
atom_data = self.atom_data(atom_idx)
return masticmol.AtomType(atom_type_name, **atom_data)
def make_atom_types(self):
"""Converts all atoms in the molecule to mastic.AtomTypes."""
atoms_data = self.atoms_data()
atom_types = []
for atom_data in atoms_data:
atom_type_name = "{1}Atom{0}Type".format(atom_data['name'], self.mol_name)
atom_type = masticmol.AtomType(atom_type_name, **atom_data)
atom_types.append(atom_type)
return atom_types
def make_bond_type(self, bond_idx, bond_type_name, bond_atom_types):
"""Converts a single bond to a mastic.BondType."""
bond_data = self.bond_data(bond_idx)
return masticmol.BondType(bond_type_name,
atom_types=bond_atom_types,
**bond_data)
def make_bond_types(self, atom_types=None):
"""Converts all bonds in the molecule to mastic.BondTypes."""
# get atom types
if atom_types is None:
atom_types = self.make_atom_types()
bonds_data = self.bonds_data()
bond_types = []
for bond_data in bonds_data:
bond_type_name = "{1}Bond{0}Type".format(bond_data['name'], self.mol_name)
bond_atom_types = (atom_types[bond_data['rdkit_atom_idxs'][0]],
atom_types[bond_data['rdkit_atom_idxs'][1]])
bond_type = masticmol.BondType(bond_type_name,
atom_types=bond_atom_types,
**bond_data)
bond_types.append(bond_type)
return bond_types
def make_molecule_type(self, find_features=False):
"""Converts the molecule to a mastic.MoleculeType.
First converts all atoms and bonds to mastic Types.
Optionally the rdkit find_features function can be called with
default settings using the flag.
"""
# get relevant data
bond_map = self.bonds_map()
molecule_data = self.molecule_data()
# AtomTypes
atom_types = self.make_atom_types()
# BondTypes
bond_types = self.make_bond_types(atom_types=atom_types)
# MoleculeType
molecule_data.update({"name" : self.mol_name})
molecule_type_name = "{}Type".format(self.mol_name)
molecule_type = masticmol.MoleculeType(molecule_type_name,
atom_types=atom_types,
bond_types=bond_types, bond_map=bond_map,
**molecule_data)
if find_features:
for idx, feat_dict in self.find_features().items():
atom_idxs = feat_dict['atom_ids']
feature_attrs = {}
feature_classifiers = [feat_dict['family'], feat_dict['type']]
feature_attrs[masticfeatconfig.FEATURE_CLASSIFIER_KEY] = feature_classifiers
feature_attrs['rdkit_position'] = feat_dict['position']
feature_attrs['rdkit_family'] = feat_dict['family']
                feature_attrs['rdkit_type'] = feat_dict['type']
feature_type_name = masticfeatconfig.FEATURE_TYPE_TEMPLATE.format(self.mol_name, idx)
feature_type = masticfeat.FeatureType(feature_type_name,
molecule_type=molecule_type,
atom_idxs=atom_idxs,
**feature_attrs)
molecule_type.add_feature_type(idx, feature_type)
return molecule_type
def find_features(self, fdef="BaseFeatures.fdef"):
"""Uses a feature definition (fdef) database to to find chemical
features in the molecule.
Returns a nested dictionary mapping the feature indices to the
feature dict.
"""
assert isinstance(fdef, str)
fdef_path = osp.join(RDConfig.RDDataDir, fdef)
feature_factory = ChemicalFeatures.BuildFeatureFactory(fdef_path)
factory_features = feature_factory.GetFeaturesForMol(self.rdkit_molecule)
features = {}
for feature in factory_features:
# unpack the coordinates from the rdkit object
pos_obj = feature.GetPos()
feature_info = {'family' : feature.GetFamily(),
'type' : feature.GetType(),
'atom_ids' : feature.GetAtomIds(),
'position' : (pos_obj.x, pos_obj.y, pos_obj.z)}
features[feature.GetId()] = feature_info
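        # group feature indices by family and type (informational only; not returned)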
families = col.defaultdict(list)
types = col.defaultdict(list)
for idx, info in features.items():
families[info['family']].append(idx)
types[info['type']].append(idx)
return features
def get_conformer_coords(self, conf_idx):
"""Returns the coordinates of the specified conformer in the molecule.
"""
assert self.rdkit_molecule.GetNumConformers() > 0, \
"{0} has no conformers".format(self)
conformer = self.rdkit_molecule.GetConformer(conf_idx)
atom_idxs = range(self.rdkit_molecule.GetNumAtoms())
# make the CoordArray
coords = []
for atom_idx in atom_idxs:
coord = conformer.GetAtomPosition(atom_idx)
coord = np.array([coord.x, coord.y, coord.z])
coords.append(coord)
coords = np.array(coords)
return coords
def AssignBondOrdersFromTemplate(refmol, mol):
""" assigns bond orders to a molecule based on the
bond orders in a template molecule
Arguments
- refmol: the template molecule
- mol: the molecule to assign bond orders to
An example, start by generating a template from a SMILES
and read in the PDB structure of the molecule
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("CN1C(=NC(C1=O)(c2ccccc2)c3ccccc3)N")
>>> mol = AllChem.MolFromPDBFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4DJU_lig.pdb'))
>>> len([1 for b in template.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
>>> len([1 for b in mol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
22
Now assign the bond orders based on the template molecule
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> len([1 for b in newMol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
Note that the template molecule should have no explicit hydrogens
else the algorithm will fail.
It also works if there are different formal charges (this was github issue 235):
>>> template=AllChem.MolFromSmiles('CN(C)C(=O)Cc1ccc2c(c1)NC(=O)c3ccc(cc3N2)c4ccc(c(c4)OC)[N+](=O)[O-]')
>>> mol = AllChem.MolFromMolFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4FTR_lig.mol'))
>>> AllChem.MolToSmiles(mol)
'COC1CC(C2CCC3C(O)NC4CC(CC(O)N(C)C)CCC4NC3C2)CCC1N(O)O'
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> AllChem.MolToSmiles(newMol)
'COc1cc(-c2ccc3c(c2)Nc2ccc(CC(=O)N(C)C)cc2NC3=O)ccc1[N+](=O)[O-]'
"""
refmol2 = Chem.Mol(refmol)
mol2 = Chem.Mol(mol)
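    # operate on copies so the caller's molecules are not modified in place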
# do the molecules match already?
matching = mol2.GetSubstructMatch(refmol2)
if not matching: # no, they don't match
        # set any non-SINGLE bonds of mol2 to SINGLE
for b in mol2.GetBonds():
if b.GetBondType() != Chem.BondType.SINGLE:
b.SetBondType(Chem.BondType.SINGLE)
b.SetIsAromatic(False)
        # set the bonds of refmol2 to SINGLE
for b in refmol2.GetBonds():
b.SetBondType(Chem.BondType.SINGLE)
b.SetIsAromatic(False)
# set atom charges to zero;
for a in refmol2.GetAtoms():
a.SetFormalCharge(0)
for a in mol2.GetAtoms():
a.SetFormalCharge(0)
matching = mol2.GetSubstructMatches(refmol2, uniquify=False)
# do the molecules match now?
if matching:
if len(matching) > 1:
pass
# print("More than one matching pattern found - picking one")
matching = matching[0]
# apply matching: set bond properties
for b in refmol.GetBonds():
atom1 = matching[b.GetBeginAtomIdx()]
atom2 = matching[b.GetEndAtomIdx()]
b2 = mol2.GetBondBetweenAtoms(atom1, atom2)
b2.SetBondType(b.GetBondType())
b2.SetIsAromatic(b.GetIsAromatic())
# apply matching: set atom properties
for a in refmol.GetAtoms():
a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])
a2.SetHybridization(a.GetHybridization())
a2.SetIsAromatic(a.GetIsAromatic())
a2.SetNumExplicitHs(a.GetNumExplicitHs())
a2.SetFormalCharge(a.GetFormalCharge())
# SanitizeMol(mol2)
if hasattr(mol2, '__sssAtoms'):
mol2.__sssAtoms = None # we don't want all bonds highlighted
else:
raise ValueError("No matching found")
return mol2
| null |
mastic/interfaces/rdkit.py
|
rdkit.py
|
py
| 16,231 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "mastic.molecule.AtomType",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "mastic.molecule",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "mastic.molecule.AtomType",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "mastic.molecule",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "mastic.molecule.BondType",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "mastic.molecule",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "mastic.molecule.BondType",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "mastic.molecule",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "mastic.molecule.MoleculeType",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "mastic.molecule",
"line_number": 258,
"usage_type": "name"
},
{
"api_name": "mastic.config.features.FEATURE_CLASSIFIER_KEY",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "mastic.config.features",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "mastic.config.features.FEATURE_TYPE_TEMPLATE.format",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "mastic.config.features.FEATURE_TYPE_TEMPLATE",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "mastic.config.features",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "mastic.features.FeatureType",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "mastic.features",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "rdkit.RDConfig.RDDataDir",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "rdkit.RDConfig",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.ChemicalFeatures.BuildFeatureFactory",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem.ChemicalFeatures",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem.Mol",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.Mol",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.BondType",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "rdkit.Chem",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.BondType",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "rdkit.Chem",
"line_number": 377,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.BondType",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "rdkit.Chem",
"line_number": 381,
"usage_type": "name"
}
] |
236043080
|
"""Definition of the ImageAnnex content type
"""
from zope.interface import implements
from Products.Archetypes import atapi
from Products.ATContentTypes.content import image
from Products.ATContentTypes.content import schemata
from collective.book import BookMessageFactory as _
from collective.book.interfaces import IImageAnnex
from collective.book.config import PROJECTNAME
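# Extend the stock ATImage schema with a vocabulary-backed picture_type field.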
ImageAnnexSchema = image.ATImageSchema.copy() + atapi.Schema((
atapi.StringField('picture_type',
schemata = 'default',
vocabulary_factory = 'collective.book.picturetypes',
widget=atapi.SelectionWidget(
format = 'select',
description=_(u'help_metadata_picturetypes',
default=u''),
label=_(u'label_metadata_picturetypes',
default=u'Picture type'),
),
),
))
# Set storage on fields copied from ATImageSchema, making sure
# they work well with the python bridge properties.
schemata.finalizeATCTSchema(
ImageAnnexSchema,
moveDiscussion=False
)
class ImageAnnex(image.ATImage):
implements(IImageAnnex)
meta_type = "ImageAnnex"
schema = ImageAnnexSchema
# -*- Your ATSchema to Python Property Bridges Here ... -*-
atapi.registerType(ImageAnnex, PROJECTNAME)
| null |
packages/collective.book/tags/0.2/collective/book/content/imageannex.py
|
imageannex.py
|
py
| 1,316 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "Products.ATContentTypes.content.image.ATImageSchema.copy",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Products.ATContentTypes.content.image.ATImageSchema",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "Products.ATContentTypes.content.image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.atapi.Schema",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Products.Archetypes.atapi",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.atapi.StringField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Products.Archetypes.atapi",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "Products.Archetypes.atapi.SelectionWidget",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "Products.Archetypes.atapi",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "collective.book.BookMessageFactory",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collective.book.BookMessageFactory",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Products.ATContentTypes.content.schemata.finalizeATCTSchema",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Products.ATContentTypes.content.schemata",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "Products.ATContentTypes.content.image.ATImage",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "Products.ATContentTypes.content.image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "zope.interface.implements",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "collective.book.interfaces.IImageAnnex",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "Products.Archetypes.atapi.registerType",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "collective.book.config.PROJECTNAME",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "Products.Archetypes.atapi",
"line_number": 46,
"usage_type": "name"
}
] |
400626726
|
import os
import sys
import numpy as np
from sleeplearning.lib.models.single_chan_expert import SingleChanExpert
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, root_dir)
import torch
from torch import nn
import sleeplearning.lib.base
class Ensembler(nn.Module):
def __init__(self, ms: dict):
super(Ensembler, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.experts = []
assert len(ms['expert_models']) > 0
for exp in ms['expert_models']:
clf = sleeplearning.lib.base.Base()
clf.restore(exp)
self.expert_channels = clf.ds['channels']
for param in clf.model.parameters():
param.requires_grad = False
self.experts.append(clf.model)
self.experts = nn.ModuleList(self.experts)
def train(self, mode=True):
super(Ensembler, self).train(mode=mode)
self.experts.eval()
#raise NotImplementedError("The Ensembler is for aggregation of predictions only.")
def forward(self, x):
# Majority vote over all experts
# logits: bs x seq_len x lab
# print(self.experts[0](x)["logits"].shape)
result = {'logits': torch.zeros_like(self.experts[0](x)['logits']).float().to(self.device)}
logits_shape = result['logits'].shape
for exp in self.experts:
pred = torch.argmax(exp(x)['logits'], dim=-1)
for i in range(logits_shape[0]):
for j in range(logits_shape[1]):
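                    # one vote per expert; the tiny jitter lets a later argmax break ties at random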
result['logits'][i, j, pred[i, j]] += 1 + np.random.rand() * 0.001
return result
| null |
sleeplearning/lib/models/ensembler.py
|
ensembler.py
|
py
| 1,702 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sleeplearning.lib.models.single_chan_expert.lib.base.Base",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sleeplearning.lib.models.single_chan_expert.lib",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sleeplearning.lib.models.single_chan_expert",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
}
] |
184444072
|
from Bio import SeqIO
import ahocorasick
import logging
def load_data(fasta_file, kmer_file):
logging.info("Loading fasta file")
fasta_list = list(SeqIO.parse(fasta_file,"fasta"))
logging.info("Loading kmer list")
kmer_list = [line.rstrip('\n') for line in open(kmer_file)]
return fasta_list, kmer_list
def find_match(line, A):
found_kmers = []
for end_index, kmer in A.iter(line):
found_kmers.append(kmer)
return found_kmers
def setup_automaton(kmer_list):
logging.info("Setting up kmer lookup")
auto = ahocorasick.Automaton()
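    # store each k-mer as both key and payload so matches yield the k-mer itself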
for seq in kmer_list:
auto.add_word(seq, seq)
auto.make_automaton()
logging.info("Completed set-up of kmer lookup")
return auto
def match_kmers(fasta_list, kmer_auto, output_file):
logging.info("Writing output")
with open(output_file,"w") as f:
for record in fasta_list:
match = find_match(str(record.seq), kmer_auto)
if match:
line = record.id + "\n"
f.write(line)
logging.info("Completed")
if __name__ == '__main__':
fasta_file = "cdhitCoronaviridae"
kmer_file = "seqSingleList.txt"
output_file = "seqfileZ.txt"
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
fasta_list, kmer_list = load_data(fasta_file, kmer_file)
kmer_auto = setup_automaton(kmer_list)
    match_kmers(fasta_list, kmer_auto, output_file)
| null |
U3.1_PreQualifiedMinSet.py
|
U3.1_PreQualifiedMinSet.py
|
py
| 1,426 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.info",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ahocorasick.Automaton",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 43,
"usage_type": "attribute"
}
] |
128219063
|
from operator import itemgetter
import pygame
from bird import Bird
from pipe import Pipe
import numpy as np
import random
SHAPE = (500, 500)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
WIDTH = SHAPE[0]
HEIGHT = SHAPE[1]
POPULATION = 30
np.random.seed(1)
random.seed(1)
pygame.init()
screen = pygame.display.set_mode(SHAPE)
clock = pygame.time.Clock()
font = pygame.font.Font(None, int(0.1*WIDTH))
def start_game():
birds = []
for _ in range(POPULATION):
player = Bird(SHAPE)
birds.append(player)
pipes = init_pipes()
return birds, pipes
def init_pipes():
pipes = [Pipe(x=WIDTH, shape=SHAPE, seed=1)]
return pipes
def clean_and_move_pipes(pipes, pipe_speed):
passed = False
    for pipe in pipes[:]:  # iterate over a copy; the list is mutated below
if pipe.x + pipe.width < 0:
pipes.remove(pipe)
passed = True
else:
pipe.move(pipe_speed)
return pipes, passed
def new_pipe(when, pipes):
if pipes[-1].x <= when:
p = Pipe(x=WIDTH, shape=SHAPE, seed=np.random.randint(1, 9))
pipes.append(p)
return pipes
def bound_delta_y(dy, low, high):
if dy < low:
dy = low
elif dy > high:
dy = high
return dy
def normalize(x, low, high):
return (x - low)/(high - low)
def select_action(bird, pipes):
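    # network inputs: normalized bird height, distance to the nearest pipe,
    # offset from the pipe-gap centre, and the bird's scaled vertical velocity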
possible_distance = (pipes[0].x + pipes[0].width) - bird.x
if possible_distance >= 0:
distance = possible_distance
closest_pipe = pipes[0]
else:
distance = (pipes[1].x + pipes[1].width)- bird.x
closest_pipe = pipes[1]
distance = normalize(distance, 0, WIDTH - 10)
bird_height = normalize(bird.y, 0, HEIGHT)
pipe_top = normalize(closest_pipe.top, 0, 3*HEIGHT/4)
pipe_bot = normalize(closest_pipe.bot, 100, 3*HEIGHT/4 + 150)
pipe_middle = (pipe_bot - pipe_top) / 2 + pipe_top
distance_from_middle = pipe_middle - bird_height
input = np.asarray([bird_height, distance, distance_from_middle, (5+bird.velocity)/10])
input = np.atleast_2d(input)
probability = bird.brain.predict(input, batch_size=1)[0]
if probability[0] >= 0.5:
return 1
return 2
def draw_bird(player):
pygame.draw.circle(screen, WHITE, [player.x, player.y], player.radius)
def draw_pipes(pipe):
for p in pipe:
pygame.draw.rect(screen, WHITE, [p.x, 0, p.width, p.top])
pygame.draw.rect(screen, WHITE, [p.x, p.bot, p.width, HEIGHT - p.bot]) # better length????
def draw_scores(score):
score_text = font.render(str(score), True, WHITE)
screen.blit(score_text, [10, 0])
def draw(pipe):
screen.fill(BLACK)
draw_pipes(pipe)
def main():
birds, pipes = start_game()
all_time_best = birds[0]
pipe_speed = -10
generations = 400
all_birds = []
for generation in range(generations):
drawing = True
print('Generation {} started'.format(generation))
running = True
pipes = init_pipes()
scores = [0]*len(birds)
while running:
if drawing:
draw(pipes)
            for player in birds[:]:  # iterate over a copy; birds is mutated inside the loop
                if pipes[0].collision(player):
                    all_birds.append(player)
                    birds.remove(player)
else:
player.increase_score(1)
if drawing:
draw_bird(player)
if len(birds) < 1:
running = False
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# pygame.display.quit()
pipes, player_passed_pipe = clean_and_move_pipes(pipes, pipe_speed)
counter = 0
for player in birds:
action = select_action(player, pipes)
player.flapping = False
if action == 1:
player.flapping = True
# if player.flapping:
# flapping_speed = flapping_max
# else:
# flapping_speed = 0
#
# dy += gravity + flapping_speed
# dy = bound_delta_y(dy, flapping_max, gravity)
player.flap()
if player_passed_pipe:
player.increase_score(50)
if player.score > all_time_best.score:
all_time_best = player
scores[counter] = player.score
counter += 1
# if player.score > 5000:
# running = False
# all_time_best = player
# return all_time_best
#draw_scores(max(scores))
pipes = new_pipe(WIDTH/3, pipes)
if drawing:
pygame.display.flip()
clock.tick(60)
# create new population of birds
total_scores = sum(scores)
maximum = max(scores)
for bird in all_birds: # CALCULATE ALL FITNESS
bird.set_fitness(total_scores)
fitnesses = [(bird.fitness, bird) for bird in all_birds]
fitnesses = sorted(fitnesses, key=itemgetter(0), reverse=True)
#print(fitnesses)
best_birds = [bird for fitness, bird in fitnesses[:10]]
parent_one = best_birds[0]
parent_two = best_birds[1]
new_child, new_child2 = parent_one.crossover(parent_two)
best_birds[-1] = new_child
best_birds[-2] = new_child2
parent_one = best_birds[2]
parent_two = best_birds[3]
new_child, new_child2 = parent_one.crossover(parent_two)
best_birds[-3] = new_child
best_birds[-4] = new_child2
birds = best_birds
amount = len(birds)
for index in range(POPULATION - amount):
birds.append(pick_a_bird(all_birds))
all_birds = []
print('Generation {} , max score {}'.format(generation, maximum))
return all_time_best
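# pick_a_bird below implements roulette-wheel (fitness-proportionate) selection;
# it assumes set_fitness() normalized each bird's fitness so all fitnesses sum to 1.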
def pick_a_bird(all_birds):
index = 0
r = random.uniform(0, 1)
while index < len(all_birds) and r > 0:
r -= all_birds[index].fitness
index += 1
index -= 1
bird = all_birds[index]
child = Bird(shape=SHAPE, brain=bird.brain)
child.mutate(0.2)
return child
bird = main()
#pygame.init()
# def draw_bird(player):
# pygame.draw.circle(screen, WHITE, [player.x, player.y], player.radius)
#
#
# def draw_pipes(pipe):
# for p in pipe:
# pygame.draw.rect(screen, WHITE, [p.x, 0, p.width, p.top])
# pygame.draw.rect(screen, WHITE, [p.x, p.bot, p.width, HEIGHT - p.bot]) # better length????
#
#
# def draw_scores(score):
# score_text = font.render(str(score), True, WHITE)
# screen.blit(score_text, [10, 0])
#
#
# def draw(pipe):
# screen.fill(BLACK)
# draw_pipes(pipe)
pipes = init_pipes()
running = True
pipe_speed = -3
while running:
draw(pipes)
draw_bird(bird)
action = select_action(bird, pipes)
bird.flapping = False
if action == 1:
bird.set_flapping()
bird.flap()
pipes, passed_pipe = clean_and_move_pipes(pipes, pipe_speed)
pipes = new_pipe(WIDTH/3, pipes)
pygame.display.flip()
clock.tick(60)
| null |
main.py
|
main.py
|
py
| 7,151 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "bird.Bird",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pipe.Pipe",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pipe.x",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pipe.width",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pipe.move",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pipe.Pipe",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "bird.x",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "bird.x",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "bird.y",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "bird.velocity",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.atleast_2d",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "bird.brain.predict",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "bird.brain",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "bird.set_fitness",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "bird.fitness",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "operator.itemgetter",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "bird.Bird",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "bird.brain",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "bird.flapping",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "bird.set_flapping",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "bird.flap",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 258,
"usage_type": "attribute"
}
] |
39434165
|
import json
U = 'UP'
D = 'DOWN'
L = 'LEFT'
R = 'RIGHT'
with open('configuration.json', 'r') as configuration:
data = json.load(configuration)
START = data['start']
END = data['end']
N = int(pow(len(START), 1/2)) if len(START) == len(END) else None
node_goal = None
class PuzzleState:
def __init__(self, state, parent, move) -> None:
self.state = state
self.parent = parent
self.move = move
self.string = ' '.join(str(value) for value in state) if state else None
# successor function
def perform_movement(state:list, move:str) -> list:
new_state = state[:]
allowed = []
index = new_state.index(0)
row = index // N
column = index % N
if row > 0:
allowed.append(U)
if row < N-1:
allowed.append(D)
if column > 0:
allowed.append(L)
if column < N-1:
allowed.append(R)
if move not in allowed:
return None
elif move == U:
new_state[index], new_state[index-N] = new_state[index-N], new_state[index]
elif move == D:
new_state[index], new_state[index+N] = new_state[index+N], new_state[index]
elif move == L:
new_state[index], new_state[index-1] = new_state[index-1], new_state[index]
elif move == R:
new_state[index], new_state[index+1] = new_state[index+1], new_state[index]
return new_state
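# Illustrative example (assuming a 3x3 board, i.e. N == 3):
# perform_movement([1, 2, 3, 4, 0, 5, 6, 7, 8], 'UP') slides the blank up,
# giving [1, 0, 3, 4, 2, 5, 6, 7, 8].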
def child_nodes(node:PuzzleState) -> list:
children = []
children.append(PuzzleState(perform_movement(node.state, U), node, U))
children.append(PuzzleState(perform_movement(node.state, D), node, D))
children.append(PuzzleState(perform_movement(node.state, L), node, L))
children.append(PuzzleState(perform_movement(node.state, R), node, R))
paths = []
for child in children:
if child.state != None:
paths.append(child)
return paths
# search method: despite the "IDS" label written to the report below, this is a breadth-first search (FIFO frontier)
def searching_algorithm() -> None:
global node_goal
frontier = [PuzzleState(START, None, None)]
visited = set()
while frontier:
node = frontier.pop(0)
visited.add(node.string)
# goal test
if node.state == END:
node_goal = node
return None
paths = child_nodes(node)
for path in paths:
if path.string not in visited:
frontier.append(path)
visited.add(path.string)
searching_algorithm()
# path cost
movements = []
while node_goal.state != START:
movements.insert(0, node_goal.move)
node_goal = node_goal.parent
summary = {}
summary['Method'] = 'IDS - Iterative Deepening Search'
summary['Start'] = START
summary['End'] = END
summary['Movements'] = movements
summary['Length'] = len(movements)
with open('report.json', 'w') as report:
json.dump(summary, report, indent=4)
| null |
puzzle/iterative_deepening_search.py
|
iterative_deepening_search.py
|
py
| 2,810 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 119,
"usage_type": "call"
}
] |
238679679
|
from django.db import models
from users.models import Person
from ticker.models import ticker, Sector, Industry
from django.db.models import Q, Sum
from decimal import Decimal
from resources.views import StockPriceReturner
import datetime
class Portfolio(models.Model):
Name=models.CharField(max_length=512, blank=False)
Owner=models.ForeignKey(Person, null=False)
IsPublic=models.BooleanField()
DateCreated=models.DateTimeField(auto_now=True)
LastModifiedTimeStamp=models.DateTimeField(auto_now=True)
#check if user is allowed to view the portfolio
    def SecurePortfolio(self, current_user, current_portfolio):
try:
owner=Person.objects.get(userid=current_user)
except Exception:
owner=None
if current_portfolio.IsPublic==True:
return True
elif current_portfolio.IsPublic!=True and current_portfolio.Owner==owner:
return True
elif current_portfolio.IsPublic!=True and current_portfolio.Owner!=owner:
return False
#get all portfolios that user is allowed to view
def AllowedView(self, current_user):
try:
owner=Person.objects.get(userid=current_user)
except Exception:
owner=None
if owner==None:
return Portfolio.objects.filter(IsPublic=True)
else:
return Portfolio.objects.filter(Q(Owner=owner)|Q(IsPublic=True))
#new portfolio
def NewPort(self, PortfolioName, publicfield, current_user):
new_portfolio=Portfolio(Name=PortfolioName,
Owner=current_user,
IsPublic=publicfield)
new_portfolio.save()
return new_portfolio
#portfolio name validation
def NameValidation(self, PortfolioName, current_user, mode):
if mode=="new":
if len(Portfolio.objects.filter(Name=PortfolioName, Owner=current_user)) >0:
warning="The Portfoilio with the same name already exists for you"
return warning
elif mode=="update":
if len(Portfolio.objects.filter(Name=PortfolioName, Owner=current_user)) >0:
warning="The Portfoilio with the same name already exists for you"
return warning
else:
return
#updating portfolio information
def update(self, PortfolioName, publicfield, current_portfolio):
updated_port=current_portfolio
updated_port.Name=PortfolioName
updated_port.IsPublic=publicfield
updated_port.save()
return updated_port
#measure portfolio's diversification
def PortfolioComponents(self, PortfolioName):
positions=Position.objects.filter(ComponentsOf=PortfolioName)
components=ticker.objects.filter(id__in=positions.values('Ticker')).values('Industry')
portfolio_industries=Industry.objects.filter(id__in=ticker.objects.filter(
id__in=positions.values('Ticker')).values('Industry')).order_by('ShortName')
IndustryWeight=[{'Industry':str(i.ShortName),
'IndustryValue':0,
'Weight':0}
for i in portfolio_industries]
TotalValue=0
for i in positions:
#get valuation of position
[PurchaseV, LiquidationV, RemainingShares]=Transaction().WeightedAverage(i, '')
Quote=ticker().GetQuoteInfo(i.Ticker.Name)
for j in IndustryWeight:
if str(i.Ticker.Industry.ShortName)==j['Industry']:
                    j['IndustryValue']+=Decimal(Quote.Price)*RemainingShares  # Decimal's 2nd arg is a context, not a precision
break
            TotalValue+=Decimal(Quote.Price)*RemainingShares
for i in IndustryWeight:
i['Weight']=round(100*i['IndustryValue']/TotalValue,2)
return TotalValue, IndustryWeight
def Performance(self, positions):
BookValue=0
FaceValue=0
for i in positions:
[PurchaseValue, LiquidationValue, Float]=Transaction().WeightedAverage(i, '')
BookValue+=PurchaseValue
if Float==0:
FaceValue+=LiquidationValue
else:
FaceValue+=LiquidationValue+Float*i.Ticker.Price
Performance=round(100*(FaceValue/BookValue-1),3)
return Performance
"""
#measure portfolio's performance
def GetPortfolioPerformance(self, port, startdate, enddate):
if startdate==enddate and startdate=='historical':
#get porfolio total performance
#group positions by ticker information
pass
if enddate=='':
enddate=datetime.datetime.now().today()
if startdate=='':
startdate=datetime.datetime.now().today()+ datetime.timedelta(years=-1)
BookValue=0
CurrentValue=0
RemainingShares=0
buy=TransactionType.objects.get(Type="Buy")
sell=TransactionType.objects.get(Type="Sell")
for i in Transaction.objects.filter(Position__in=Position.objects.filter(ComponentsOf=port)):
if i.Type==buy:
BookValue+=i.Shares*i.Price
RemainingShares+=i.Shares
else:
CurrentValue+=i.Shares*i.Price
RemainingShares-=i.Shares
#aggregate(TotalValue=Sum('Shares*Price'))
return BookValue
"""
class Position(models.Model):
Ticker=models.ForeignKey(ticker)
ComponentsOf=models.ForeignKey(Portfolio)
EnterTime=models.DateField(auto_now=False)
ExitTime=models.DateField(auto_now=False, null=True, blank=True)
def __unicode__(self):
return self.ComponentsOf.Name + "--"+ self.Ticker.Name
def PositionValidator(self, tic, port, date):
#exists already
if len(Position.objects.filter(Ticker=tic,ComponentsOf=port, EnterTime__lte=date, ExitTime__isnull=True )) ==1:
return Position.objects.get(Ticker=tic, ComponentsOf=port)
#new position
else:
new_position=Position(Ticker=tic,
ComponentsOf=port,
EnterTime=date
)
new_position.save()
return new_position
def PositionEndChecker(self, pos):
all_transactions=Transaction.objects.filter(Position=pos)
ShareCount=0
for i in all_transactions:
if i.Type==TransactionType.objects.get(Type="Buy"):
ShareCount+=i.Shares
if i.Type==TransactionType.objects.get(Type="Sell"):
ShareCount-=i.Shares
if ShareCount==0:
pos.ExitTime=all_transactions.order_by('TransactionDate').reverse()[0].TransactionDate
pos.save()
return pos
def PositionPerformance(self, pos, dateselected):
[PurchaseValue, CurrentValue, CurrentShareCount]=Transaction().WeightedAverage(pos, dateselected)
CurrentPrice=ticker().GetQuoteInfo(pos.Ticker.Name)
Perf=100*round(((Decimal(CurrentValue)+
Decimal(CurrentShareCount)*Decimal(CurrentPrice.Price))
/Decimal(PurchaseValue))-1,3)
current_price=Perf
return current_price
class TransactionType(models.Model):
Type=models.CharField(max_length=10)
class Transaction(models.Model):
Position=models.ForeignKey(Position)
Type=models.ForeignKey(TransactionType, null=False)
Price=models.DecimalField(decimal_places=2, max_digits=10)
Shares=models.IntegerField()
TransactionDate=models.DateField(auto_now=False, null=False)
Note=models.TextField(blank=True, null=True)
def Latest5Trade(self, username):
Last5=Transaction.objects.filter(Position__in=
Position.objects.filter(ComponentsOf__in=
Portfolio.objects.filter(Owner=username))
).order_by('-TransactionDate')[:5]
return Last5
def NewTransaction(self, pos, pos_type, price, shares, date, notes):
new_transaction=Transaction(Position=pos,
Type=pos_type,
Price=price,
Shares=shares,
TransactionDate=date,
Note=notes)
new_transaction.save()
return new_transaction
    def AllShares(self, pos):
        # aggregate() returns a dict; extract the sum (None when no rows match)
        bought = Transaction.objects.filter(Position=pos,
                        Type=TransactionType.objects.get(Type="Buy")).aggregate(Sum('Shares'))['Shares__sum'] or 0
        sold = Transaction.objects.filter(Position=pos,
                        Type=TransactionType.objects.get(Type="Sell")).aggregate(Sum('Shares'))['Shares__sum'] or 0
        return bought - sold
def WeightedAverage(self, pos, dateselected):
ShareCount=0
PurchaseValue=0
LiquidationValue=0
if dateselected == '':
for i in Transaction.objects.filter(Position=pos):
if i.Type==TransactionType.objects.get(Type="Buy"):
PurchaseValue+=Decimal(i.Shares)*i.Price
ShareCount+=i.Shares
if i.Type==TransactionType.objects.get(Type="Sell"):
LiquidationValue+=Decimal(i.Shares)*i.Price
ShareCount-=i.Shares
else:
for i in Transaction.objects.filter(Position=pos, TransactionDate__lte=dateselected):
if i.Type==TransactionType.objects.get(Type="Buy"):
PurchaseValue+=Decimal(i.Shares)*i.Price
ShareCount+=i.Shares
if i.Type==TransactionType.objects.get(Type="Sell"):
LiquidationValue+=Decimal(i.Shares)*i.Price
ShareCount-=i.Shares
return PurchaseValue, LiquidationValue, ShareCount
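    # Illustrative example: for a position with one buy of 10 shares at 5.00
    # and one sell of 4 shares at 6.00, WeightedAverage returns
    # (Decimal('50.00'), Decimal('24.00'), 6).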
def RelativeValueOfTransaction(self, trade):
TotalSharesOwned=Transaction.objects.filter(Position=trade.Position).aggregate(TotalShares=Sum('Shares'))
trade.Shares=round(Decimal(trade.Shares)/Decimal(TotalSharesOwned['TotalShares'])*100,2)
return trade
| null |
portfolios/models.py
|
models.py
|
py
| 11,274 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "users.models.Person",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "users.models.Person.objects.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "users.models.Person.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "users.models.Person",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "users.models.Person.objects.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "users.models.Person.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "users.models.Person",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ticker.models.ticker.objects.filter",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "ticker.models.ticker.objects",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "ticker.models.ticker",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "ticker.models.Industry.objects.filter",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "ticker.models.Industry.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "ticker.models.Industry",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "ticker.models.ticker.objects.filter",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "ticker.models.ticker.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "ticker.models.ticker",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "ticker.models.ticker",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "ticker.models.ticker",
"line_number": 188,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "ticker.models.ticker",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "django.db.models.Sum",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 318,
"usage_type": "call"
}
] |
120731195
|
from matplotlib.text import Text
import matplotlib.pyplot as plt
from matplotlib import transforms, lines
import numpy as np
import pandas as pd
from scipy import stats
def pvalAnnotation_text(x, pvalueThresholds):
singleValue = False
    if isinstance(x, np.ndarray):
x1 = x
else:
x1 = np.array([x])
singleValue = True
# Sort the threshold array
pvalueThresholds = pd.DataFrame(pvalueThresholds).sort_values(by=0, ascending=False).values
xAnnot = pd.Series(["" for _ in range(len(x1))])
for i in range(0, len(pvalueThresholds)):
if (i < len(pvalueThresholds)-1):
condition = (x1 <= pvalueThresholds[i][0]) & (pvalueThresholds[i+1][0] < x1)
xAnnot[condition] = pvalueThresholds[i][1]
else:
condition = x1 < pvalueThresholds[i][0]
xAnnot[condition] = pvalueThresholds[i][1]
return xAnnot if not singleValue else xAnnot.iloc[0]
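# Example with the default thresholds used in add_statistical_test_annotation:
#   pvalAnnotation_text(0.03, [[1e9, "ns"], [0.05, "*"], [1e-2, "**"], [1e-3, "***"], [1e-4, "****"]])
# returns '*' because 1e-2 < 0.03 <= 0.05.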
def add_statistical_test_annotation(
ax, df, catPairList, xlabel=None, ylabel=None, test='Mann-Whitney', order=None,
textFormat='star', loc='inside',
pvalueThresholds=[[1e9,"ns"], [0.05,"*"], [1e-2,"**"], [1e-3,"***"], [1e-4,"****"]],
color='0.2', lineYOffsetAxesCoord=None, lineHeightAxesCoord=0.02, yTextOffsetPoints=1,
linewidth=1.5, fontsize='medium', verbose=1):
"""
"""
if loc not in ['inside', 'outside']:
raise ValueError("loc value should be inside or outside.")
if test not in ['t-test', 'Mann-Whitney']:
raise ValueError("test name incorrect.")
if verbose >= 1 and textFormat == 'star':
print("pvalue annotation legend:")
pvalueThresholds = pd.DataFrame(pvalueThresholds).sort_values(by=0, ascending=False).values
for i in range(0, len(pvalueThresholds)):
if (i < len(pvalueThresholds)-1):
print('{}: {:.2e} < p <= {:.2e}'.format(pvalueThresholds[i][1], pvalueThresholds[i+1][0], pvalueThresholds[i][0]))
else:
print('{}: p <= {:.2e}'.format(pvalueThresholds[i][1], pvalueThresholds[i][0]))
print()
yStack = []
if xlabel is None:
# Guess the xlabel based on the xaxis label text
xlabel = ax.xaxis.get_label().get_text()
if ylabel is None:
        # Guess the ylabel based on the yaxis label text
ylabel = ax.yaxis.get_label().get_text()
xtickslabels = [t.get_text() for t in ax.xaxis.get_ticklabels()]
g = df.groupby(xlabel)
    catValues = np.array(xtickslabels)  # use the tick labels so the order matches the plot
if order is not None:
catValues = order
ylim = ax.get_ylim()
yRange = ylim[1] - ylim[0]
if loc == 'inside':
lineYOffsetAxesCoord = 0.05
elif loc == 'outside':
lineYOffsetAxesCoord = 0.03
yOffset = lineYOffsetAxesCoord*yRange
annList = []
for cat1, cat2 in catPairList:
if cat1 in catValues and cat2 in catValues:
# Get position of bars 1 and 2
x1 = np.where(catValues == cat1)[0][0]
x2 = np.where(catValues == cat2)[0][0]
cat1YMax = g[ylabel].max()[cat1]
cat2YMax = g[ylabel].max()[cat2]
cat1Values = g.get_group(cat1)[ylabel].values
cat2Values = g.get_group(cat2)[ylabel].values
testShortName = ''
if test == 'Mann-Whitney':
u_stat, pval = stats.mannwhitneyu(cat1Values, cat2Values, alternative='two-sided')
testShortName = 'M.W.W.'
if verbose >= 2: print ("{} v.s. {}: MWW RankSum P_val={:.3e} U_stat={:.3e}".format(cat1, cat2, pval, u_stat))
elif test == 't-test':
stat, pval = stats.ttest_ind(a=cat1Values, b=cat2Values)
testShortName = 't-test'
if verbose >= 2: print ("{} v.s. {}: t-test independent samples, P_val=={:.3e} stat={:.3e}".format(cat1, cat2, pval, stat))
if textFormat == 'full':
text = "{} p < {:.2e}".format(testShortName, pval)
elif textFormat is None:
text = ''
            elif textFormat == 'star':
text = pvalAnnotation_text(pval, pvalueThresholds)
if loc == 'inside':
yRef = max(cat1YMax, cat2YMax)
elif loc == 'outside':
yRef = ylim[1]
if len(yStack) > 0:
yRef2 = max(yRef, max(yStack))
else:
yRef2 = yRef
y = yRef2 + yOffset
h = lineHeightAxesCoord*yRange
lineX, lineY = [x1, x1, x2, x2], [y, y + h, y + h, y]
if loc == 'inside':
ax.plot(lineX, lineY, lw=linewidth, c=color)
elif loc == 'outside':
line = lines.Line2D(lineX, lineY, lw=linewidth, c=color, transform=ax.transData)
line.set_clip_on(False)
ax.add_line(line)
ann = ax.annotate(text, xy=(np.mean([x1, x2]), y + h),
xytext=(0, yTextOffsetPoints), textcoords='offset points',
xycoords='data', ha='center', va='bottom', fontsize=fontsize,
clip_on=False, annotation_clip=False)
annList.append(ann)
ax.set_ylim((ylim[0], 1.1*(y + h)))
plt.draw()
bbox = ann.get_window_extent()
bbox_data = bbox.transformed(ax.transData.inverted())
yTopAnnot = bbox_data.ymax
yStack.append(yTopAnnot)
yStackMax = max(yStack)
if loc == 'inside':
ax.set_ylim((ylim[0], 1.03*yStackMax))
elif loc == 'outside':
ax.set_ylim((ylim[0], ylim[1]))
return ax
| null |
statannot.py
|
statannot.py
|
py
| 5,789 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mannwhitneyu",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "scipy.stats.ttest_ind",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "matplotlib.lines.Line2D",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.lines",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 135,
"usage_type": "name"
}
] |
546866021
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from crime_data.resources.meta import FAMILIES
class TestCodesEndpoint:
@pytest.mark.parametrize('endpoint', FAMILIES)
def test_meta_endpoint_exists(self, testapp, endpoint):
res = testapp.get('/meta/{0}'.format(endpoint))
assert res.status_code == 200
assert 'filters' in res.json
assert res.json['filters'] == FAMILIES[endpoint].filter_columns
def test_meta_endpoint_handles_trailing_slash(self, testapp):
res = testapp.get('/meta/incidents/')
assert res.status_code == 200
| null |
tests/functional/test_meta.py
|
test_meta.py
|
py
| 655 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "crime_data.resources.meta.FAMILIES",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "crime_data.resources.meta.FAMILIES",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
}
] |
472123909
|
# #!/usr/bin/env python
# # -*- coding: utf-8 -*-
#
# '''
# Created on 26 Jul 2019
#
# @author: Ajay
# '''
#
from pp_final import preprocessor
from sklearn.naive_bayes import BernoulliNB
pp = preprocessor("topic", "bnb")
clf = BernoulliNB()
model = clf.fit(pp.X_train, pp.y_train)
predicted_y = model.predict(pp.X_test)
i = pp.divider
for y in predicted_y:
print(pp.instance_array[i], y)
i = i + 1
# i = 0
# for sentence in test_array:
# test = count.transform([sentence]).toarray()
# print(instance_array[i], model.predict(test))
# i = i + 1
# text_data = np.array(test_array)
# bag_of_words = count.fit_transform(text_data)
# X_test = bag_of_words.toarray()
# predicted_y = model.predict(X_test[:5])
# for i, y in enumerate(predicted_y):
# print(instance_array[i], y)
##
| null |
BNB_topics.py
|
BNB_topics.py
|
py
| 839 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pp_final.preprocessor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.BernoulliNB",
"line_number": 14,
"usage_type": "call"
}
] |
338558519
|
# Linear regression
import tensorflow as tf
import numpy as np
import random
import matplotlib.pyplot as plt
# Generate a batch of data; dividing by 10 just makes the curve look nicer
def getBatch(batchSize,start=None):
if start==None:
start = random.randint(1, 10000)
n = np.linspace(start, start+batchSize, batchSize, endpoint=True).reshape((batchSize,1)) / 10
x = np.sin(n)
y = np.cos(n)
return x,y,n
# Add a fully connected layer
def add_layer(inputs, in_size, out_size, activation_function=None, norm=False):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if norm:  # apply batch normalization
fc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0])
scale = tf.Variable(tf.ones([out_size]))
shift = tf.Variable(tf.zeros([out_size]))
epsilon = 0.001
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
# Define the neural network
def neural_networks():
x = tf.placeholder(tf.float32, [None, 1], name='x')
y = tf.placeholder(tf.float32, [None, 1], name='y')
x_list=[]
for i in range(1,10):
x_list.append(tf.sin(x*i))
x_list.append(tf.cos(x*i))
x_list.append(tf.pow(x,i))
_x=tf.concat(x_list,axis=1)
layer = add_layer(_x, len(x_list), 128, tf.nn.relu, norm=True)
layer = add_layer(layer, 128, 256, tf.nn.relu, norm=True)
layer = add_layer(layer, 256, 512, tf.nn.relu, norm=True)
prediction = add_layer(layer, 512, 1)
cost = tf.reduce_sum(tf.square(y - prediction))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
return x, y, prediction, optimizer, cost
if __name__ == '__main__':
x, y, prediction, optimizer, cost = neural_networks()
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
plt.ion()
plt.show()
for i in range(100000):
batch_x, batch_y, batch_n= getBatch(200, 0)
_, loss, pred = sess.run([optimizer, cost, prediction], feed_dict={x: batch_x, y: batch_y})
if i % 50 == 0:
print(i, loss)
plt.clf()
plt.plot(batch_n, batch_y, 'r', batch_n, pred, 'b')
plt.ylim((-1.2, 1.2))
plt.draw()
plt.pause(0.1)
| null |
Linear Regression/3.1.py
|
3.1.py
|
py
| 2,770 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.moments",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Variable",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tensorflow.ones",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.ExponentialMovingAverage",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.control_dependencies",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.identity",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.batch_normalization",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.sin",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.cos",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tensorflow.pow",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Session",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
}
] |
583990275
|
#!/usr/bin/env python
"""
Provides functionality to configure environment variables for agent.
"""
__copyright__ = '(c) Webyog, Inc'
__author__ = 'Vishal P.R'
__email__ = '[email protected]'
import os
import sys
import getopt
import re
import json
import subprocess
#add module lookup paths to sys.path so that import can find them
#we are inserting at the begining of sys.path so that we can be sure that we are importing the right module
exe_path = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 1)[0]
sys.path.insert(0, exe_path + '/lib')
sys.path.insert(0, exe_path + '/src')
import version_info
from universal import Universal
from constructs import unicode, read_input, JSONfig
def usage(is_help = False):
"""
Function to show usage information
Args:
is_help: Whether to show the full help or just the command to show help
Returns:
True
"""
if is_help == False:
sys.stdout.write('Run \'%s --help\' for more information\n' % sys.argv[0])
return True
usage_info = 'Usage: %s [options]\nOptions:\n' % sys.argv[0]
usage_info += ' --eENVIRONMENT_VAR <arg>, ... ENVIRONMENT_VAR to be exported\n'
usage_info += ' -f, --file <arg>, ... File containing the environment variable configuration\n'
usage_info += ' --restart After configuring, restart the agent\n'
usage_info += ' --version Display version information\n'
usage_info += ' -h, --help Display this information\n'
sys.stdout.write(usage_info)
return True
def read_env_vars(f, env_vars):
"""
    Function to read environment variable definitions from a file and prompt for their values
Args:
f: File descriptor containing the environment variable details to configure
env_vars: dict representing the env vars to update
Returns:
The number of env variables parsed
"""
line_regex = re.compile(r'^[# \t]+(SL\_[A-Z\_0-9]+)([ \t]*:[ \t]*([^ \t\n;]+[^\n;]*)(;[ \t]*((default[ \t]+([^ \t\n]+[^\n]*))|optional))?)?[ \t]*$')
env_vars_count = 0
#there can be multiple lines, so loop through all of them
for line in f:
match = line_regex.match(line) #extract the variable details
#not a config
if not match:
continue
match, value = match.groups(), ''
required = False if match[4] == 'optional' else True
default = env_vars.get(match[0], match[6].strip() if match[6] else '' )
prompt = '%s%s: ' % (match[2].strip() if match[2] else match[0], ' (%s)' % match[4] if not required else '')
env_vars_count += 1
#read the value from the terminal
while not value:
value = read_input(prompt, default) #use the default value if nothing is read from the terminal
#for a required variable continue as long as it available
if not required:
break
if value:
env_vars[match[0]] = value
else: #discard any unset variables
try:
del env_vars[match[0]] #it could be possible the key does not even exists
except:
pass
return env_vars_count
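# Illustrative line format accepted by the regex above (SL_PROXY is a made-up name):
#   # SL_PROXY: HTTP proxy to use; optional
# would prompt "HTTP proxy to use (optional): " and store the answer under SL_PROXY.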
try:
#try to read the environment variables from sealion config
env_vars = Universal().config.sealion.get_dict(('env', {}))['env']
except:
env_vars = {}
try:
env_vars_count, env_vars_regex, restart_agent = 0, re.compile(r'^--(e[a-zA-Z\_][a-zA-Z0-9\_]*)=?$'), False
long_options = ['file=', 'restart', 'version', 'help']
#add environment variables specified in the format --eENVIRONMENT_VAR
#we identify them and add as long options
for arg in sys.argv[1:]:
match = env_vars_regex.match(arg)
if not match:
continue
long_options.append(match.groups()[0] + '=') #these variables need a value; hence append =
long_options = dict(zip(long_options, [True] * len(long_options))).keys() #extract out unique options only
options = getopt.getopt(sys.argv[1:], 'f:h', long_options)[0]
for option, arg in options:
if option[:3] == '--e': #environment variable
env_vars[option[3:]] = arg
env_vars_count += 1
elif option in ['-f', '--file']: #environment variable description
with open(arg) as f:
env_vars_count += read_env_vars(f, env_vars)
elif option == '--restart':
restart_agent = True
elif option == '--version':
version_info.print_version() and sys.exit(0)
elif option in ['-h', '--help']:
usage(True) and sys.exit(0)
if not env_vars_count:
sys.stderr.write('Please specify the environment variables to configure\n')
usage() and sys.exit(1)
except getopt.GetoptError as e:
sys.stderr.write(unicode(e).capitalize() + '\n') #missing option value
usage() and sys.exit(1)
except (KeyboardInterrupt, EOFError):
    sys.stdout.write('\n')
sys.exit(0)
except Exception as e:
sys.stderr.write('Error: ' + unicode(e) + '\n') #any other exception
sys.exit(1)
try:
#perform the action
JSONfig.perform(filename = exe_path + '/etc/config.json', action = 'set', keys = 'env', value = json.dumps(env_vars), pretty_print = True)
except KeyError as e:
sys.stderr.write('Error: unknown key ' + unicode(e) + '\n')
sys.exit(1)
except Exception as e:
sys.stderr.write('Error: ' + unicode(e) + '\n')
sys.exit(1)
try:
    not restart_agent and sys.exit(0) #exit if we don't ask for a restart
subprocess.call([exe_path + '/etc/init.d/sealion', 'restart'], close_fds = True)
except Exception as e:
sys.stderr.write('Failed to restart agent; %s\n' % unicode(e))
| null |
code/bin/configenv.py
|
configenv.py
|
py
| 5,867 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "constructs.read_input",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "universal.Universal",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "version_info.print_version",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "getopt.GetoptError",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "constructs.unicode",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "constructs.unicode",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "constructs.JSONfig.perform",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "constructs.JSONfig",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "constructs.unicode",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "constructs.unicode",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "constructs.unicode",
"line_number": 163,
"usage_type": "call"
}
] |
568778118
|
import sqlite3
from employee import Employee
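# Note: using the connection as a context manager ("with conn:") commits the
# transaction on success and rolls it back if an exception is raised.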
def insert_emp(conn, cursor, emp):
with conn:
cursor.execute(
"INSERT INTO employees VALUES (:first, :last, :pay)",
{'first': emp.first, 'last': emp.last, 'pay': emp.pay}
)
def get_emps_by_name(cursor, lastname):
cursor.execute("SELECT * FROM employees WHERE last=:last", {'last': lastname})
return cursor.fetchall()
def update_pay(conn, cursor, emp, pay):
with conn:
cursor.execute(
'''UPDATE employees SET pay = :pay
WHERE first = :first AND last = :last''',
{'first': emp.first, 'last': emp.last, 'pay': pay}
)
def remove_emp(conn, cursor, emp):
with conn:
cursor.execute(
"DELETE from employees WHERE first = :first AND last = :last",
{'first': emp.first, 'last': emp.last}
)
def main():
conn = sqlite3.connect('employee.db')
cursor = conn.cursor()
try:
cursor.execute(
'''CREATE TABLE employees (
first text,
last text,
pay integer
)'''
)
conn.commit()
    except sqlite3.OperationalError:
        pass  # table already exists
emps = [Employee('John', 'Doe', 30000), Employee('Jane', 'Doe', 32000)]
for emp in emps:
insert_emp(conn, cursor, emp)
if emp.last == 'Doe':
update_pay(conn, cursor, emp, 50000)
print(get_emps_by_name(cursor, 'Doe'))
for emp in emps:
remove_emp(conn, cursor, emp)
conn.close()
if __name__ == '__main__':
main()
| null |
main.py
|
main.py
|
py
| 1,575 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "employee.Employee",
"line_number": 49,
"usage_type": "call"
}
] |
330572394
|
#!/usr/bin/env python
# coding: utf-8
# Testing chipping part
from keras.models import load_model
import os
import keras
from keras.layers import Dense, Input, Reshape, Flatten, Dropout
from keras import models
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import (Conv2D, Input, Reshape, Dropout, LeakyReLU, UpSampling2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout)
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from PIL import Image, ImageDraw
import skimage.filters as filters
import io
import glob
from tqdm import tqdm
import logging
import argparse
import json
import wv_util as wv
#import tfr_util as tfr
#import aug_util as aug
import csv
import random
import six
import cv2
import mahotas as mh
#from chainer.dataset import dataset_mixin
import time
def get_labels(fname="xView_train.geojson"):
with open(fname) as f:
data = json.load(f)
coords = np.zeros((len(data['features']),4))
chips = np.zeros((len(data['features'])),dtype="object")
classes = np.zeros((len(data['features'])))
for i in range(len(data['features'])):
if data['features'][i]['properties']['bounds_imcoords'] != []:
b_id = data['features'][i]['properties']['image_id']
val = np.array([int(num) for num in data['features'][i]['properties']['bounds_imcoords'].split(",")])
chips[i] = b_id
classes[i] = data['features'][i]['properties']['type_id']
coords[i] = val
else:
chips[i] = 'None'
return coords, chips, classes
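# For reference: for a GeoJSON with M features, get_labels returns coords with
# shape (M, 4) (bounding boxes), chips with shape (M,) (image ids), and
# classes with shape (M,) (type ids).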
def get_chips():
print("\nStarting to chip the xView dataset.\n")
thechips = []
theclasses = []
thecoords = []
thecoords, thechips, theclasses = get_labels()
per = 1
X_data = []
files2 = glob.glob ("/root/Desktop/seniordesign/chippedimages/*.tif")# Change this to ur own directory
files = glob.glob ("/root/Desktop/seniordesign/testing_images/*.tif")# Change this to ur own directory
for myFile in files:
t = 0
print('\nChipping image at this location: ', myFile)
image = cv2.imread (myFile)
#X_data.append (image) # https://stackoverflow.com/questions/37747021/create-numpy-array-of-images
chipped_img, chipped_box, chipped_classes = wv.chip_image(img = image, coords = thecoords, classes=theclasses, shape=(256,256))
numberOfChips = chipped_img.shape[0]
print("This image created %d chips." % chipped_img.shape[0])
while t < numberOfChips:
#print(t + 1)
os.chdir(r"/root/Desktop/seniordesign/chippedimages") # Change this to ur own directory
mh.imsave('%d.tif' % per, chipped_img[t])
os.chdir(r"/root/Desktop/seniordesign") # Change this to ur own directory
t += 1
per += 1
os.chdir(r"/root/Desktop/seniordesign/chippedimages") # Change this to ur own directory
for myFile in files2:
chipimage = mh.imread(myFile)
X_data.append(chipimage)
npchipped = np.array([np.array(Image.open(myFile)) for myFile in files2]) ### This puts all of the images in to one nparray, ( i think the block of code above does the same thing)
# https://stackoverflow.com/questions/39195113/how-to-load-multiple-images-in-a-numpy-array
    npchipped2 = np.array(X_data) # npchipped2 is the numpy array, used below
    #npchipped and npchipped2 are the same
return npchipped2, numberOfChips
def chip_images():
print("\nStarting to chip the xView dataset.\n")
thechips = []
theclasses = []
thecoords = []
thecoords, thechips, theclasses = get_labels()
per = 1
X_data = []
files2 = glob.glob ("/root/Desktop/seniordesign/chippedimages/*.tif")# Change this to ur own directory
files = glob.glob ("/root/Desktop/seniordesign/testing_images/*.tif")# Change this to ur own directory
for myFile in files:
t = 0
print('\nChipping image at this location: ', myFile)
image = cv2.imread (myFile)
#X_data.append (image) # https://stackoverflow.com/questions/37747021/create-numpy-array-of-images
chipped_img, chipped_box, chipped_classes = wv.chip_image(img = image, coords = thecoords, classes=theclasses, shape=(256,256))
numberOfChips = chipped_img.shape[0]
print("This image created %d chips." % chipped_img.shape[0])
while t < numberOfChips:
#print(t + 1)
os.chdir(r"/root/Desktop/seniordesign/chippedimages") # Change this to ur own directory
mh.imsave('%d.tif' % per, chipped_img[t])
os.chdir(r"/root/Desktop/seniordesign") # Change this to ur own directory
t += 1
per += 1
def get_numpy():
os.chdir(r"/root/Desktop/seniordesign/chippedimages")
files2 = glob.glob ("/root/Desktop/seniordesign/chippedimages/*.tif")# # Change this to ur own directory
image_data = []
for myFile in files2:
chipimage = mh.imread(myFile)
image_data.append(chipimage)
npchipped = np.array([np.array(Image.open(myFile)) for myFile in files2]) ### This puts all of the images in to one nparray, ( i think the block of code above does the same thing)
# https://stackoverflow.com/questions/39195113/how-to-load-multiple-images-in-a-numpy-array
npchipped2 = np.array(image_data)
numberOfChips = len(image_data)
return npchipped2, numberOfChips
def get_array():
print("Getting the numpy array...")
#chip_images()
    #print('\n\nChipping complete.\n')
| null |
working_gan/team_gmu_chip.py
|
team_gmu_chip.py
|
py
| 6,064 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wv_util.chip_image",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "mahotas.imsave",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "mahotas.imread",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "wv_util.chip_image",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "mahotas.imsave",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "mahotas.imread",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 140,
"usage_type": "call"
}
] |
67449781
|
from py_komoran3.komoran3py import KomoranPy
ko = KomoranPy()
ko.set_user_dictionary('./py_komoran3/user_dictionary.txt')
# Load a chat log file
def openlog( bj_code, filename, platform = 'twitch_live_chat') :
"""
'../../data/{0}/{1}/{2}'.format(platform, bj_code, filename)
platform : 플랫폼 폴더명 (기본 : twitch_live_chat)
bj_code : bj 폴더명
filename : 로그 파일이름
"""
chatlog = '../../data/{0}/{1}/{2}'.format(platform, bj_code, filename)
with open(chatlog) as f :
chat = f.readlines()
return chat
# Given a chat log (from openlog or loaded directly), split out id, timestamp and message with a regex
def preprocessing_chat(chat, num = 3) :
"""
chat : chat에는 채팅로그 넣기
num : 그룹 선택 / 안주면 기본값 3 (0 = 전체, 1 = 시간(방송기준) 2 = ID 3 = 채팅)
"""
import re
text = chat
    my = re.compile(r'\[([0-9:]*)\] <(\S*[ ]*\S*)> (\w.*)')
word_list = []
for line in text:
mytext = my.search(line)
if mytext :
word_list.append(mytext.group(num))
return word_list
# Run morphological analysis on a preprocessed chat log and return a list of morphemes
def tokenize(word_list) :
"""
word_list : 전처리된 리스트(prepro_chat함수의 리턴값 넣기)
"""
tokens_ko = []
    for wd in word_list :
        for token in ko.pos(wd) :  # call ko.pos once per word instead of once per index
            tokens_ko.append(token[0])
return tokens_ko
# Wrap a morpheme list so it can be used for frequency analysis
def nltk_text(tokens_ko) :
"""
tokens_ko : tokenize 함수 리턴값
"""
import nltk
tokens_ko_nltk = nltk.Text(tokens_ko)
return tokens_ko_nltk
# Given the nltk_text return value, return the top num words
def top_words(tokens_ko_nltk, num = 100) :
"""
tokens_ko_nltk : nltk_text 함수 리턴값
num : 상위 단어 리스트 num 만큼 리턴 / 기본값 100
"""
data= tokens_ko_nltk.vocab().most_common(num)
return data
# Keep only the morphemes that are at least num characters long
def tokenize_over(tokens_ko, num = 2) :
"""
num : 원하는 글자 크기 / 기본값 : 2 (num보다 큰 수만 출력)
"""
tokens_ko2 = []
for wd in tokens_ko :
if len(wd) >= num:
tokens_ko2.append(wd)
return tokens_ko2
# Save analysis results to a file using pickle
def pickle_data(name, bj_code, data) :
"""
name : 파일이름 지정
data : data는 저장할 data
"""
import pickle
with open('./top_words/{0}/{1}.pickle'.format(bj_code, name), 'wb') as f :
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
# Return the files in a folder as a list
def call_file_list(path) :
"""
path : 파일 경로
"""
from os import listdir
path_dir = path
file_list = listdir(path_dir)
return file_list
# Create the directory if it does not already exist
def mk_dir(dirpath) :
"""
dirpath : 체크할 폴더의 상대경로
"""
import os
dirname = dirpath
if not os.path.isdir(dirname):
os.mkdir(dirname)
if __name__ == '__main__' :
try :
bj_list = call_file_list('../../data/twitch_live_chat')
for bj in bj_list :
file_list = call_file_list('../../data/twitch_live_chat/%s'%bj)
for file in file_list :
log = openlog(bj, file)
                print('%s log loaded' % file)
                word_list = preprocessing_chat(log)
                print('%s log preprocessed' % file)
                tokens_ko = tokenize(word_list)
                print('%s log tokenized' % file)
                tokens_ko2 = tokenize_over(tokens_ko)
                print('Extracted tokens of 2+ characters')
                tokens_ko_nltk = nltk_text(tokens_ko)
                print('%s log natural-language processing complete' % file)
                tokens_ko_nltk2 = nltk_text(tokens_ko2)
                print('%s log natural-language processing complete for 2+ character tokens' % file)
                top_data = top_words(tokens_ko_nltk)
                print('%s top words extracted' % file)
                top_data2 = top_words(tokens_ko_nltk2)
                print('%s top 2+ character words extracted' % file)
                mk_dir('./top_words/{}'.format(bj))
                pickle_data('%s기본형태소' % file[:-4], bj, top_data)
                print('Saved basic-morpheme pickle')
                pickle_data('%s2글자이상형태소' % file[:-4], bj, top_data2)
                print('Saved 2+ character morpheme pickle')
except Exception as e :
print('stopped due to ', e)
print('$$$$$$$$$$$ ALL DONE $$$$$$$$$$$$$$$')
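# A minimal, self-contained demo (sample data invented here, not from the
# project) of the '[HH:MM:SS] <id> message' line format that
# preprocessing_chat expects:
#
#     sample = ['[00:01:23] <viewer_a> hello there\n',
#               '[00:01:24] <viewer_b> nice stream\n']
#     preprocessing_chat(sample, num=2)  # -> ['viewer_a', 'viewer_b']
#     preprocessing_chat(sample, num=3)  # -> ['hello there', 'nice stream']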
| null |
lib/analysis/chat_wordcount.py
|
chat_wordcount.py
|
py
| 4,932 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "py_komoran3.komoran3py.KomoranPy",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nltk.Text",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 102,
"usage_type": "call"
}
] |
139187466
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from typing import Optional
from PyQt5.QtCore import QCoreApplication, pyqtSlot, Qt
from PyQt5.QtWidgets import QApplication, QComboBox
from window import Window, Dialog
class MainWindow(Window):
def __init__(self):
super().__init__("../MainWindow.ui")
self.dialog: Optional[MyDialog] = None
mnu = self.actionOptions
mnu.setShortcut("Ctrl+O")
mnu.setStatusTip("Show options")
mnu.triggered.connect(self.show_options)
sb = self.statusBar()
sb.showMessage("Hello Statusbar!")
cb: QComboBox = self.comboBox
cb.addItem("A", 100)
cb.addItem("B", 200)
cb.addItem("C", 300)
self.register_widget(self.dockWidget)
self.register_widget(self.lineEdit)
self.register_widget(self.checkBox, changefunc=self.toggled_dockcheckbox)
self.register_widget(self.comboBox)
self.register_widget(self.dateTimeEdit)
def toggled_dockcheckbox(self, new_state):
if new_state:
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockWidget)
self.dockWidget.show()
else:
self.removeDockWidget(self.dockWidget)
def show_options(self):
self.open_dialog()
@pyqtSlot()
def on_pushButton_clicked(self):
self.open_dialog()
@pyqtSlot()
def on_pushButtonModal_clicked(self):
self.open_dialog(True)
def open_dialog(self, modal: bool = False):
if self.dialog is None:
self.dialog = MyDialog()
self.dialog.show(modal)
def closeEvent(self, e):
super().closeEvent(e)
e.accept()
QCoreApplication.exit()
class MyDialog(Dialog):
def __init__(self):
super(MyDialog, self).__init__("../Dialog.ui")
def closeEvent(self, e):
super().closeEvent(e)
e.accept()
if __name__ == "__main__":
QCoreApplication.setOrganizationName("MyCompany")
QCoreApplication.setApplicationName("MyApp")
QCoreApplication.setOrganizationDomain("MyCompany.example.com")
qapp = QApplication(sys.argv)
root = MainWindow()
root.show()
ret = qapp.exec_()
sys.exit(ret)
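# `window.Window` and its register_widget() helper are project-local and not
# shown in this file. A plausible minimal sketch of such a helper (an
# assumption on my part, not the project's actual code) persists a QCheckBox
# through QSettings, using the org/app names set in the main block:
#
#     from PyQt5.QtCore import QSettings
#
#     def register_checkbox(checkbox, changefunc=None):
#         settings = QSettings()
#         key = "state/" + checkbox.objectName()
#         checkbox.setChecked(settings.value(key, False, type=bool))
#         def on_toggle(checked):
#             settings.setValue(key, checked)  # persist on every change
#             if changefunc:
#                 changefunc(checked)
#         checkbox.toggled.connect(on_toggle)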
| null |
PyQt5/main.py
|
main.py
|
py
| 2,228 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "window.Window",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QComboBox",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.LeftDockWidgetArea",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication.exit",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "window.Dialog",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication.setOrganizationName",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication.setApplicationName",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication.setOrganizationDomain",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QCoreApplication",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 85,
"usage_type": "call"
}
] |
471615803
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.uix.label import Label
import sys
import random
#declaring the pong paddle class
class PongPaddle(Widget):
insectLives = NumericProperty(20) #Set the lives of the insects to x number
lives = NumericProperty(3) #Set the lives of the player to x number
def bounce_ball(self, ball):
        #if the ball collides with the pong paddle then bounce it back with increased speed; if it hits the top or bottom, change the angle of rebound.
if self.collide_widget(ball):
vx, vy = ball.velocity
offset = (ball.center_y - self.center_y) / (self.height / 2)
bounced = Vector(-1 * vx, vy)
            vel = bounced * 1.2 #speed the ball up by a factor of 1.2 on each bounce
ball.velocity = vel.x, vel.y + offset
#declaring the pong ball class
class PongBall(Widget):
velocity_x = NumericProperty(0) #velocity x is horizontal speed.
    velocity_y = NumericProperty(0) #velocity y is vertical speed.
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
#declaring the pong game class
class PongGame(Widget):
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None) #insect side; its insectLives counter is used below
    player2 = ObjectProperty(None)
    bee = ObjectProperty(None)
    def serve_ball(self, vel=(4, 0)): #reset the ball to the centre and serve it
self.ball.center = self.center #serves ball from center.
self.ball.velocity = vel
def update(self, dt):
self.ball.move()
#bounce of puppet
self.player2.bounce_ball(self.ball)
#bounce ball off bottom or top
if (self.ball.y < self.y) or (self.ball.top > self.top):
self.ball.velocity_y *= -1
        #ball reached the left side: bounce it back and take an insect life
if (self.ball.x < self.x) or (self.ball.width > self.width):
self.ball.velocity_x *= -1
self.player1.insectLives -= 1
        #if the player misses the ball they lose a life and the ball is re-served
if self.ball.x > self.width:
self.player2.lives -= 1
self.serve_ball(vel=(+4, 0))
        #end the game (ball held stationary) when the player loses all their lives
if self.player2.lives == 0 :
self.serve_ball(vel=(+0, 0))
        #end the game (ball held stationary) once the left side has been hit 20 times -- the player wins
if self.player1.insectLives == 0 :
self.serve_ball(vel=(+0, 0))
def on_touch_move(self, touch):
if touch.x > self.width - self.width / 3:
self.player2.center_y = touch.y
#main
class DefendApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
if __name__ == '__main__':
DefendApp().run()
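# A quick standalone check (my own example; no Kivy window needed) of the
# paddle-bounce maths in PongPaddle.bounce_ball: hitting above the paddle
# centre deflects the ball upward, and speed grows by 1.2 on each bounce.
#
#     vx, vy = 4.0, 0.0
#     ball_center_y, paddle_center_y, paddle_height = 130.0, 100.0, 100.0
#     offset = (ball_center_y - paddle_center_y) / (paddle_height / 2)  # 0.6
#     new_vx, new_vy = -1.2 * vx, 1.2 * vy + offset                     # (-4.8, 0.6)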
| null |
game.py
|
game.py
|
py
| 3,403 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "kivy.vector.Vector",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "kivy.properties.NumericProperty",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ReferenceListProperty",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "kivy.vector.Vector",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "kivy.uix.widget.Widget",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "kivy.properties.ObjectProperty",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "kivy.app.App",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 96,
"usage_type": "name"
}
] |
98837974
|
from __future__ import absolute_import
import io
import pandas as pd
from dask.bytes import open_files, read_bytes
import dask
from ..utils import insert_meta_param_description, make_meta
def to_json(
df,
url_path,
orient="records",
lines=None,
storage_options=None,
compute=True,
encoding="utf-8",
errors="strict",
compression=None,
**kwargs
):
"""Write dataframe into JSON text files
This utilises ``pandas.DataFrame.to_json()``, and most parameters are
passed through - see its docstring.
Differences: orient is 'records' by default, with lines=True; this
produces the kind of JSON output that is most common in big-data
applications, and which can be chunked when reading (see ``read_json()``).
Parameters
----------
df: dask.DataFrame
Data to save
url_path: str, list of str
Location to write to. If a string, and there are more than one
partitions in df, should include a glob character to expand into a
set of file names, or provide a ``name_function=`` parameter.
Supports protocol specifications such as ``"s3://"``.
encoding, errors:
The text encoding to implement, e.g., "utf-8" and how to respond
to errors in the conversion (see ``str.encode()``).
orient, lines, kwargs
passed to pandas; if not specified, lines=True when orient='records',
False otherwise.
storage_options: dict
Passed to backend file-system implementation
compute: bool
If true, immediately executes. If False, returns a set of delayed
objects, which can be computed at a later time.
encoding, errors:
Text conversion, ``see str.encode()``
compression : string or None
String like 'gzip' or 'xz'.
"""
if lines is None:
lines = orient == "records"
if orient != "records" and lines:
raise ValueError(
"Line-delimited JSON is only available with" 'orient="records".'
)
kwargs["orient"] = orient
kwargs["lines"] = lines and orient == "records"
outfiles = open_files(
url_path,
"wt",
encoding=encoding,
errors=errors,
name_function=kwargs.pop("name_function", None),
num=df.npartitions,
compression=compression,
**(storage_options or {})
)
parts = [
dask.delayed(write_json_partition)(d, outfile, kwargs)
for outfile, d in zip(outfiles, df.to_delayed())
]
if compute:
dask.compute(parts)
return [f.path for f in outfiles]
else:
return parts
def write_json_partition(df, openfile, kwargs):
with openfile as f:
df.to_json(f, **kwargs)
@insert_meta_param_description
def read_json(
url_path,
orient="records",
lines=None,
storage_options=None,
blocksize=None,
sample=2 ** 20,
encoding="utf-8",
errors="strict",
compression="infer",
meta=None,
engine=pd.read_json,
**kwargs
):
"""Create a dataframe from a set of JSON files
This utilises ``pandas.read_json()``, and most parameters are
passed through - see its docstring.
Differences: orient is 'records' by default, with lines=True; this
is appropriate for line-delimited "JSON-lines" data, the kind of JSON output
that is most common in big-data scenarios, and which can be chunked when
reading (see ``read_json()``). All other options require blocksize=None,
i.e., one partition per input file.
Parameters
----------
url_path: str, list of str
Location to read from. If a string, can include a glob character to
find a set of file names.
Supports protocol specifications such as ``"s3://"``.
encoding, errors:
The text encoding to implement, e.g., "utf-8" and how to respond
to errors in the conversion (see ``str.encode()``).
orient, lines, kwargs
passed to pandas; if not specified, lines=True when orient='records',
False otherwise.
storage_options: dict
Passed to backend file-system implementation
blocksize: None or int
If None, files are not blocked, and you get one partition per input
file. If int, which can only be used for line-delimited JSON files,
each partition will be approximately this size in bytes, to the nearest
newline character.
sample: int
Number of bytes to pre-load, to provide an empty dataframe structure
        to any blocks without data. Only relevant when using blocksize.
encoding, errors:
Text conversion, ``see bytes.decode()``
compression : string or None
String like 'gzip' or 'xz'.
engine : function object, default ``pd.read_json``
The underlying function that dask will use to read JSON files. By
default, this will be the pandas JSON reader (``pd.read_json``).
$META
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_json('myfile.1.json') # doctest: +SKIP
Load multiple files
>>> dd.read_json('myfile.*.json') # doctest: +SKIP
>>> dd.read_json(['myfile.1.json', 'myfile.2.json']) # doctest: +SKIP
Load large line-delimited JSON files using partitions of approx
256MB size
    >>> dd.read_json('data/file*.json', blocksize=2**28)  # doctest: +SKIP
"""
import dask.dataframe as dd
if lines is None:
lines = orient == "records"
if orient != "records" and lines:
raise ValueError(
"Line-delimited JSON is only available with" 'orient="records".'
)
if blocksize and (orient != "records" or not lines):
raise ValueError(
"JSON file chunking only allowed for JSON-lines"
"input (orient='records', lines=True)."
)
storage_options = storage_options or {}
if blocksize:
first, chunks = read_bytes(
url_path,
b"\n",
blocksize=blocksize,
sample=sample,
compression=compression,
**storage_options
)
chunks = list(dask.core.flatten(chunks))
if meta is None:
meta = read_json_chunk(first, encoding, errors, engine, kwargs)
meta = make_meta(meta)
parts = [
dask.delayed(read_json_chunk)(
chunk, encoding, errors, engine, kwargs, meta=meta
)
for chunk in chunks
]
return dd.from_delayed(parts, meta=meta)
else:
files = open_files(
url_path,
"rt",
encoding=encoding,
errors=errors,
compression=compression,
**storage_options
)
parts = [
dask.delayed(read_json_file)(f, orient, lines, engine, kwargs)
for f in files
]
return dd.from_delayed(parts, meta=meta)
def read_json_chunk(chunk, encoding, errors, engine, kwargs, meta=None):
s = io.StringIO(chunk.decode(encoding, errors))
s.seek(0)
df = engine(s, orient="records", lines=True, **kwargs)
if meta is not None and df.empty:
return meta
else:
return df
def read_json_file(f, orient, lines, engine, kwargs):
with f as f:
return engine(f, orient=orient, lines=lines, **kwargs)
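# A minimal roundtrip sketch (illustrative only; paths are made up):
#
#     import dask.dataframe as dd
#     import pandas as pd
#     df = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2)
#     paths = df.to_json("out/part-*.json")   # one line-delimited file per partition
#     df2 = dd.read_json("out/part-*.json")   # orient='records', lines=True by default
#     assert df2["a"].sum().compute() == 45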
| null |
downloadable-site-packages/dask/dataframe/io/json.py
|
json.py
|
py
| 7,327 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dask.bytes.open_files",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "dask.delayed",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "dask.compute",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pandas.read_json",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "dask.bytes.read_bytes",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "dask.core.flatten",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "dask.core",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "utils.make_meta",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "dask.delayed",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "dask.dataframe.from_delayed",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "dask.dataframe",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "dask.bytes.open_files",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "dask.delayed",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "dask.dataframe.from_delayed",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "dask.dataframe",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "utils.insert_meta_param_description",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "io.StringIO",
"line_number": 219,
"usage_type": "call"
}
] |
395430693
|
import boto3
file=open(r"C:\Users\vikash.kumar67\Desktop\instance.txt","r")
session =boto3.Session(profile_name='default')
ec2=session.client(service_name='ec2')
ssm=session.client(service_name='ssm')
for each in file:
    each = each.strip()  # drop the trailing newline so the instance id is valid
    print(each)
    #ec2.stop_instances(InstanceIds=[each])
    response = ssm.send_command(InstanceIds=[each],
                                DocumentName='AWS-RunPowerShellScript',
                                Parameters={"commands": ["date;ipconfig"]})
print(response)
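# A hedged follow-up sketch (not in the original script): send_command is
# asynchronous, so command output must be fetched separately, for example
# with get_command_invocation once the command has had time to run.
#
#     import time
#     command_id = response['Command']['CommandId']
#     time.sleep(5)  # crude wait; a real script would poll the invocation status
#     result = ssm.get_command_invocation(CommandId=command_id, InstanceId=each)
#     print(result['Status'], result['StandardOutputContent'])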
| null |
stopping services.py
|
stopping services.py
|
py
| 501 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "boto3.Session",
"line_number": 5,
"usage_type": "call"
}
] |
453090071
|
from django import forms
from django.utils.safestring import mark_safe
import re
import string
from stars.apps.credits.models import (CreditSet,
Category,
Subcategory,
Credit,
DocumentationField)
TYPE_CHOICES = (
("", "-------"),
('institution_type', 'Institution Type'),
('rating__name', 'STARS Rating'),
)
EMPTY_CHOICES = (
("", "-------"),
)
class CharacteristicFilterForm(forms.Form):
type = forms.CharField(required=False,
widget=forms.widgets.Select(choices=EMPTY_CHOICES))
item = forms.CharField(required=False,
widget=forms.widgets.Select(choices=EMPTY_CHOICES))
def clean(self):
"""
This form can be empty, but if one is filled out then both must be
"""
cleaned_data = self.cleaned_data
type = cleaned_data.get("type")
item = cleaned_data.get("item")
if type and not item:
self._errors["item"] = self.error_class(
[u"This field is required"])
del cleaned_data["item"]
return cleaned_data
def __init__(self, available_filters, **kwargs):
super(CharacteristicFilterForm, self).__init__(**kwargs)
choices = [("", "-------")]
for f in available_filters:
choices.append((f.key, f.title))
self.fields['type'].widget = forms.widgets.Select(
choices=choices, attrs={'onchange': 'applyLookup(this);',})
class DelCharacteristicFilterForm(forms.Form):
delete = forms.BooleanField(required=False, widget=forms.HiddenInput)
def __init__(self, instance, *args, **kwargs):
self.instance = instance
super(DelCharacteristicFilterForm, self).__init__(*args, **kwargs)
class CreditSetElementField(forms.CharField):
def to_python(self, value):
"""Normalize data to a Category, Subcategory, Credit or
CreditSet."""
# Return an empty list if no input was given.
if value and value != 'select_one':
pattern = "(\w+)_(\d+)"
m = re.match(pattern, value)
if m:
obj = m.groups()[0]
id = m.groups()[1]
if obj == "cat":
return Category.objects.get(pk=id)
if obj == "sub":
return Subcategory.objects.get(pk=id)
if obj == "crd":
return Credit.objects.get(pk=id)
if obj == "cs":
return CreditSet.objects.get(pk=id)
return None
class ScoreColumnForm(forms.Form):
col1 = CreditSetElementField(required=False)
col2 = CreditSetElementField(required=False)
col3 = CreditSetElementField(required=False)
col4 = CreditSetElementField(required=False)
def __init__(self, credit_set, *args, **kwargs):
# self.instance = instance
if 'initial' in kwargs:
initial = kwargs['initial']
new_initial = {}
count = 0
if initial:
                for k, col in initial.items():
if isinstance(col, Category):
new_initial[k] = "cat_%d" % col.id
elif isinstance(col, Subcategory):
new_initial[k] = "sub_%d" % col.id
elif isinstance(col, Credit):
new_initial[k] = "crd_%d" % col.id
elif isinstance(col, CreditSet):
new_initial[k] = "cs_%d" % col.id
else:
new_initial[k] = "select_one"
count += 1
else:
for i in range(1, 5):
new_initial['column_%d' % i] = "select_one"
kwargs['initial'] = new_initial
super(ScoreColumnForm, self).__init__(*args, **kwargs)
choices = [("", "Select One"),
("cs_%d" % credit_set.id, "Overall Score"),
('', '')]
# disabled = []
for cat in credit_set.category_set.filter(include_in_score=True):
choices.append(("cat_%d" % cat.id, string.upper(cat.title)))
# spacer = ("cat_%d_spacer" % cat.id, "")
choices.append(('', ''))
# disabled.append(spacer)
for sub in cat.subcategory_set.all():
choices.append(("sub_%d" % sub.id,
mark_safe(" %s" % sub.title)))
# spacer = ("sub_%d_spacer" % sub.id, "")
# choices.append(spacer)
# disabled.append(spacer)
choices.append(('', ''))
for c in sub.get_tier1_credits():
choices.append(
("crd_%d" % c.id,
mark_safe(" %s" % c.title)))
t2 = sub.get_tier2_credits()
if t2:
# spacer = ("sub_%d_t2spacer" % sub.id,
# mark_safe(" -------"))
choices.append(('',
mark_safe(' -------')))
# choices.append(spacer)
# disabled.append(spacer)
for c in t2:
choices.append(
("crd_%d" % c.id,
mark_safe(" %s" % c.title)))
# spacer = ("sub_%d_spacer2" % sub.id, "")
# choices.append(spacer)
# disabled.append(spacer)
choices.append(('', ''))
w = forms.Select(choices=choices)
self.fields['col1'].widget = w
self.fields['col2'].widget = w
self.fields['col3'].widget = w
self.fields['col4'].widget = w
self.fields['col1'].label = "Column 1"
self.fields['col2'].label = "Column 2"
self.fields['col3'].label = "Column 3"
self.fields['col4'].label = "Column 4"
class ReportingFieldSelectForm(forms.Form):
reporting_field = forms.ModelChoiceField(DocumentationField,
required=False)
def __init__(self, *args, **kwargs):
super(ReportingFieldSelectForm, self).__init__(*args, **kwargs)
cs = CreditSet.objects.get(pk=2)
cs_lookup = "credit__subcategory__category__creditset"
self.fields['reporting_field'].queryset = (
DocumentationField.objects.filter(**{cs_lookup: cs}))
self.fields['reporting_field'].widget.choices = (('', '--------'),)
def clean(self):
cleaned_data = self.cleaned_data
return cleaned_data
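# A small standalone illustration (example values invented) of the key format
# CreditSetElementField.to_python expects -- an object prefix plus a pk:
#
#     import re
#     m = re.match(r"(\w+)_(\d+)", "sub_42")
#     m.groups()  # -> ('sub', '42'): a Subcategory with pk=42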
| null |
stars/apps/institutions/data_displays/forms.py
|
forms.py
|
py
| 6,943 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.forms.Form",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.forms.widgets.Select",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.forms.widgets",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.forms.widgets.Select",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.forms.widgets",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.forms.widgets.Select",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.forms.widgets",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.forms.BooleanField",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.forms.HiddenInput",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "django.forms.CharField",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.Category.objects.get",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.Category.objects",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.Category",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.Subcategory.objects.get",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.Subcategory.objects",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.Subcategory",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.Credit.objects.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.Credit.objects",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.Credit",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.CreditSet.objects.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.CreditSet.objects",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.CreditSet",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.Category",
"line_number": 108,
"usage_type": "argument"
},
{
"api_name": "stars.apps.credits.models.Subcategory",
"line_number": 110,
"usage_type": "argument"
},
{
"api_name": "stars.apps.credits.models.Credit",
"line_number": 112,
"usage_type": "argument"
},
{
"api_name": "stars.apps.credits.models.CreditSet",
"line_number": 114,
"usage_type": "argument"
},
{
"api_name": "string.upper",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "django.forms.Select",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelChoiceField",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.DocumentationField",
"line_number": 183,
"usage_type": "argument"
},
{
"api_name": "django.forms",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.CreditSet.objects.get",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.CreditSet.objects",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.CreditSet",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "stars.apps.credits.models.DocumentationField.objects.filter",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "stars.apps.credits.models.DocumentationField.objects",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "stars.apps.credits.models.DocumentationField",
"line_number": 193,
"usage_type": "name"
}
] |
165473543
|
# -*- coding: utf-8 -*-
"""
This module manages all pip processes.
"""
import os
import logging
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
from PyQt5.QtWidgets import QApplication
logger = logging.getLogger(__name__)
#]===========================================================================[#
#] INSTALL SELECTED PACKAGES [#==============================================[#
#]===========================================================================[#
def has_bash():
"""
Test if bash is available.
"""
process = QProcess()
process.start("which bash")
process.waitForStarted()
process.waitForFinished()
if process.exitStatus() == QProcess.NormalExit:
return bool(process.readAll())
return False
class PipManager(QObject):
"""
Manage `pip` processes.
"""
started = pyqtSignal()
finished = pyqtSignal()
failed = pyqtSignal()
textChanged = pyqtSignal(str)
def __init__(self, venv_dir, venv_name, parent=None):
super().__init__(parent)
self._venv_dir = venv_dir
self._venv_name = venv_name
self._process = QProcess(self)
self._process.setWorkingDirectory(venv_dir)
self._process.readyReadStandardOutput.connect(
self.on_ready_read_stdout
)
self._process.readyReadStandardError.connect(
self.on_ready_read_stderr
)
# started
self._process.started.connect(self.started)
# updated
self._process.stateChanged.connect(self.on_state_changed)
# finished
self._process.finished.connect(self.finished)
self._process.finished.connect(self.on_finished)
def run_pip(self, command="", options=None):
"""
Activate the virtual environment and run pip commands.
"""
if has_bash():
if options is None:
options = []
venv_path = os.path.join(self._venv_dir, self._venv_name)
pip = f"pip {command} {' '.join(options)};"
pipdeptree = f"pipdeptree {' '.join(options)};"
task = pipdeptree if command == "pipdeptree" else pip
script = (
f"source {venv_path}/bin/activate;"
f"{task}"
"deactivate;"
)
self._process.start("bash", ["-c", script])
def process_stop(self):
"""Stop the process."""
self._process.close()
@pyqtSlot(QProcess.ProcessState)
def on_state_changed(self, state):
"""Show the current process state.
"""
if state == QProcess.Starting:
#print("[PROCESS]: Started")
logger.debug("Started")
elif state == QProcess.Running:
#print("[PROCESS]: Running")
logger.debug("Running")
elif state == QProcess.NotRunning:
#print("[PROCESS]: Stopped")
logger.info("Done.")
self.textChanged.emit(
"\n\nPress [ESC] to continue..."
)
    @pyqtSlot(int, QProcess.ExitStatus)
    def on_finished(self, exitCode, exitStatus):
"""Show exit code when finished.
"""
#print(f"[PROCESS]: Exit code: {exitCode}")
logger.debug(f"Exit code: {exitCode}")
self._process.kill()
@pyqtSlot()
def on_ready_read_stdout(self):
"""Read from `stdout` and send the output to `update_status()`.
"""
message = self._process.readAllStandardOutput().data().decode().strip()
#print(f"[PIP]: {message}")
logger.debug(message)
self.textChanged.emit(message)
@pyqtSlot()
def on_ready_read_stderr(self):
"""Read from `stderr`, then kill the process.
"""
message = self._process.readAllStandardError().data().decode().strip()
#print(f"[ERROR]: {message}")
logger.error(message)
self.textChanged.emit(message)
self.failed.emit()
self._process.kill()
if __name__ == "__main__":
import sys
from wizard import ConsoleDialog
app = QApplication(sys.argv)
console = ConsoleDialog()
current_dir = os.path.dirname(os.path.realpath(__file__))
_venv_name = "testenv" # need to have a virtual env in current_dir
manager = PipManager(current_dir, _venv_name)
manager.textChanged.connect(console.update_status)
manager.started.connect(console.show)
manager.run_pip(
"freeze", [f" > {current_dir}/{_venv_name}/requirements.txt"]
)
sys.exit(app.exec_())
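# For contrast, a rough non-Qt sketch (my own, standard library only) of what
# run_pip does: activate the virtual environment in bash and invoke pip.
#
#     import subprocess
#     def run_pip_blocking(venv_path, command="list"):
#         script = f"source {venv_path}/bin/activate; pip {command}; deactivate"
#         return subprocess.run(["bash", "-c", script],
#                               capture_output=True, text=True).stdout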
| null |
venvipy/manage_pip.py
|
manage_pip.py
|
py
| 4,560 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QProcess.NormalExit",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QObject",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess.Starting",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QProcess.Running",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QProcess.NotRunning",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QProcess.ProcessState",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QProcess.ExitStatus",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QProcess",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "wizard.ConsoleDialog",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 165,
"usage_type": "call"
}
] |
68522467
|
"""eshop_grigoris URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from tools.views import set_up_database,clear_sessions
from mysite.views import *
from cart.views import remove_item_from_cart,add_to_cart
from products.views import welcome_page
urlpatterns = [
url(r'^admin/', admin.site.urls),
#site urls
url(r'^$', view=homepage),
url(r'^product/(?P<slug>[-\w]+)/$', view=product_page, name='product_page'),
url(r'^product-a/(?P<slug>[-\w]+)/(?P<dk>\d+)/$', view=product_page_with_color, name='product_page_with_color'),
url(r'^ajax-search/$', view=ajax_results, name='ajax_search'),
url(r'^page-results/$', view=page_results, name='page_results'),
url(r'^register/$', view=register_page, name='register_page'),
url(r'^costumer-page/$', view=costumer_page, name='costumer_page'),
url(r'^costumer-page-ask/$', view=costumer_ask_page, name='costumer_ask_page'),
url(r'^costumer-page-order/$', view=costumer_page_order, name='costumer_page_order'),
url(r'^costumer-order/(?P<order_id>.*)/$', view=costumer_specific_order, name='costumer_order'),
url(r'^contact/$', view=contact_page, name='contact_page'),
url(r'^faq/$', view=faq_page, name='faq_page'),
url(r'^info-eshop/$', view=informations_page, name='info_page'),
url(r'^my-account/$', view=my_account, name='my_account'),
url(r'^category/(?P<slug>[-\w]+)/$', view=category_site, name='category_site'),
url(r'^brand/(?P<slug>[-\w]+)/$', view=brand_page_products, name='brand_site'),
url(r'^change-show-product/(?P<dk>\d+)/$', view=change_show_product_number, name='change_show_product'),
url(r'^site/clear-session/(?P<slug>[-\w]+)/$', view=my_site_clear_session, name='my_site_clear_session'),
url(r'^καλάθι-αγορών/$', view=basket, name='basket'),
url(r'^cart_html/$', view=cart_html,),
url(r'^add-to-cart/(?P<dk>\d+)/$', view=add_to_cart, name='cart_item_add'),
url(r'^cart-item-delete/(?P<dk>\d+)/$', view=remove_item_from_cart, name='cart_item_delete'),
url(r'^checkout/$', view=checkout, name='checkout'),
url(r'^checkout-review/$', view=checkout_review, name='checkout_review'),
#url(r'^checkout/$', view=checkout, name='checkout'),
#url(r'^checkout/$', view=checkout, name='checkout'),
#print statements
url(r'^print/(?P<dk>\d+)/$', view=print_order, name='print_order'),
#warehouse urls
url(r'^home/$', view=welcome_page, name='welcome_page'),
url(r'^accounts/',include('account.urls')),
url(r'^αποθήκη/',include('products.urls')),
url(r'^πληρωμές-εισπράξεις/',include('transcations.urls')),
url(r'^συνταγές/',include('recipes.urls')),
url(r'^PoS/',include('PoS.urls')),
url(r'^reports/',include('reports.urls')),
url(r'^blog/',include('blog.urls')),
url(r'^site/',include('mysite.urls')),
    # this url sets up the database; comment it out after use
url(r'^database/',view=set_up_database),
url(r'^clear-sessions/',view=clear_sessions),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
'''
url(r'^inventory/',include('inventory_manager.urls')),
url(r'^site/$','mysite.views.homepage',),
url(r'^homepage/$','products.views.homepage',),
url(r'^homepage/new_all/last_order/$','products.views.add_product_to_order',name='last_one'),
url(r'^homepage/new_all/last_order/(?P<dk>\d+)/$','products.views.order_done',),
url(r'^homepage/new_all/last_order/edit/(?P<dk>\d+)/$','products.views.done_order_edit_id',),
url(r'^homepage/new_all/last_order/product/edit/(?P<dk>\d+)/$','products.views.done_order_product_id',),
url(r'^homepage/new_all/last_order/product/add/(?P<dk>\d+)/$','products.views.done_order_add_product',),
url(r'^homepage/new_all/last_order/product/delete/(?P<dk>\d+)/$','products.views.done_order_delete_id',),
url(r'^homepage/edit_all/$','products.views.edit_all',),
url(r'^homepage/edit_all/orders/$','products.views.edit_orders_section',),
url(r'^homepage/edit_all/order/(?P<dk>\d+)/$','products.views.edit_order',),
url(r'^homepage/edit_all/order_id/(?P<dk>\d+)/$','products.views.edit_order_id',),
url(r'^homepage/edit_all/order_id/delete/(?P<dk>\d+)/$','products.views.delete_order_id',),
url(r'^homepage/edit_all/orders/add/(?P<dk>\d+)/$','products.views.add_order_id',),
url(r'^homepage/edit_all/products/(?P<dk>\d+)/$','products.views.edit_product_id',),
url(r'^homepage/edit_all/products/$','products.views.products_edit_section',),
url(r'^homepage/edit_all/products/vendor/(?P<dk>\d+)/$','products.views.edit_product_vendor_id'),
url(r'^homepage/edit_all/vendors/$','products.views.edit_vendor_section',),
url(r'^homepage/edit_all/vendors/(?P<dk>\d+)/$','products.views.edit_vendor_id',),
url(r'^inventory_informations/$','products.views.informations_inventory',),
url(r'^inventory_informations/προμηθευτές/$','products.views.info_vendors_section',),
url(r'^inventory_informations/προμηθευτές-ανάλυση/$','products.views.info_vendor_personal_stuff',),
url(r'^inventory_informations/προμηθευτές-υπόλοιπο/$','products.views.info_vendor_ipoloipo',),
url(r'^inventory_informations/προμηθευτές-υπόλοιπο/(?P<dk>\d+)/$','products.views.info_vendor_ipoloipo_id',),
url(r'^inventory_informations/προμηθευτές-ανά-προμηθευτή/$','products.views.info_vendor_order',),
url(r'^inventory_informations/προμηθευτές-ανά-προμηθευτή/(?P<dk>\d+)/$','products.views.info_vendor_order_id',),
url(r'^inventory_informations/calendar/$','products.views.inventory_info_calendar',),
url(r'^inventory_informations/order/$','products.views.info_order',),
url(r'^inventory_informations/πληρωμές/$','products.views.info_calendar_payments',),
url(r'^inventory_informations/προιόντα/$','products.views.info_products',),
url(r'^inventory_informations/κατηγορία/','products.views.info_products_category',),
url(r'^inventory_informations/προμηθευτής/','products.views.info_products_vendors',),
url(r'^inventory_informations/χονδρική/','products.views.info_products_xondriki',),
url(r'^inventory_informations/προιόντα/(?P<dk>\d+)$','products.views.info_products_id',),
'''
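# Named routes above can be resolved in code or templates, e.g. (illustrative):
#
#     from django.core.urlresolvers import reverse  # django.urls in newer Django
#     reverse('product_page', kwargs={'slug': 'some-product'})  # -> '/product/some-product/'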
| null |
eshop_grigoris/urls.py
|
urls.py
|
py
| 7,184 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cart.views.add_to_cart",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cart.views.remove_item_from_cart",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "products.views.welcome_page",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "tools.views.set_up_database",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tools.views.clear_sessions",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.STATIC_URL",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.STATIC_ROOT",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 83,
"usage_type": "attribute"
}
] |
131462249
|
import tensorflow as tf
import numpy as np
import sklearn as sk
import sklearn.metrics  # makes sk.metrics available below
from sklearn.linear_model import SGDClassifier
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn import svm, naive_bayes
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
np.random.seed(42)
def mnist_sk():
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
train_X, test_X, train_y, test_y = train_test_split(X, y, train_size=60000, test_size=10000, random_state=42)
ranker = naive_bayes.MultinomialNB()
print(train_X.shape, train_y.shape)
ranker.fit(train_X, train_y)
predict_y = ranker.predict(test_X)
print('precision:', sk.metrics.precision_score(test_y, predict_y, average='micro'))
class Mnist():
def __init__(self):
self.mnist = tf.keras.datasets.mnist
(train_data, train_label), (test_data, test_label) = self.mnist.load_data()
self.train_data = np.expand_dims(train_data.astype(np.float32) / 255.0, axis=-1) # [60000, 28, 28, 1]
self.test_data = np.expand_dims(test_data.astype(np.float32) / 255.0, axis=-1) # [10000, 28, 28, 1]
self.train_label = train_label
self.test_label = test_label
def get_batch(self, batch_size):
index = np.random.randint(0, self.train_data.shape[0], batch_size)
return self.train_data[index, :], self.train_label[index]
def get_testset(self):
return self.test_data, self.test_label
class Linear(tf.keras.Model):
def __init__(self):
super().__init__()
self.flat = tf.keras.layers.Flatten()
self.dense = tf.keras.layers.Dense(units=10, kernel_initializer=tf.zeros_initializer(),
bias_initializer=tf.zeros_initializer())
    def call(self, input):  # Keras models should override call(), not __call__()
x = self.flat(input)
x = self.dense(x)
output = tf.nn.softmax(x)
return output
class MLP(tf.keras.Model):
def __init__(self):
super().__init__()
self.flat = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(units=128, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(units=10)
    def call(self, input):  # Keras models should override call(), not __call__()
x = self.flat(input)
x = self.dense1(x)
x = self.dense2(x)
output = tf.nn.softmax(x)
return output
class CNN(tf.keras.Model):
def __init__(self):
super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(
            filters=32,  # number of convolution kernels
            kernel_size=[5, 5],  # receptive field size
            padding='same',  # padding strategy ('valid' or 'same')
            activation=tf.nn.relu  # activation function
        )
self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
self.dropout = tf.keras.layers.Dropout(0.1)
self.conv2 = tf.keras.layers.Conv2D(
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(units=10)
def call(self, inputs):
x = self.conv1(inputs) # [batch_size, 28, 28, 32]
x = self.dropout(x)
x = self.pool1(x) # [batch_size, 14, 14, 32]
x = self.conv2(x) # [batch_size, 14, 14, 64]
x = self.pool2(x) # [batch_size, 7, 7, 64]
x = self.flatten(x) # [batch_size, 7 * 7 * 64]
x = self.dense1(x) # [batch_size, 1024]
x = self.dense2(x) # [batch_size, 10]
output = tf.nn.softmax(x)
return output
def mnist_tf_li():
model = CNN() # Linear()
num_epoch = 500
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
# train
mnist = Mnist()
for e in range(num_epoch):
if e % 10 == 0:
print(e)
with tf.GradientTape() as tape:
x, y = mnist.get_batch(256)
y_pred = model(x)
loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)
grads = tape.gradient(loss, model.variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
test_data, test_label = mnist.get_testset()
y_pred = model.predict(test_data)
sparse_categorical_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
sparse_categorical_accuracy.update_state(y_true=test_label, y_pred=y_pred)
print("test accuracy: %f" % sparse_categorical_accuracy.result())
if __name__ == '__main__':
# mnist_sk()
mnist_tf_li()
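# An equivalent, more idiomatic Keras alternative to the manual training loop
# above (a sketch, using the same Mnist data wrapper and CNN class):
#
#     def mnist_tf_keras_fit():
#         mnist = Mnist()
#         model = CNN()
#         model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
#                       loss=tf.keras.losses.SparseCategoricalCrossentropy(),
#                       metrics=['sparse_categorical_accuracy'])
#         model.fit(mnist.train_data, mnist.train_label, batch_size=256, epochs=2)
#         model.evaluate(mnist.test_data, mnist.test_label)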
| null |
tf_mnist.py
|
tf_mnist.py
|
py
| 4,774 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets.fetch_openml",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.expand_dims",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.expand_dims",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.zeros_initializer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros_initializer",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPool2D",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Reshape",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.GradientTape",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.losses.sparse_categorical_crossentropy",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 131,
"usage_type": "attribute"
}
] |
617183754
|
"""Registry URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from .views import registry as registry_views
from .views import setup as setup_views
from .views import edit as edit_views
urlpatterns = [
# The Portal home page
url(r'^$', registry_views.home, name='portal'),
# Static requirements
url(r'^imprint/$', TemplateView.as_view(template_name="static/imprint.html"), name='imprint'),
url(r'^privacy/$', TemplateView.as_view(template_name="static/privacy.html"), name='privacy'),
# Login / Logout
url(r'^login/$', auth_views.login, {'template_name': 'auth/login.html'},
name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    # Registration
    url(r'^register/$', setup_views.register, name='register'),
# Initial data Setup
url(r'^setup/$', setup_views.setup, name='setup'),
url(r'^setup/address/$', setup_views.address, name='setup_address'),
url(r'^setup/social/$', setup_views.social, name='setup_social'),
url(r'^setup/jacobs/$', setup_views.jacobs, name='setup_jacobs'),
url(r'^setup/job/$', setup_views.job, name='setup_job'),
url(r'^setup/skills/$', setup_views.skills, name='setup_skills'),
url(r'^setup/payment/$', setup_views.SubscribeView.as_safe_view(), name='setup_payment'),
# the portal for the user
url(r'portal/', registry_views.portal, name='portal'),
# Edit views
url(r'^edit/$', edit_views.edit, name='edit'),
url(r'^edit/password/$', edit_views.password, name='edit_password'),
url(r'^edit/address/$', edit_views.address, name='edit_address'),
url(r'^edit/social/$', edit_views.social, name='edit_social'),
url(r'^edit/jacobs/$', edit_views.jacobs, name='edit_jacobs'),
url(r'^edit/job/$', edit_views.job, name='edit_job'),
url(r'^edit/skills/$', edit_views.skills, name='edit_skills'),
]
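# Illustrative only (not part of the original file): because every route above is
# named, other code can resolve paths instead of hard-coding them, assuming these
# patterns are mounted at the site root:
#   from django.urls import reverse
#   reverse('setup_address')  # -> '/setup/address/'
#   {% url 'edit_skills' %}   # -> '/edit/skills/' in a template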
| null |
registry/urls.py
|
urls.py
|
py
| 2,583 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "views.registry.home",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "views.registry",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView.as_view",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView.as_view",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.login",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.logout",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "views.setup.register",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "views.setup.setup",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "views.setup.address",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "views.setup.social",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "views.setup.jacobs",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "views.setup.job",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "views.setup.skills",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "views.setup.SubscribeView.as_safe_view",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "views.setup.SubscribeView",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "views.setup",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "views.registry.portal",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "views.registry",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "views.edit.edit",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "views.edit.password",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "views.edit.address",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "views.edit.social",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "views.edit.jacobs",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "views.edit.job",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "views.edit.skills",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "views.edit",
"line_number": 60,
"usage_type": "name"
}
] |
300879573
|
import logging
import re
import semver
import sys
import reconcile.queries as queries
from reconcile.status import ExitCodes
from utils.aws_api import AWSApi
from utils.aws.route53 import State, Account, Record, Zone
from utils.aws.route53 import DuplicateException
QONTRACT_INTEGRATION = 'aws-route53'
QONTRACT_INTEGRATION_VERSION = semver.format_version(0, 1, 0)
DEFAULT_RECORD_TTL = 300
def create_zone(dry_run, awsapi, account, zone):
"""
Create a DNS zone (callable action)
:param dry_run: Do not execute for real
:param awsapi: the AWS api object to use to call AWS
:param account: the aws account to operate on
:param zone: the DNS zone to create
:type dry_run: bool
:type awsapi: AWSApi
:type account: Account
:type zone: Zone
"""
logging.info(f'[{account.name}] Create {zone}')
if not dry_run:
awsapi.create_route53_zone(account.name, zone.name)
def delete_zone(dry_run, awsapi, account, zone):
"""
Delete a DNS zone (callable action)
:param dry_run: Do not execute for real
:param awsapi: the AWS api object to use to call AWS
:param account: the aws account to operate on
:param zone: the DNS zone to delete
:type dry_run: bool
:type awsapi: AWSApi
:type account: Account
:type zone: Zone
"""
logging.info(f'[{account.name}] Delete {zone}')
if not dry_run:
awsapi.delete_route53_zone(account.name, zone.data.get('Id'))
def create_record(dry_run, awsapi, account, zone, record):
"""
Create a DNS record (callable action)
:param dry_run: Do not execute for real
:param awsapi: the AWS api object to use to call AWS
:param account: the aws account to operate on
:param zone: the DNS zone to operate on
    :param record: the DNS record to create
:type dry_run: bool
:type awsapi: AWSApi
:type account: Account
:type zone: Zone
:type record: Record
"""
logging.info(f'[{account.name}] Create {record} in {zone}')
zone_id = zone.data.get('Id')
if not zone_id:
logging.error(
f'[{account.name}] Cannot create {record} in {zone}: '
f'missing Id key in zone data'
)
return
if not dry_run:
awsapi.upsert_route53_record(
account.name,
zone_id,
{
'Name': f'{record.name}.{zone.name}',
'Type': record.type,
'TTL': record.ttl,
'ResourceRecords': [{'Value': v} for v in record.values]
}
)
def update_record(dry_run, awsapi, account, zone, recordset):
"""
Update a DNS record (callable action)
:param dry_run: Do not execute for real
:param awsapi: the AWS api object to use to call AWS
:param account: the aws account to operate on
:param zone: the DNS zone to operate on
    :param recordset: a tuple of the desired and current records
:type dry_run: bool
:type awsapi: AWSApi
:type account: Account
:type zone: Zone
:type recordset: (Record, Record)
"""
desired_record = recordset[0]
current_record = recordset[1]
logging.info(f'[{account.name}] Update {current_record} in {zone}')
logging.info(f' Current: {current_record.name} {current_record.type} '
f'{current_record.ttl} {current_record.values}')
logging.info(f' Desired: {desired_record.name} {desired_record.type} '
f'{desired_record.ttl} {desired_record.values}')
zone_id = zone.data.get('Id')
if zone_id is None:
logging.error(
f'[{account.name}] Cannot update {current_record} in {zone}: '
f'missing Id key in zone data'
)
return
if not dry_run:
awsapi.upsert_route53_record(
account.name,
zone_id,
{
'Name': f'{desired_record.name}.{zone.name}',
'Type': desired_record.type,
'TTL': desired_record.ttl,
'ResourceRecords': [
{'Value': v} for v in desired_record.values
]
}
)
def delete_record(dry_run, awsapi, account, zone, record):
"""
Delete a DNS record (callable action)
:param dry_run: Do not execute for real
:param awsapi: the AWS api object to use to call AWS
:param account: the aws account to operate on
:param zone: the DNS zone to operate on
:param record: the DNS record to delete
:type dry_run: bool
:type awsapi: AWSApi
:type account: Account
:type zone: Zone
:type record: Record
"""
logging.info(f'[{account.name}] Delete {record} from {zone}')
zone_id = zone.data.get('Id')
if not zone_id:
logging.error(
f'[{account.name}] Cannot delete {record} in {zone}: '
f'missing Id key in zone data'
)
return
if not dry_run:
awsapi.delete_route53_record(
account.name, zone_id, record.awsdata
)
def removesuffix(s, suffix):
"""
    Removes a suffix from a string
:param s: string to remove suffix from
:param suffix: suffix to remove
:type s: str
:type suffix: str
:return: a copy of the string with the suffix removed
:rtype: str
"""
return s if not s.endswith(suffix) else s[:-len(suffix)]
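# Illustrative behaviour, as used by build_current_state() below to derive a
# record name relative to its zone:
#   removesuffix('www.example.com.', 'example.com.')  # -> 'www.'
#   removesuffix('example.com.', 'other.org.')        # -> 'example.com.' (unchanged)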
def build_current_state(awsapi):
"""
Build a State object that represents the current state
:param awsapi: the aws API object to use
:type awsapi: AWSApi
:return: returns a tuple that contains the State object and whether there \
were any errors
:rtype: (State, bool)
"""
state = State('aws')
errors = False
awsapi.map_route53_resources()
aws_state = awsapi.get_route53_zones()
for account_name, zones in aws_state.items():
account = Account(account_name)
for zone in zones:
zone_name = zone['Name']
new_zone = Zone(zone_name, zone)
for record in zone['records']:
# Can't manage SOA records, so ignore it
if record['Type'] in ['SOA']:
continue
# Can't manage NS records at apex, so ignore them
if record['Type'] == 'NS' and record['Name'] == zone_name:
continue
record_name = removesuffix(record['Name'], zone_name)
new_record = Record(new_zone, record_name, {
'type': record['Type'],
'ttl': record['TTL'],
'values': [v['Value'] for v in record['ResourceRecords']],
}, record)
new_zone.add_record(new_record)
account.add_zone(new_zone)
state.add_account(account)
return state, errors
def build_desired_state(zones):
"""
Build a State object that represents the desired state
:param zones: a representation of DNS zones as retrieved from app-interface
:type zones: dict
:return: returns a tuple that contains the State object and whether there \
were any errors
:rtype: (State, bool)
"""
state = State('app-interface')
errors = False
for zone in zones:
account_name = zone['account']['name']
account = state.get_account(account_name)
if not account:
account = Account(account_name)
new_zone = Zone(zone['name'], zone)
for record in zone['records']:
new_record = Record(new_zone, record['name'], {
'type': record['type'],
'ttl': record['ttl'] or DEFAULT_RECORD_TTL
}, record)
targets = []
record_target = record.get('target')
if record_target:
if record['type'] == 'TXT':
# TXT records values need to be enclosed in double quotes
targets.append(f'"{record_target}"')
else:
targets.append(record_target)
record_targets = record.get('targets')
if record_targets:
targets.extend(record_targets)
record_target_cluster = record.get('target_cluster')
if record_target_cluster:
cluster = record_target_cluster
cluster_name = cluster['name']
elb_fqdn = cluster.get('elbFQDN')
if not elb_fqdn:
logging.error(
f'[{account}] elbFQDN not set for cluster '
f'{cluster_name}'
)
errors = True
continue
targets.append(elb_fqdn)
if not targets:
logging.error(
f'[{account}] no targets found for '
f'{new_record} in {new_zone}'
)
errors = True
continue
new_record.add_targets(targets)
new_zone.add_record(new_record)
try:
account.add_zone(new_zone)
except DuplicateException as e:
logging.error(e)
errors = True
if not state.get_account(account_name):
state.add_account(account)
return state, errors
def diff_sets(desired, current):
"""
Diff two state dictionaries by key
:param desired: the desired state
:param current: the current state
:type desired: dict
:type current: dict
:return: returns a tuple that contains lists of added, removed and \
changed elements from the desired dict
:rtype: (list, list, list)
"""
added = [desired[item] for item in desired if item not in current]
removed = [current[item] for item in current if item not in desired]
changed = []
common = [(desired[item], current[item])
for item in current if item in desired]
for item in common:
if not item[0] == item[1]:
# Append the desired item set to changed zones list
changed.append(item)
return added, removed, changed
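# Illustrative example of diff_sets() with plain dicts (Zone/Record objects are
# compared the same way, via their equality operators):
#   desired = {'a': 1, 'b': 2}
#   current = {'b': 3, 'c': 4}
#   diff_sets(desired, current)  # -> ([1], [4], [(2, 3)])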
def reconcile_state(current_state, desired_state):
"""
Reconcile the state between current and desired State objects
:param current_state: the current state
:param desired_state: the desired_state state
    :type current_state: State
    :type desired_state: State
:return: a list of AWS API actions to run and whether there were any errors
:rtype: (list, bool)
"""
actions = []
errors = False
for desired_account in desired_state.accounts.values():
current_account = current_state.get_account(desired_account.name)
new_zones = []
add, rem, _ = diff_sets(desired_account.zones, current_account.zones)
for zone in rem:
# Removed zones
for _, record in zone.records.items():
actions.append((delete_record, current_account, zone, record))
actions.append((delete_zone, current_account, zone))
for zone in add:
# New zones
new_zones.append(zone.name)
actions.append((create_zone, current_account, zone))
for _, zone in desired_account.zones.items():
current_zone = current_account.get_zone(zone.name)
if not zone.records:
# No records defined, so we skip it (and don't manage records)
continue
if zone.name in new_zones and current_zone is None:
# This is a new zone to be created and thus we don't have
# a Route53 zone ID yet. Skip creating the records for now,
# they will be created on the next run
# TODO: Find a way to create the records on the same run?
continue
# Check if we have unmanaged_record_names (urn) and compile them
# all as regular expressions
urn_compiled = []
if 'unmanaged_record_names' in zone.data:
for urn in zone.data['unmanaged_record_names']:
urn_compiled.append(re.compile(urn))
            for record in list(zone.records.values()):  # copy: records are removed below
                for regex in urn_compiled:
                    if regex.fullmatch(record.name):
                        logging.debug(f'{desired_account} excluding unmanaged '
                                      f'record {record} because it matched '
                                      f'unmanaged_record_names pattern '
                                      f'\'{regex.pattern}\'')
                        zone.remove_record(record.name)
                        current_zone.remove_record(record.name)
                        break  # the record is removed; skip the remaining patterns
add, remove, update = diff_sets(zone.records, current_zone.records)
for record in remove:
# Removed records
actions.append(
(delete_record, current_account, current_zone, record))
for record in add:
# New records
actions.append(
(create_record, current_account, current_zone, record))
for recordset in update:
# Updated records
actions.append(
(update_record, current_account, current_zone, recordset))
return actions, errors
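# Each queued action is a tuple (callable, account, zone[, record_or_recordset]);
# run() below replays them as action[0](dry_run, awsapi, *action[1:]).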
def run(dry_run=False, thread_pool_size=10):
settings = queries.get_app_interface_settings()
zones = queries.get_dns_zones()
desired_state, err = build_desired_state(zones)
if err:
sys.exit(ExitCodes.ERROR)
participating_accounts = [z['account'] for z in zones]
awsapi = AWSApi(thread_pool_size, participating_accounts, settings)
current_state, err = build_current_state(awsapi)
if err:
sys.exit(ExitCodes.ERROR)
actions, err = reconcile_state(current_state, desired_state)
if err:
sys.exit(ExitCodes.ERROR)
for action in actions:
err = action[0](dry_run, awsapi, *action[1:])
if err:
sys.exit(ExitCodes.ERROR)
| null |
reconcile/aws_route53.py
|
aws_route53.py
|
py
| 14,031 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "semver.format_version",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.State",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Account",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Zone",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Record",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.State",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Account",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Zone",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.Record",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "utils.aws.route53.DuplicateException",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "reconcile.queries.get_app_interface_settings",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "reconcile.queries",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "reconcile.queries.get_dns_zones",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "reconcile.queries",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "reconcile.status.ExitCodes.ERROR",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "reconcile.status.ExitCodes",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "utils.aws_api.AWSApi",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "reconcile.status.ExitCodes.ERROR",
"line_number": 423,
"usage_type": "attribute"
},
{
"api_name": "reconcile.status.ExitCodes",
"line_number": 423,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "reconcile.status.ExitCodes.ERROR",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "reconcile.status.ExitCodes",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "reconcile.status.ExitCodes.ERROR",
"line_number": 432,
"usage_type": "attribute"
},
{
"api_name": "reconcile.status.ExitCodes",
"line_number": 432,
"usage_type": "name"
}
] |
556760686
|
import numpy as np
import zmes_hook_helpers.common_params as g
import zmes_hook_helpers.log as log
import sys
import cv2
import time
import datetime
import re
# Class to handle Yolo based detection
class Yolo:
# The actual CNN object detection code
# opencv DNN code credit: https://github.com/arunponnusamy/cvlib
def __init__(self):
self.initialize = True
self.net = None
self.classes = None
def populate_class_labels(self):
if g.config['yolo_type'] == 'tiny':
class_file_abs_path = g.config['tiny_labels']
else:
class_file_abs_path = g.config['labels']
        with open(class_file_abs_path, 'r') as f:  # ensure the label file is closed
            self.classes = [line.strip() for line in f.readlines()]
def get_classes(self):
return self.classes
def get_output_layers(self):
layer_names = self.net.getLayerNames()
output_layers = [
layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()
]
return output_layers
def detect(self, image):
Height, Width = image.shape[:2]
modelW = 416
modelH = 416
g.logger.debug(
'|---------- YOLO (input image: {}w*{}h, resized to: {}w*{}h) ----------|'
.format(Width, Height, modelW, modelH))
scale = 0.00392 # 1/255, really. Normalize inputs.
if g.config['yolo_type'] == 'tiny':
config_file_abs_path = g.config['tiny_config']
weights_file_abs_path = g.config['tiny_weights']
else:
config_file_abs_path = g.config['config']
weights_file_abs_path = g.config['weights']
if self.initialize:
g.logger.debug('Initializing Yolo')
g.logger.debug('config:{}, weights:{}'.format(
config_file_abs_path, weights_file_abs_path),level=2)
start = datetime.datetime.now()
self.populate_class_labels()
self.net = cv2.dnn.readNet(weights_file_abs_path,
config_file_abs_path)
#self.net = cv2.dnn.readNetFromDarknet(config_file_abs_path, weights_file_abs_path)
if g.config['use_opencv_dnn_cuda'] == 'yes':
(maj, minor, patch) = cv2.__version__.split('.')
min_ver = int(maj + minor)
if min_ver < 42:
g.logger.error('Not setting CUDA backend for OpenCV DNN')
g.logger.error(
'You are using OpenCV version {} which does not support CUDA for DNNs. A minimum of 4.2 is required. See https://www.pyimagesearch.com/2020/02/03/how-to-use-opencvs-dnn-module-with-nvidia-gpus-cuda-and-cudnn/ on how to compile and install openCV 4.2'
.format(cv2.__version__))
else:
g.logger.debug(
'Setting CUDA backend for OpenCV. If you did not set your CUDA_ARCH_BIN correctly during OpenCV compilation, you will get errors during detection related to invalid device/make_policy'
)
self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
else:
g.logger.debug("Not using CUDA backend")
diff_time = (datetime.datetime.now() - start).microseconds / 1000
g.logger.debug(
'YOLO initialization (loading model from disk) took: {} milliseconds'
.format(diff_time))
self.initialize = False
start = datetime.datetime.now()
ln = self.net.getLayerNames()
ln = [ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(image,
scale, (modelW, modelH), (0, 0, 0),
True,
crop=False)
self.net.setInput(blob)
outs = self.net.forward(ln)
diff_time = (datetime.datetime.now() - start).microseconds / 1000
g.logger.debug(
'YOLO detection took: {} milliseconds'.format(diff_time))
class_ids = []
confidences = []
boxes = []
nms_threshold = 0.4
conf_threshold = 0.2
        # first pass: NMS uses a confidence threshold of 0.2, or the configured minimum if that is lower
if g.config['yolo_min_confidence'] < conf_threshold:
conf_threshold = g.config['yolo_min_confidence']
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
start = datetime.datetime.now()
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
nms_threshold)
diff_time = (datetime.datetime.now() - start).microseconds / 1000
g.logger.debug(
'YOLO NMS filtering took: {} milliseconds'.format(diff_time))
bbox = []
label = []
conf = []
        # second pass: apply the configured yolo confidence, so rejections show up in the log
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
object_area = w * h
max_object_area = object_area
if g.config['max_object_size']:
                g.logger.debug('Max object size found to be: {}'.format(g.config['max_object_size']),level=3)
                # Let's make sure it's the right size
                m = re.match(r'(\d*\.?\d*)(px|%)?$', g.config['max_object_size'],
                             re.IGNORECASE)
if m:
max_object_area = float(m.group(1))
if m.group(2) == '%':
max_object_area = float(m.group(1))/100.0*(modelH * modelW)
                        g.logger.debug ('Converted {}% to {}'.format(m.group(1), max_object_area), level=2)
else:
                    g.logger.error('max_object_size misformatted: {} - ignoring'.format(
                        g.config['max_object_size']))
if (object_area > max_object_area):
g.logger.debug ('Ignoring object:{}, as its area: {}px exceeds max_object_area of {}px'.format(str(self.classes[class_ids[i]]), object_area, max_object_area))
continue
if confidences[i] >= g.config['yolo_min_confidence']:
bbox.append([
int(round(x)),
int(round(y)),
int(round(x + w)),
int(round(y + h))
])
label.append(str(self.classes[class_ids[i]]))
conf.append(confidences[i])
g.logger.info(
                    'object:{} at {} has an acceptable confidence:{} compared to min confidence of: {}, adding'
.format(label[-1], bbox[-1], conf[-1],
g.config['yolo_min_confidence']))
else:
g.logger.info(
'rejecting object:{} at {} because its confidence is :{} compared to min confidence of: {}'
.format(str(self.classes[class_ids[i]]), [
int(round(x)),
int(round(y)),
int(round(x + w)),
int(round(y + h))
], confidences[i], g.config['yolo_min_confidence']))
return bbox, label, conf
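# Minimal usage sketch (illustrative; assumes g.config has been populated by the
# zmes_hook_helpers bootstrap and the snapshot path is hypothetical):
#   detector = Yolo()
#   frame = cv2.imread('/var/cache/zoneminder/snapshot.jpg')
#   bbox, label, conf = detector.detect(frame)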
| null |
hook/zmes_hook_helpers/yolo.py
|
yolo.py
|
py
| 8,038 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.readNet",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "cv2.__version__.split",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.__version__",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.error",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.error",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "cv2.__version__",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "cv2.dnn",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "numpy.argmax",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.NMSBoxes",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "re.IGNORECASE",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.error",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.debug",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.info",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.logger.info",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "zmes_hook_helpers.common_params.logger",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "zmes_hook_helpers.common_params.config",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "zmes_hook_helpers.common_params",
"line_number": 194,
"usage_type": "name"
}
] |
166514249
|
from collections import namedtuple
import networkx as nx
import matplotlib.pyplot as plt
Chemin = namedtuple('Chemin', ['Trajet', 'Cout', 'Autonomie'])
def creerGraphe(nomFichier):
fichierTexte = open(nomFichier, "r")
blocks = fichierTexte.read().rsplit('\n\n')
sommets = []
for line in blocks[0].splitlines():
line = line.split(',')
        sommets.append(Node(int(line[0]), bool(int(line[1]))))  # assumes the station flag is stored as 0/1; bool('0') would always be True
for line in blocks[1].splitlines():
line = line.split(',')
relierSommets(sommets[int(line[0]) - 1], sommets[int(line[1]) - 1], int(line[2]))
return Graphe(sommets)
def relierSommets(sommet1, sommet2, distance):
sommet1.ajouterVoisin(sommet2, distance)
sommet2.ajouterVoisin(sommet1, distance)
def trouverCheminMinimal(listeChemins):
chemin = listeChemins[0]
for trajet in listeChemins:
if trajet.Cout < chemin.Cout:
chemin = trajet
return chemin
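# Illustrative: the Chemin with the smallest Cout wins.
#   c1 = Chemin(Trajet=[], Cout=120, Autonomie=40)
#   c2 = Chemin(Trajet=[], Cout=90, Autonomie=55)
#   trouverCheminMinimal([c1, c2])  # -> c2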
class Voiture:
def __init__(self):
self.clients = []
self.position = 15
self.charge = 100
self.cheminParcouru = None
class Node:
def __init__(self, id, station):
self.id = id
self.station = bool(station)
self.voisins = []
def ajouterVoisin(self, node, distance):
sommet = {'Node': node,
'Distance': distance}
self.voisins.append(sommet)
def __repr__(self):
return str(self.id)
def plusCourtCheminDansListe(listeChemins):
cheminCourt = listeChemins[0]
for chemin in listeChemins:
if chemin.Cout < cheminCourt.Cout:
cheminCourt = chemin
return cheminCourt
class Graphe:
def __init__(self, sommets):
self.sommets = sommets
self.voiture = Voiture()
def trouverSommet(self, id):
return self.sommets[id - 1]
def verification(self, chemin, *args):
for arg in args:
if self.trouverSommet(arg) not in chemin.Trajet:
return False
return True
def cheminsDisponibles(self, debut, fin, listeChemins=None, limite=float('Inf')):
sommetDepart = self.trouverSommet(debut)
sommetFin = self.trouverSommet(fin)
listeSommets = [sommetDepart]
if listeChemins is None:
chemin = Chemin(listeSommets, 0, 100)
else:
chemin = plusCourtCheminDansListe(listeChemins)
listeCheminsConnus = [chemin]
listeCheminsCourts = []
while listeCheminsConnus:
dernierNode = chemin.Trajet[-1]
if dernierNode not in listeSommets:
listeSommets.append(dernierNode)
for voisin in dernierNode.voisins:
nodeVoisin = voisin['Node']
distance = voisin['Distance']
if nodeVoisin not in listeSommets and chemin.Autonomie > 15 and chemin.Cout + distance < limite:
listeCheminsConnus.append(Chemin(chemin.Trajet + [nodeVoisin],
chemin.Cout + distance,
chemin.Autonomie - distance))
if nodeVoisin.station:
listeCheminsConnus.append(Chemin(chemin.Trajet + [nodeVoisin],
chemin.Cout + distance + 10,
100))
listeCheminsConnus.remove(chemin)
if listeCheminsConnus:
chemin = trouverCheminMinimal(listeCheminsConnus)
if chemin.Trajet[-1] == sommetFin and sommetDepart in chemin.Trajet:
listeCheminsCourts.append(chemin)
# for trajet in listeCheminsCourts:
# print(trajet)
return listeCheminsCourts
def detourPossible(self, debut, fin, listeChemins):
sommetDepart = self.trouverSommet(debut)
sommetFin = self.trouverSommet(fin)
listeTriee = []
for chemin in listeChemins:
if sommetFin in chemin.Trajet and sommetDepart in chemin.Trajet \
and chemin.Trajet.index(sommetDepart) < chemin.Trajet.index(sommetFin):
listeTriee.append(chemin)
if listeTriee:
listeChemins.clear()
listeChemins += listeTriee
return True
return False
def trierListe(self, listeChemins):
requetes = [[16, 7], [14, 4], [10, 12], [19, 17]]
listeTriee = []
condition = True
for chemin in listeChemins:
for requete in requetes:
sommetDepart = self.trouverSommet(requete[0])
sommetFin = self.trouverSommet(requete[1])
if sommetFin in chemin.Trajet and sommetDepart in chemin.Trajet \
and chemin.Trajet.index(sommetDepart) < chemin.Trajet.index(sommetFin):
condition &= True
else:
condition &= False
if condition:
listeTriee.append(chemin)
condition = True
listeChemins.clear()
listeChemins += listeTriee
def trierChemins(self, listeChemins):
requetes = {'Origines': [16, 14, 10, 19], 'Destinations': [7, 4, 12, 17]}
liste = [plusCourtCheminDansListe(listeChemins)]
for chemin in listeChemins:
for origine in requetes['Origines']:
sommet = self.trouverSommet(origine)
if sommet != liste[0].Trajet[-1] and sommet in chemin.Trajet and chemin not in liste:
liste.append(chemin)
listeChemins.clear()
listeChemins += liste
def plusCourtChemin(self, debut, fin, limite=float('Inf')):
listeChemins = self.cheminsDisponibles(debut, fin, limite=limite)
return plusCourtCheminDansListe(listeChemins)
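    # Illustrative call (assumes the graph built from src/arrondissements.txt in
    # __main__ below): graphe.plusCourtChemin(15, 16) returns the cheapest Chemin
    # from node 15 to node 16, where refuelling at a station adds 10 to the cost.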
def requetes(self):
positionDepart = 15
requetes = [[16, 7], [14, 4]]
listeChemins = self.cheminsDisponibles(15, 16)
if self.detourPossible(16, 7, listeChemins):
print(True)
else:
listeChemins = self.cheminsDisponibles(16, 7, listeChemins, 135)
print(len(listeChemins))
if self.detourPossible(14, 7, listeChemins):
print(True)
listeChemins = self.cheminsDisponibles(14, 4, listeChemins, 125)
self.trierListe(listeChemins)
print(listeChemins)
def testLotfi(self):
listeChemins = self.cheminsDisponibles(19, 17, limite=100)
for chemin in listeChemins:
if self.trouverSommet(4) in chemin.Trajet:
print(chemin)
def afficherGraphe(self):
arretes = []
texteArretes = {}
for sommet in self.sommets:
for voisin in sommet.voisins:
if [voisin['Node'].id, sommet.id] not in arretes:
arretes.append([sommet.id, voisin['Node'].id])
texteArretes.update({(sommet.id, voisin['Node'].id): voisin['Distance']})
G = nx.Graph()
G.add_edges_from(arretes)
pos = nx.spring_layout(G)
plt.figure()
nx.draw(G, pos, edge_color='black', width=1, linewidths=1,
node_size=500, node_color='pink', alpha=0.9,
labels={node: node for node in G.nodes()})
nx.draw_networkx_edge_labels(G, pos, edge_labels=texteArretes, font_color='red')
plt.axis('off')
plt.show()
if __name__ == '__main__':
graphe = creerGraphe("src/arrondissements.txt")
listeChemins = graphe.cheminsDisponibles(15, 16)
print(len(listeChemins))
graphe.trierChemins(listeChemins)
print(len(listeChemins))
listeChemins = graphe.cheminsDisponibles(16, 7, listeChemins, 135)
print(len(listeChemins))
graphe.trierChemins(listeChemins)
print(len(listeChemins))
listeChemins = graphe.cheminsDisponibles(7, 14, listeChemins, 135)
print(len(listeChemins))
graphe.trierChemins(listeChemins)
print((listeChemins))
listeChemins = graphe.cheminsDisponibles(14, 4, listeChemins, 135)
print((listeChemins))
listeChemins = graphe.cheminsDisponibles(4, 10)
print((listeChemins))
# graphe.detourPossible(1, 19)
# graphe.afficherGraphe()
# graphe.requetes()
# graphe.testLotfi()
| null |
perso/graphe_lotfi.py
|
graphe_lotfi.py
|
py
| 8,289 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "networkx.draw",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_edge_labels",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
}
] |
393485838
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import matplotlib
matplotlib.use( 'Agg' ) # use Agg backend to run on servers
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os, sys, itertools, argparse
from vae import AE, GaussVAE, CatVAE, CgVAE
def plot_zspace( vae, sess, filename, nx=25, ny=25, MAX=2.5, W=32 ):
'''
a convenient method to plot the 2D latent space
'''
x_values = np.linspace(-MAX, MAX, nx)
y_values = np.linspace(-MAX, MAX, ny)
canvas = np.empty( [W*nx, W*ny, 3] )
for i, yi in enumerate( x_values ):
for j, xi in enumerate( y_values ):
z_mu = np.array( [[xi, yi]] )
theta = vae.extract_decoder()
x_mean = sess.run( tf.sigmoid(theta), feed_dict={ vae.decodeZ:z_mu } )[0]
canvas[(nx-i-1)*W:(nx-i)*W, j*W:(j+1)*W, :] = x_mean
plt.figure()
plt.xticks( [0, W*nx], ['-1', '1'] )
plt.yticks( [0, W*ny], ['1', '-1'] )
plt.imshow( canvas, interpolation=None, origin="upper", cmap="gray" )
plt.savefig( '{0}.pdf'.format( filename ), bbox_inches='tight', pad_inches=0, transparent=True )
sys.exit(0)
def load_data( dataset ):
if dataset.lower() == 'mnist':
data = input_data.read_data_sets( 'data/' )
return data.train.images, data.validation.images, data.test.images
elif dataset.lower() == 'svhn':
def extract( path ):
data = scipy.io.loadmat( path )
data = np.array( data['X'], dtype=np.float32 ) / 255
N = data.shape[3]
data = np.array( [ data[:,:,:,i] for i in range( N ) ] )
np.random.shuffle( data )
data = data.mean( -1, keepdims=True ) # convert to gray scale for simplicity
return data
np.random.seed( 2017 )
train = extract( 'data/train_32x32.mat' )
test = extract( 'data/test_32x32.mat' )
N_valid = int( train.shape[0] * 0.1 )
return train[:-N_valid], train[-N_valid:], test
else:
        raise RuntimeError( 'unknown dataset: {0}'.format(dataset) )
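# Shape note (illustrative): the MNIST loader returns flattened (N, 784) float32
# arrays, while the SVHN branch yields (N, 32, 32, 1) grayscale arrays after the
# channel mean above; exp() below derives D from whatever shape it receives.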
def exp( model, dataset, seed, config, output_itr=100 ):
'''
benchmark the specified model (can be 'gauss', 'cat', 'cg' )
    with a given network shape/learning rate/random seed, etc.
at the end, the training/validation/testing error will be output in config
in the fields train_err (tuple), valid_err(tuple), test_err(tuple)
'''
# clear everything
tf.reset_default_graph()
# load data
data_train, data_valid, data_test = load_data( dataset )
def show_dataset( name, data ):
print( name )
print( 'shape:{0} dtype:{1}'.format( data.shape, data.dtype ) )
show_dataset( 'train', data_train )
show_dataset( 'valid', data_valid )
show_dataset( 'test', data_test )
# get the dimensions
D = np.prod( data_train.shape[1:] )
nn_shape = config.shape[0]
mid = config.shape[1]
d = nn_shape[mid]
encode_shape = nn_shape[:mid]
decode_shape = nn_shape[(mid+1):]
conv = ( dataset.lower() == 'svhn' )
print( 'D=', D )
print( 'd=', d )
print( 'encoder (hidden layers)', encode_shape )
print( 'decoder (hidden layers)', decode_shape )
print( 'conv:', conv )
if model.startswith( 'plain' ):
vae = AE( nn_shape )
elif model.startswith( 'gauss' ):
vae = GaussVAE( D, d, encode_shape, decode_shape, config.L, conv )
elif model.startswith( 'cat' ):
vae = CatVAE( D, d, encode_shape, decode_shape, config.L, config.ncat, conv )
elif model.startswith( 'cg' ):
vae = CgVAE( D, d, encode_shape, decode_shape, config.L, config.order, config.R, conv )
else:
raise RuntimeError( 'unknown model' )
loss = vae.loss()
train_op = tf.train.AdamOptimizer( config.lrate ).minimize( sum(loss) )
loss_sum0 = tf.summary.scalar( "L0", loss[0] )
loss_sum1 = tf.summary.scalar( "L1", loss[1] )
summary_op = tf.summary.merge_all()
    # estimate the final loss; split the data into chunks to avoid memory errors
def err( data, temp, test_trunk_size=1000 ):
l = []
for i in range( int( np.ceil( data.shape[0]/test_trunk_size ) ) ):
batch = data[i*test_trunk_size:(i+1)*test_trunk_size]
if model.startswith( 'cat' ) or model.startswith( 'cg' ):
l.append( sess.run( loss, feed_dict={ vae.X:batch, vae.temperature:temp } ) )
else:
l.append( sess.run( loss, feed_dict={ vae.X:batch } ) )
return list( np.array(l).mean(0) )
with tf.Session() as sess:
tf.set_random_seed( seed )
sess.run( tf.global_variables_initializer() )
summary_writer = tf.summary.FileWriter( 'log', graph=sess.graph )
for itr in range( 1, config.nbatch+1 ):
start_idx = np.mod( (itr-1)*config.batchsize, data_train.shape[0] )
end_idx = start_idx + config.batchsize
if end_idx <= data_train.shape[0]:
batch = data_train[start_idx:end_idx]
else:
end_idx = np.mod( end_idx, data_train.shape[0] )
batch = np.vstack( [data_train[start_idx:], data_train[:end_idx]] )
if model.startswith( 'cat' ) or model.startswith( 'cg' ):
progress = min( 2*(itr-1)/(config.nbatch-1), 1 )
cur_temp = np.exp( np.log( config.temp0 ) * (1-progress)
+ np.log( config.temp1 ) * progress )
_, cur_loss0, cur_loss1, summary_str = sess.run(
[train_op, loss[0], loss[1], summary_op],
feed_dict={ vae.X: batch, vae.temperature: cur_temp } )
else:
cur_temp = 0
_, cur_loss0, cur_loss1, summary_str = sess.run(
[train_op, loss[0], loss[1], summary_op],
feed_dict={ vae.X: batch } )
summary_writer.add_summary( summary_str, itr )
if itr % output_itr == 0:
print( "[{0:5d}] L={1:6.2f}+{2:6.2f}={3:6.2f}".format( itr, cur_loss0, cur_loss1, cur_loss0+cur_loss1 ) )
#if itr % 2000 == 0:
# _e = err( data.validation, cur_temp )
# print( 'valid :', _e, sum(_e) )
config.train_err = err( data_train, cur_temp )
config.valid_err = err( data_valid, cur_temp )
config.test_err = err( data_test, cur_temp )
#numG = 2000
#prior = vae.generator( numG )
#print( prior.get_shape() )
#result = sess.run( prior )
#plot_zspace( vae, sess, model )
class Configuration( object ):
'''a configuration object'''
def __str__( self ):
def report( a ):
rep = '{0}={1}'.format( a, getattr(self, a) )
if a.endswith( 'err' ): rep += '={0}'.format( sum( getattr(self, a) ) )
return rep
return '\n'.join( [ report( a ) for a in self.__dict__ ] )
def pack( args, subset ):
'''
return a list of configurations
'''
ret = []
l = [ getattr(args, key) for key in subset ] + [ range(args.repeat) ]
for line in itertools.product( *l ):
item = Configuration()
for key, number in zip( subset, line ):
setattr( item, key, number )
setattr( item, 'trial', line[-1] )
ret.append( item )
return ret
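# Illustrative: with args.lrate=[1e-3, 1e-4], args.L=[1] and args.repeat=2,
# pack(args, ['lrate', 'L']) yields 2 * 1 * 2 = 4 Configuration objects, one per
# (lrate, L, trial) combination.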
def main( args ):
'''
    validate different hyperparameter settings
'''
basic_arr = [ 'lrate', 'shape', 'nbatch', 'batchsize' ]
if args.model.startswith( 'cat' ):
configs = pack( args, basic_arr + ['L', 'temp0', 'temp1', 'ncat'] )
elif args.model.startswith( 'cg' ):
configs = pack( args, basic_arr + ['L', 'order', 'R', 'temp0', 'temp1'] )
elif args.model.startswith( 'gauss' ):
configs = pack( args, basic_arr + ['L'] )
else:
configs = pack( args, basic_arr )
print( 'trying {0} configurations'.format(len(configs)) )
for seed, config in enumerate( configs ):
print( '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' )
print( config )
exp( args.model, args.dataset, 2017+seed, config )
# sort the models based on validation error
top = np.argsort( np.array( [ sum(c.valid_err) for c in configs ] ) )
def report( rank ):
if rank >= len(top): return
print( '--== rank {0} ==--'.format( rank+1 ) )
print( configs[top[rank]] )
print( '' )
# report the best 5 models
print( args.model )
for rank in range( 5 ): report( rank )
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='run VAE on various configurations' )
# common options
parser.add_argument( '--lrate', nargs='+', type=float, default=[5e-4,1e-4,5e-3,1e-3], help='learning rates' )
parser.add_argument( '--nbatch', nargs='+', type=int, default=[10000], help='number of batches' )
parser.add_argument( '--batchsize', nargs='+', type=int, default=[100], help='batch size' )
parser.add_argument( '--L', nargs='+', type=int, default=[1, 10], help='number of latent samples' )
parser.add_argument( '--repeat', type=int, default=1, help='number of random initializations' )
# options for CatVAE
parser.add_argument( '--temp0', nargs='+', type=float, default=[ 1, .5 ], help='initial temperature' )
parser.add_argument( '--temp1', nargs='+', type=float, default=[ .5, ], help='final temperature' )
parser.add_argument( '--ncat', nargs='+', type=int, default=[5, 10, 15, 20], help='number of categories' )
# options for CgVAE
parser.add_argument( '--order', nargs='+', type=int, default=[5, 10, 15, 20], help='polynomial order' )
parser.add_argument( '--R', nargs='+', type=int, default=[51, 101], help='resolution' )
parser.add_argument( 'model', choices=('plain', 'gauss', 'cat', 'cg') )
parser.add_argument( 'dataset', choices=('mnist', 'svhn') )
args = parser.parse_args()
# the candidate network shapes are hard-coded in the following
if args.dataset == 'mnist':
args.shape = (
( [400,20,400], 1 ),
( [400,50,400], 1 ),
( [400,80,400], 1 ),
( [400,400,20,400,400], 2 ),
( [400,400,50,400,400], 2 ),
( [400,400,80,400,400], 2 ),
)
else:
args.shape = (
( [400,20,400], 1 ),
( [400,30,400], 1 ),
( [400,40,400], 1 ),
( [400,50,400], 1 ),
)
for key, value in args.__dict__.items():
print( key, ':', value )
main( args )
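# Example invocation (illustrative):
#   python run_vae.py --lrate 1e-3 --nbatch 5000 --L 1 gauss mnist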
| null |
run_vae.py
|
run_vae.py
|
py
| 10,994 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "vae.extract_decoder",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.sigmoid",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "vae.decodeZ",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "scipy.io.io.loadmat",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "scipy.io.io",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "scipy.io",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.reset_default_graph",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "vae.AE",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "vae.GaussVAE",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "vae.CatVAE",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "vae.CgVAE",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "vae.loss",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.scalar",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.merge_all",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.ceil",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "vae.X",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "vae.temperature",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "vae.X",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "numpy.mod",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.mod",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "vae.X",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "vae.temperature",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "vae.X",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "itertools.product",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 239,
"usage_type": "call"
}
] |
214711115
|
import traceback
from unittest import TestCase
import asyncio
import time
from multicp.pool.TransactionPool import Transaction
from multicp.query.Query import Query
from injection.Boostrap import Bootstrap
from injection.Facades import inject
from multicp.Facades import Multicp
from multicp.util.DateUtil import time_to_str
class TBTest(TestCase):
@inject()
def set_up(self, boot: Bootstrap=None):
self.loop = asyncio.get_event_loop()
boot.set_common_service([
{
"class": "multicp.Loader.MulticpLoader",
"params": {}
}
])
boot.load_service()
Multicp().load_db_config([
{
"type": "mysql",
"name": "test1",
"max_num": "10",
"init_num": "0",
"params": {
"host": "192.168.81.60",
"port": "3306",
"charset": "utf8",
"db": "basic",
"user": "root",
"password": "niceday",
}
},
])
def TBTest(self):
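        # this transaction exits the with-block normally, so both inserts are expected to commit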
with Transaction.with_transaction() as trans:
Query.add_trans(trans)
query = Query("test1.admin.admin_id")
query.add({
"admin_name":time_to_str()
})
time.sleep(1)
query.add({
"admin_name": time_to_str()
})
print(trans.is_active())
print(Query._thread_transaction_dict.values())
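        # here the RuntimeError raised inside the with-block is expected to roll both inserts back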
try:
with Transaction.with_transaction() as trans:
Query.add_trans(trans)
query = Query("test1.admin.admin_id")
query.add({
"admin_name":time_to_str()
})
time.sleep(1)
query.add({
"admin_name": time_to_str()
})
raise RuntimeError("xxx")
except Exception as e:
print(e)
print(Query._thread_transaction_dict.values())
| null |
multicp/tests/TBTest.py
|
TBTest.py
|
py
| 2,108 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "injection.Boostrap.Bootstrap",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "multicp.Facades.Multicp",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "injection.Facades.inject",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multicp.pool.TransactionPool.Transaction.with_transaction",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "multicp.pool.TransactionPool.Transaction",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "multicp.query.Query.Query.add_trans",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "multicp.util.DateUtil.time_to_str",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "multicp.util.DateUtil.time_to_str",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query._thread_transaction_dict.values",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query._thread_transaction_dict",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "multicp.pool.TransactionPool.Transaction.with_transaction",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "multicp.pool.TransactionPool.Transaction",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "multicp.query.Query.Query.add_trans",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "multicp.util.DateUtil.time_to_str",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "multicp.util.DateUtil.time_to_str",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query._thread_transaction_dict.values",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "multicp.query.Query.Query._thread_transaction_dict",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "multicp.query.Query.Query",
"line_number": 72,
"usage_type": "name"
}
] |
328838796
|
import os
import sys
import csv
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sys.path.append(os.pardir)
from Notebooks.LinkDatabases.FacebookData import FacebookDataDatabase
class SentimentAnalyzer:
analyser = SentimentIntensityAnalyzer()
@staticmethod
def GetPostSentiment(postId):
def get_sentiment_scores(sentence):
sentiment = SentimentAnalyzer.analyser.polarity_scores(sentence)
compound = sentiment["compound"]
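            # a fully neutral VADER result (neu == 1, pos == neg == 0) maps to the sentinel -1 so it can be filtered out later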
if (sentiment["neu"] == 1) and sentiment["neg"] == 0 and sentiment["pos"] == 0 and (compound == 0):
return -1
else:
return compound
message = facebookDb.get_message(postId)
if message:
sentiment = get_sentiment_scores(message)
return sentiment
sentiments = []
facebookDb = FacebookDataDatabase()
for data in facebookDb.get_post_ids():
postId = data[0]
sentiment_score = SentimentAnalyzer.GetPostSentiment(postId)
post_average_sentiment = facebookDb.getSentiment(postId)
if sentiment_score and post_average_sentiment:
if sentiment_score > -1 and post_average_sentiment > -1:
sentiments.append([sentiment_score, post_average_sentiment])
# format is post_sentiment, comments_sentiment
myFile = open('sentiments.csv', 'w')
with myFile:
writer = csv.writer(myFile)
writer.writerows(sentiments)
| null |
graphs/qq_plots/PostVsCommentSentiment/Regression.py
|
Regression.py
|
py
| 1,424 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.pardir",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Notebooks.LinkDatabases.FacebookData.FacebookDataDatabase",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 44,
"usage_type": "call"
}
] |
179213678
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/2 23:49
# @Author : Paulson
# @File : day3.py
# @Software: PyCharm
# @define : function
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, classification_report
from sklearn.externals import joblib
def mylinear():
"""
现行回归直接预测房屋价格
:return: None
"""
# 一、获取数据
lb = load_boston()
    # 2. Split the dataset into training and test sets
x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)
# print(y_train, y_test)
    # 3. Standardize the data. Do the target values need standardizing too? Yes.
    # Both the features and the targets must be standardized, so instantiate two scalers
std_x = StandardScaler()
x_train = std_x.fit_transform(x_train)
x_test = std_x.transform(x_test)
    # Target values
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train.reshape(-1, 1))  # reshape to 2-D
y_test = std_y.transform(y_test.reshape(-1, 1))
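    # std_y.inverse_transform is used below to map predictions back to the original price scale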
    # 4. Estimator prediction
    # Predict with the normal-equation (closed-form) solver
lr = LinearRegression()
lr.fit(x_train, y_train)
print("回归系数", lr.coef_)
# 保存训练好的模型
# joblib.dump(lr, "./temp/test.pkl")
# 使用训练好的模型训练
modle = joblib.load('./temp/test.pkl')
y_predict = std_y.inverse_transform(modle.predict(x_test))
print("保存的模型预测的结果", y_predict)
    # Predict house prices on the test set
    y_r_predict = std_y.inverse_transform(lr.predict(x_test))
    # print("Normal-equation predicted price for each test-set house:", y_r_predict)
    print("Normal-equation mean squared error:", mean_squared_error(std_y.inverse_transform(y_test), y_r_predict))
    # Predict house prices with stochastic gradient descent
    sgd = SGDRegressor()
    sgd.fit(x_train, y_train)
    print("Regression coefficients:", sgd.coef_)
    # Predict house prices on the test set
    y_sgd_predict = std_y.inverse_transform(sgd.predict(x_test))
    # print("SGD predicted price for each test-set house:", y_sgd_predict)
    print("SGD mean squared error:", mean_squared_error(std_y.inverse_transform(y_test), y_sgd_predict))
    # Predict house prices with ridge regression
    rd = Ridge(alpha=1.0)
    rd.fit(x_train, y_train)
    print("Regression coefficients:", rd.coef_)
    # Predict house prices on the test set
    y_rd_predict = std_y.inverse_transform(rd.predict(x_test))
    # print("Ridge predicted price for each test-set house:", y_rd_predict)
    print("Ridge mean squared error:", mean_squared_error(std_y.inverse_transform(y_test), y_rd_predict))
return None
def logistic():
"""
逻辑回归做二分类进行癌症预测(根据细胞的属性特征)
:return: None
"""
# 构造列标签名
column = ['Sample code number', 'Clump Thickness',
'Uniformity of Cell Size', 'Uniformity of Cell Shape',
'Marginal Adhesion', 'Single Epithelial Cell Size',
'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
    # Read the data
data = pd.read_csv('./data/breast-cancer-wisconsin.data', names=column)
print(data)
    # Handle missing values
data = data.replace(to_replace='?', value=np.nan)
data = data.dropna()
    # Split the data
x_train, x_test, y_train, y_test = train_test_split(data[column[1:10]], data[column[10]], test_size=0.25)
    # Standardize the features
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    x_test = std.transform(x_test)
    # Logistic regression prediction
lg = LogisticRegression(C=1.0)
lg.fit(x_train, y_train)
print(lg.coef_)
y_predict = lg.predict(x_test)
print("准备率:", lg.score(x_test, y_test))
print('召回率:\n', classification_report(y_test, y_predict, labels=[2, 4], target_names=["良性", "恶性"]))
return None
if __name__ == '__main__':
# mylinear()
logistic()
| null |
sklearn_base/day3.py
|
day3.py
|
py
| 4,290 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.datasets.load_boston",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.SGDRegressor",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.Ridge",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.mean_squared_error",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 117,
"usage_type": "call"
}
] |
413730152
|
import os
import sys
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
import pylab
import requirements as req
#pylab.rc('text', usetex=True)
#pylab.rc('font', family='serif')
telescope = "LAT"
bands = "LFL1", "LFL2", "MFL1", "MFL2", "HFL1", "HFL2"
nband = len(bands)
site = "chile"
nside = 4096
lmax_tf = 2048
npix = 12 * nside ** 2
lmax = 2 * nside
flavor = "noise_atmo_7splits" # cmb_r0 cmb_tensor_only_r3e-3 foregrounds noise_atmo_7splits
#flavor = "noise" # cmb_r0 cmb_tensor_only_r3e-3 foregrounds noise_atmo_7splits
split = 1
nsplits = 1
# Deep56 is 565 sq.deg (0.0137 fsky) of which 340 sq.deg (0.00824 fsky) is usable for power spectrum estimation
# ell pa1(150GHz) pa2(150GHz) pa3(150GHz) pa3(98GHz) pa3(98x150GHz)
act_tt = np.genfromtxt("deep56_TT_Nl_out_210317.txt", skip_header=1).T
act_ee = np.genfromtxt("deep56_EE_Nl_out_210317.txt", skip_header=1).T
def get_mask(fname_hits):
fname_mask = "mask_" + os.path.basename(fname_hits)
if os.path.isfile(fname_mask):
mask = hp.read_map(fname_mask)
else:
hits = hp.read_map(fname_hits)
good = hits > 0
ngood = np.sum(good)
sorted_hits = np.sort(hits[good])
hit_lim = sorted_hits[np.int(ngood * .01)]
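        # keep only pixels above the 1st percentile of the nonzero hit counts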
mask = hits > hit_lim
pix = np.arange(hits.size)
lon, lat = hp.pix2ang(nside, pix, lonlat=True)
lat_min = np.amin(lat[mask])
lat_max = np.amax(lat[mask])
mask = np.zeros(npix)
tol = 10.0 # degrees
mask[np.logical_and(lat_min + tol < lat, lat < lat_max - tol)] = 1
mask = hp.smoothing(mask, fwhm=np.radians(3), lmax=2048)
hp.write_map(fname_mask, mask)
return mask
def map2cl(m, mask):
m[0] = hp.remove_dipole(m[0])
m[m == hp.UNSEEN] = 0
fsky = np.sum(mask) / mask.size
cl = hp.anafast(m * mask, lmax=lmax, iter=0) / fsky
return cl
def get_cl(fname_cl, fname_map, fname_hits):
if os.path.isfile(fname_cl):
cl = hp.read_cl(fname_cl)
else:
mask = get_mask(fname_hits)
m = hp.read_map(fname_map, None)
cl = map2cl(m, mask)
hp.write_cl(fname_cl, cl)
return cl
def get_tf(fname_tf, fname_cmb_unlensed, fname_cmb_lensing, fname_output, fname_hits):
if os.path.isfile(fname_tf):
tf = hp.read_cl(fname_tf)
else:
inmap = hp.read_map(fname_cmb_unlensed, None) + hp.read_map(fname_cmb_lensing, None)
inmap *= 1e-6 # into K_CMB
inmap[0] = hp.remove_dipole(inmap[0])
outmap = hp.read_map(fname_output, None)
mask = get_mask(fname_hits)
cl_in = map2cl(inmap, mask)
cl_out = map2cl(outmap, mask)
tf = cl_out / cl_in
hp.write_cl(fname_tf, tf)
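    # beyond lmax_tf the measured transfer function is not trusted; clamp it so the correction never amplifies power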
tf[:, lmax_tf:] = 1
tf[tf > 1] = 1
return tf
rootdir = "/global/cscratch1/sd/zonca/cmbs4/map_based_simulations/202102_design_tool_run"
nrow, ncol = nband, 3
fig1 = plt.figure(figsize=[6 * ncol, 6 * 1])
ax1 = fig1.add_subplot(1, 3, 1)
ax2 = fig1.add_subplot(1, 3, 2)
ax3 = fig1.add_subplot(1, 3, 3)
fig2 = plt.figure(figsize=[6 * ncol, 6 * nrow])
ell = np.arange(lmax + 1)
ellnorm = ell * (ell + 1) / (2 * np.pi) * 1e12
iplot = 0
for band in bands:
path_hits = os.path.join(
rootdir,
"noise_atmo_7splits",
f"{telescope}-{band}_{site}/cmbs4_hitmap_{telescope}-{band}_{site}_nside{nside}_{split}_of_{nsplits}.fits",
)
# Transfer function
path_tf = f"tf_{telescope}_{band}_{site}.fits"
path_cmb_unlensed = os.path.join(
"/global/cscratch1/sd/zonca/cmbs4/map_based_simulations/202102_design_tool_input",
f"{nside}/cmb_unlensed_solardipole/0000/"
f"cmbs4_cmb_unlensed_solardipole_uKCMB_{telescope}-{band}_nside{nside}_0000.fits",
)
path_cmb_lensing = os.path.join(
"/global/cscratch1/sd/zonca/cmbs4/map_based_simulations/202102_design_tool_input",
f"{nside}/cmb_lensing_signal/0000/"
f"cmbs4_cmb_lensing_signal_uKCMB_{telescope}-{band}_nside{nside}_0000.fits",
)
path_cmb_output = os.path.join(
rootdir,
"cmb_r0",
f"{telescope}-{band}_{site}/cmbs4_KCMB_{telescope}-{band}_{site}_nside{nside}_1_of_1.fits",
)
tf = get_tf(path_tf, path_cmb_unlensed, path_cmb_lensing, path_cmb_output, path_hits)
# N_ell
path_cl = f"cl_{telescope}_{band}_{site}_{flavor}.fits"
path_noise_map = os.path.join(
rootdir,
flavor,
f"{telescope}-{band}_{site}/"
f"cmbs4_KCMB_{telescope}-{band}_{site}_nside{nside}_{split}_of_{nsplits}.fits",
)
cl = get_cl(path_cl, path_noise_map, path_hits) / tf
ax1.plot(ell[2:], tf[0][2:], label=band)
ax2.plot(ell[2:], tf[1][2:], label=band)
ax3.plot(ell[2:], tf[2][2:], label=band)
freq = req.band2freq[band]
fwhm = req.Chile_LAT[freq][0]
bl = req.get_bl(fwhm, ell)
nltt = req.NlTT_Chile_LAT[freq]
nlee = req.NlEE_Chile_LAT[freq]
iplot += 1
ax = fig2.add_subplot(nrow, ncol, iplot)
ax.set_title(f"TT {band} / {freq}GHz")
ax.set_xlabel("Multipole, $\ell$")
ax.set_ylabel("D$\ell$ [$\mu$K$^2$]")
ax.loglog(req.fiducial_ell, req.fiducial_TT, "k", label="CMB")
ax.loglog(req.ells, nltt, label="requirement")
ax.loglog(ell, ellnorm * cl[0] * bl, label=f"Sim")
if band == "MFL1":
ax.loglog(act_tt[0], act_tt[4], label="ACT PA3")
elif band == "MFL2":
ax.loglog(act_tt[0], act_tt[1], label="ACT PA1")
ax.loglog(act_tt[0], act_tt[2], label="ACT PA2")
ax.loglog(act_tt[0], act_tt[3], label="ACT PA3")
#ax.set_xscale("linear")
#ax.set_yscale("log")
ax.set_xlim([20, 8000])
ax.set_ylim([1e-1, 1e7])
iplot += 1
ax = fig2.add_subplot(nrow, ncol, iplot)
ax.set_title(f"EE {band} / {freq}GHz")
ax.set_xlabel("Multipole, $\ell$")
ax.set_ylabel("D$\ell$ [$\mu$K$^2$]")
ax.loglog(req.fiducial_ell, req.fiducial_EE, "k", label="CMB")
ax.loglog(req.ells, nlee, label="requirement")
ax.loglog(ell, ellnorm * cl[1] * bl, label=f"Sim")
if band == "MFL1":
ax.loglog(act_ee[0], act_ee[4], label="ACT PA3")
elif band == "MFL2":
ax.loglog(act_ee[0], act_ee[1], label="ACT PA1")
ax.loglog(act_ee[0], act_ee[2], label="ACT PA2")
ax.loglog(act_ee[0], act_ee[3], label="ACT PA3")
#ax.set_xscale("linear")
#ax.set_yscale("log")
ax.set_xlim([20, 8000])
ax.set_ylim([1e-4, 1e5])
iplot += 1
ax = fig2.add_subplot(nrow, ncol, iplot)
ax.set_title(f"BB {band} / {freq}GHz")
ax.set_xlabel("Multipole, $\ell$")
ax.set_ylabel("D$\ell$ [$\mu$K$^2$]")
ax.loglog(req.fiducial_ell, req.fiducial_BB, "k", label="CMB")
ax.loglog(req.ells, nlee, label="requirement")
ax.loglog(ell, ellnorm * cl[2] * bl, label=f"Sim")
if band == "MFL1":
ax.loglog(act_ee[0], act_ee[4], label="ACT PA3")
elif band == "MFL2":
ax.loglog(act_ee[0], act_ee[1], label="ACT PA1")
ax.loglog(act_ee[0], act_ee[2], label="ACT PA2")
ax.loglog(act_ee[0], act_ee[3], label="ACT PA3")
ax.legend(loc="best")
#ax.set_xscale("linear")
#ax.set_yscale("log")
ax.set_xlim([20, 8000])
ax.set_ylim([1e-4, 1e5])
for ax in [ax1, ax2, ax3]:
ax.set_xlabel(r"Multipole, $\ell$")
ax.set_ylabel("Transfer function")
ax.set_xlim([1, lmax + 1])
ax.set_ylim([-0.1, 1.1])
ax.axhline(1.0, color="k", linestyle="--")
ax.set_xscale("log")
ax3.legend(loc="best")
fig1.savefig("chile_lat_tf.png")
fig2.savefig(f"chile_lat_validation.{flavor}.png")
plt.show()
| null |
reference_tool_round_2/validation/validate_chlat.apod.py
|
validate_chlat.apod.py
|
py
| 7,534 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.genfromtxt",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "healpy.read_map",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "healpy.pix2ang",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.logical_and",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "healpy.smoothing",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "healpy.write_map",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "healpy.remove_dipole",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "healpy.UNSEEN",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "healpy.anafast",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "healpy.read_cl",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "healpy.write_cl",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "healpy.read_cl",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "healpy.remove_dipole",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "healpy.read_map",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "healpy.write_cl",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "requirements.band2freq",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "requirements.Chile_LAT",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "requirements.get_bl",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "requirements.NlTT_Chile_LAT",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "requirements.NlEE_Chile_LAT",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_ell",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_TT",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "requirements.ells",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_ell",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_EE",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "requirements.ells",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_ell",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "requirements.fiducial_BB",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "requirements.ells",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
}
] |
39051373
|
from django.contrib import admin
from cisuba.account.models import Account, Hello
from cisuba.models import PartnerProduct, Board
from cisuba.models import PartnerProduct_Image, ProductTag, ProductDivsion
from web.models import Anounce, Faq, Event, PartnerJoin
# Register your models here.
"""
class PartnerbranchProductAdmin(admin.TabularInline):
model = Partnerbranch_Product
extra = 1
class PartnerbranchImageAdmin(admin.TabularInline):
model = Partnerbranch_Image
extra = 2
class PartnerbranchAdmin(admin.ModelAdmin):
inlines = [
PartnerbranchProductAdmin,
PartnerbranchImageAdmin
]
admin.site.register(Board)
admin.site.register(Partnerbranch_Product)
admin.site.register(ProductDivsion)
admin.site.register(Partnerbranch, PartnerbranchAdmin)
admin.site.register(Partnerbranch_Image)
admin.site.register(ProductTag)
"""
class PartnerProductImageAdmin(admin.TabularInline):
model = PartnerProduct_Image
extra = 2
class PartnerProductAdmin(admin.ModelAdmin):
list_display = ('id','gubunAdress','detailAddress','partnerName')
search_fields = ['partnerName']
fieldsets = [
(None, {'fields': ['partnerName','mainThumbnail','isFreePartner','hit']}),
('주소정보', {'fields': ['gubunAdress','highlightAddress','shortAddress','detailAddress']}),
('상품정보', {'fields': ['detailAbout','useAbout']}),
('개장 및 폐장시간', {'fields': ['startStime','startEtime']}),
('전화번호 및 할인가', {'fields': ['phone','discount']}),
('GPS 정보', {'fields': ['lat','lng']}),
('태그', {'fields': ['tag']}),
('구분', {'fields': ['divisions']}),
#('조조,점심,야간 시간대 설정', {'fields': ['morningStime','morningEtime','lunchStime','lunchEtime','dinnerStime','dinnerEtime']}),
('조조,점심,야간 가격 설정', {'fields': ['morningPrice','lunchPrice','dinnerPrice']}),
('생성일', {'fields': ['createdDate','publishedDate']}),
]
inlines = [
PartnerProductImageAdmin
]
class PostModelAdmin(admin.ModelAdmin):
pass
admin.site.register(PartnerProduct, PartnerProductAdmin)
admin.site.register(Board)
admin.site.register(ProductTag)
admin.site.register(ProductDivsion)
admin.site.register(Anounce, PostModelAdmin)
admin.site.register(Faq, PostModelAdmin)
admin.site.register(Event, PostModelAdmin)
admin.site.register(PartnerJoin)
admin.site.register(Account)
admin.site.register(Hello)
| null |
cisuba/admin.py
|
admin.py
|
py
| 2,503 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.admin.TabularInline",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "cisuba.models.PartnerProduct_Image",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cisuba.models.PartnerProduct",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cisuba.models.Board",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "cisuba.models.ProductTag",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cisuba.models.ProductDivsion",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "web.models.Anounce",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "web.models.Faq",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "web.models.Event",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "web.models.PartnerJoin",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cisuba.account.models.Account",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cisuba.account.models.Hello",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 74,
"usage_type": "name"
}
] |
215381946
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
import time
from help_sheet import *
URL = 'http://localhost:1667'
class TestConduitapp(object):
def setup(self):
browser_options = Options()
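        # run Chrome headless so the suite can run without a display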
browser_options.headless = True
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
self.driver.get(URL)
def teardown(self):
self.driver.quit()
###########Test1_REGISTRATION
def test_registration(self):
self.driver.find_element_by_xpath('//a[@href="#/register"]').click()
self.driver.find_element_by_xpath('//input[@placeholder="Username"]').send_keys("Leille")
self.driver.find_element_by_xpath('//input[@placeholder="Email"]').send_keys("[email protected]")
self.driver.find_element_by_xpath('//input[@placeholder="Password"]').send_keys("Progmasters2021")
time.sleep(4)
self.driver.find_element_by_xpath('//button').click()
success = WebDriverWait(
self.driver, 10).until(
EC.visibility_of_element_located((By.XPATH, ('/html/body/div[2]/div/div[3]')))
)
assert success.text == "Your registration was successful!"
self.driver.find_element_by_xpath('//button[@class="swal-button swal-button--confirm"]').click()
time.sleep(2)
###########Test2_LOGIN
def test_login(self):
self.driver.find_element_by_xpath('//a[@href="#/login"]').click()
self.driver.find_element_by_xpath('//input[@placeholder="Email"]').send_keys("[email protected]")
self.driver.find_element_by_xpath('//input[@placeholder="Password"]').send_keys("Progmasters2021")
sign_in_button = WebDriverWait(
self.driver, 10).until(
EC.visibility_of_element_located((By.XPATH, ('//form/button')))
)
sign_in_button.click()
time.sleep(4)
proof = self.driver.find_element_by_xpath("//a[@href = '#/settings']")
element = WebDriverWait(
self.driver, 10).until(
EC.visibility_of_element_located((By.XPATH, "//a[@href = '#/settings']"))
)
assert proof.text == " Settings"
###########Test3_LOGOUT
def test_logout(self):
login(self.driver)
logout_button = WebDriverWait(
self.driver, 10).until(
EC.visibility_of_element_located((By.XPATH, ('//i[@class="ion-android-exit"]')))
)
logout_button.click()
buttonlogin = WebDriverWait(
self.driver, 10).until(
EC.visibility_of_element_located((By.XPATH, ('//*[@href="#/login"]')))
)
assert buttonlogin.text == 'Sign in'
###########Test4_COOKIES
def test_cookies(self):
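        # accepting the cookie-policy banner should add at least one new cookie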
beforecookies = self.driver.get_cookies()
numberofbeforecookies = (len(beforecookies))
acceptbutton = self.driver.find_element_by_xpath('//div[@id="cookie-policy-panel"]//button[2]')
acceptbutton.click()
time.sleep(4)
aftercookies = self.driver.get_cookies()
numberofaftercookies = (len(aftercookies))
assert numberofaftercookies > numberofbeforecookies
###########Test5_MULTIPLEPAGES
def test_multiplepages(self):
login(self.driver)
yourfeed = self.driver.find_element_by_xpath("//a[@href = '#/my-feed']")
yourfeed.click()
firstarticle = self.driver.find_element_by_xpath('//*[@id="app"]/div/div[2]/div/div[1]/div[2]/div/div/div[1]')
time.sleep(4)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(4)
secondpage = self.driver.find_element_by_xpath('//ul[@class="pagination"]/li[2]/a')
secondpage.click()
time.sleep(4)
secondpagefirstarticle = self.driver.find_element_by_xpath(
'//*[@id="app"]/div/div[2]/div/div[1]/div[2]/div/div/div')
assert firstarticle != secondpagefirstarticle
###########Test6_NEWDATA
def test_createnewarticle(self):
login(self.driver)
self.driver.find_elements_by_xpath('//a[@class="nav-link"]')[0].click()
time.sleep(4)
self.driver.find_element_by_xpath('//input[@placeholder="Article Title"]').send_keys(
"Test article title")
self.driver.find_elements_by_xpath('//form//input')[1].send_keys("About article")
self.driver.find_element_by_xpath(
'//form//textarea[@placeholder="Write your article (in markdown)"]').send_keys(
"Content of article")
self.driver.find_element_by_xpath('//button[normalize-space(text()="Publish Article")]').click()
time.sleep(4)
deletebutton = self.driver.find_element_by_xpath('//span[contains(text(),"Delete")]')
assert deletebutton.text == " Delete Article"
###########Test7_DELETEDATA
def test_deletearticle(self):
login(self.driver)
newarticle(self.driver)
home = self.driver.find_element_by_xpath("//a[@href = '#/']")
home.click()
time.sleep(4)
yourfeed = self.driver.find_element_by_xpath("//a[@href = '#/my-feed']")
yourfeed.click()
beforetitles = self.driver.find_elements_by_xpath("//a[@class = 'preview-link']/h1")
numberofbefortitles = (len(beforetitles))
time.sleep(4)
madearticle = self.driver.find_element_by_xpath("//a[@href = '#/articles/test-article-title']")
madearticle.click()
time.sleep(4)
deletebutton = self.driver.find_element_by_xpath('//span[contains(text(),"Delete")]')
deletebutton.click()
time.sleep(4)
home = self.driver.find_element_by_xpath("//a[@href = '#/']")
home.click()
time.sleep(4)
yourfeed.click()
aftertitles = self.driver.find_elements_by_xpath("//a[@class = 'preview-link']/h1")
numberofaftertitles = (len(aftertitles))
assert numberofbefortitles != numberofaftertitles
###########Test8_MODIFYDATA
def test_modifydata(self):
login(self.driver)
self.driver.find_element_by_xpath("//a[@href = '#/settings']").click()
time.sleep(4)
self.driver.find_element_by_xpath('//input[@placeholder="URL of profile picture"]').send_keys(
"https://upload.wikimedia.org/wikipedia/commons/3/3f/Spiders_web.svg")
time.sleep(4)
self.driver.find_element_by_xpath('//button[@class="btn btn-lg btn-primary pull-xs-right"]').click()
time.sleep(4)
successfulupdate = self.driver.find_element_by_xpath('/html/body/div[2]/div/div[2]')
assert successfulupdate.text == "Update successful!"
###########Test9_LISTDATA
def test_listdata(self):
tags = self.driver.find_elements_by_xpath("//div[@class='sidebar']/div/a")
list_of_tags = []
for i in tags:
list_of_tags.append(i.text)
print(list_of_tags)
assert len(list_of_tags) == len(tags)
###########Test10_DATAFROMSOURCE
def test_datafromsource(self):
login(self.driver)
newarticle(self.driver)
time.sleep(4)
with open("datafromsource.txt", "r") as f:
comments = f.readlines()
for i in comments:
self.driver.refresh()
time.sleep(4)
writecomment = self.driver.find_element_by_xpath("//textarea[@placeholder='Write a comment...']")
time.sleep(4)
writecomment.send_keys(i)
time.sleep(4)
commentbutton = self.driver.find_element_by_xpath("//button[text()='Post Comment']")
time.sleep(4)
commentbutton.click()
time.sleep(4)
cards = self.driver.find_elements_by_xpath("//div[@class='card']")
assert len(cards) == 4
###########Test11_SAVEDATA
def test_savedata(self):
tags = self.driver.find_elements_by_xpath("//div[@class='sidebar']/div/a")
with open("text.txt", "w") as f:
for i in tags:
f.write(i.text + "\n")
with open("text.txt", "r") as g:
tagss = g.readlines()
number_of_tags = 0
for i in tagss:
number_of_tags = number_of_tags + 1
print(number_of_tags)
print(len(tags))
assert number_of_tags == len(tags)
| null |
test/test_conduit.py
|
test_conduit.py
|
py
| 8,618 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 241,
"usage_type": "call"
}
] |
382472416
|
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Category, Product, Brand
from cart.forms import CartAddProductForm
from .forms import PriceForm
from blog.forms import CommentForm
from blog.models import Comment
def base(request):
categories = Category.objects.all()
brands = Brand.objects.all()
featured_items = Product.objects.filter(features_items=True)
recommended_item = Product.objects.filter(recommended_item=True)
slider_items = Product.objects.filter(slider_item=True)
cart_product_form = CartAddProductForm()
return render(request, "shop/base.html",
{"brands": brands,
"categories": categories,
"featured_items": featured_items,
"recommended_item": recommended_item,
"cart_product_form": cart_product_form,
"slider_items": slider_items})
def product_list(request, category_slug=None, brand_slug=None):
category = None
brand = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
brands = Brand.objects.all()
cart_product_form = CartAddProductForm()
price_form = PriceForm(request.GET)
featured_items = Product.objects.filter(features_items=True)
recommended_item = Product.objects.filter(recommended_item=True)
slider_items = Product.objects.filter(slider_item=True)
if category_slug:
category = get_object_or_404(Category, slug=category_slug)
products = products.filter(category=category)
elif brand_slug:
brand = get_object_or_404(Brand, slug=brand_slug)
products = products.filter(brand=brand)
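    # apply optional minimum/maximum price bounds taken from the GET form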
if price_form.is_valid():
if price_form.cleaned_data["min_price"]:
products = products.filter(price__gte=price_form.cleaned_data["min_price"])
if price_form.cleaned_data["max_price"]:
products = products.filter(price__lte=price_form.cleaned_data["max_price"])
paginator = Paginator(products, 6)
page = request.GET.get("page")
products_list = paginator.get_page(page)
return render(request, "shop/product_list.html",
{"category": category, "brand": brand,
"brands": brands, "price_form": price_form,
"categories": categories,
"products": products,
"featured_items": featured_items,
"slider_items": slider_items,
"recommended_item": recommended_item,
"cart_product_form": cart_product_form,
"products_list": products_list})
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id,
slug=slug)
cart_product_form = CartAddProductForm()
categories = Category.objects.all()
brands = Brand.objects.all()
featured_items = Product.objects.filter(features_items=True)
recommended_item = Product.objects.filter(recommended_item=True)
slider_items = Product.objects.filter(slider_item=True)
if product.product_comment:
comments = product.product_comment.filter(active=True)
if request.method == "POST":
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.comment_author = request.user
new_comment.product = product
new_comment.save()
else:
comment_form = CommentForm()
return render(request, "shop/detail.html",
{"product": product,
"brands": brands,
"categories": categories,
"cart_product_form": cart_product_form,
"featured_items": featured_items,
"slider_items": slider_items,
"recommended_item": recommended_item,
"comments": comments,
"comment_form": comment_form})
| null |
shop/views.py
|
views.py
|
py
| 4,117 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Category.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.Brand.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Brand.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Brand",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "cart.forms.CartAddProductForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Category.objects.all",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "models.Brand.objects.all",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Brand.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.Brand",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "cart.forms.CartAddProductForm",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "forms.PriceForm",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Category",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.Brand",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "models.Product",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "cart.forms.CartAddProductForm",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.Category.objects.all",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "models.Brand.objects.all",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "models.Brand.objects",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "models.Brand",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "models.Product.objects.filter",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "blog.forms.CommentForm",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "blog.forms.CommentForm",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 92,
"usage_type": "call"
}
] |
459568341
|
# -*- coding: utf-8 -*-
'''
import subprocess
#p = subprocess.check_output(["python", "/Users/shrey/test.py"])
#p = subprocess.check_output(["echo" "hi"])
#p = subprocess.run(["echo $(python /Users/shrey/test.py)"], shell=True, stdout=subprocess.PIPE)
p1 = subprocess.run(['C:/Python27/python.exe', 'C:/Users/Amanul/Downloads/GM/Project/RNN code/story_cloze-master/skip_thought_vector.py'], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print (p1.stdout)
print(p1)
'''
from stanfordcorenlp import StanfordCoreNLP
import pandas as pd
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
import os
import sys
import json
from skip_thought_vector import skip_thought_vector
class interface:
def __init__(self, host='http://localhost', port=9000):
#self.file_path = 'ROCStories_winter2017 - ROCStories_winter2017.csv'
self.file_path = 'process_data_final_.csv'
self.nlp = StanfordCoreNLP(host, port=port, timeout=30000)
self.props = {
'annotators': 'tokenize,ssplit,pos,lemma,ner,parse,depparse,dcoref,relation',
'pipelineLanguage': 'en',
'outputFormat': 'json'
}
def annotate(self, sentence):
return json.loads(self.nlp.annotate(sentence, properties=self.props))
def get_ner_tags(self, ann_dict):
person_list = dict()
c = 1
for sent in ann_dict['sentences']:
for line in sent['tokens']:
#print("line: " , line)
if(line['ner'] == 'PERSON'):
#print("line : ", line)
name = line['word']
if(name not in person_list):
if(name == 'John' or name == 'Peter' or name == 'Jake' or name == 'David'):
person_list[line['word']] ='Male'
else:
person_list[line['word']] ='Female'
c+=1
#print("found ", line['word'])
return(person_list)
def sent_tokenized_dict(self, ann_dict):
sent_dict = {}
i_sent = 1
for sent in ann_dict['sentences']:
sent_dict[i_sent] = []
#w_index = 0
#print(len(sent['tokens']))
for line in sent['tokens']:
#print(line)
#if(line["word"] != '.'):
sent_dict[i_sent].append(line["word"])
#w_index += 1
#sent_dict[i_sent].append('.')
i_sent+=1
#print(sent_dict)
return sent_dict
def replace_co_ref(self, ann_dict, person_list):
sent_dict = self.sent_tokenized_dict(ann_dict)
#for sent in ann_dict['sentences']:
# print(sent["tokens"])
coref_dict = ann_dict['corefs']
j = 1
coref_flag = False
for key in coref_dict:
name = None
#word_list = set()
ref = coref_dict[key]
#print("Ref [",j,"]: ", ref, "\n")
if(len(ref) > 0 and (ref[0]['text'] in person_list) and ref[0]['type'] == 'PROPER'):
coref_flag = True
name = ref[0]['text']
#print("Name = ", name)
for ref in coref_dict[key]:
if(ref['type'] == 'PROPER' or ref['type'] == 'PRONOMINAL'):
#print("REF word ", ref['text'], "\t", ref['sentNum'], "\t", ref['startIndex'])
sent_num = ref['sentNum']
start_index = ref['startIndex']-1
#end_index = ref['endIndex']-1
#for index in range(start_index, end_index):
if(ref['text'] == 'his' or ref['text'] == 'her' or ref['text'] == 'His' or ref['text'] == 'Her'):
sent_dict[sent_num][start_index] = name +str("'s")
else:
sent_dict[sent_num][start_index] = name
j+=1
if(coref_flag == False):
print("sent ", sent_dict)
if(coref_flag != True):
return None
processed_text = []
for key in sent_dict:
#print("Key = ", key)
#print(sent_dict[key])
processed_text.append(" ".join(sent_dict[key]))
return(processed_text)
def find_character_details(self, sent):
ann_dict = self.annotate(sent)
person_list = self.get_ner_tags(ann_dict)
return person_list
if __name__ == '__main__':
story = []
interf_ = interface()
stv = skip_thought_vector()
character_name = "Mohit"
first_sentence = "John thought Peter should buy a trailer and haul it with his car."
character_list = interf_.find_character_details(first_sentence)
story = ["John thought Peter should buy a trailer and haul it with his car.",
"Peter thought a truck would be better for what he needed.",
"John pointed out two vehicles were much more expensive.",
"Peter was set in his ways with conventional thinking.",
"He ended up buying the truck he wanted despite John's advice."]
'''
for i in range(1,5):
sentence = " ".join(story[0:i])
ann_dict = interf_.annotate(sentence)
processed_text = interf_.replace_co_ref(ann_dict, character_list)
print(processed_text, " \n")
'''
system_generated_sentences = ["Mohit was a man of principal and it would violate his principals to not do it even for a day.",
"It was getting close to the end of day and Tyler was nowhere in sight.",
"Finally his friend Amanul came to his rescue and gave him what he wanted.",
"Mohit jumped with joy and was happy as never before."]
print("Welcome to the world of imagination")
print("How it works? \nThis is an interactive story generating system that uses human authoring along with",
"the system's expertise to generate intriguing short stories. \n The system generates the first sentence",
"for the story, following which it makes suggestions for the next sentence. You can choose from the",
"suggestions you see in the list by typing the corresponding digit for your choosen next sentence, ",
"or you can eneter a sentence which then becomes the part of the narration.\n This procedure continues till",
" we have a short 5 sentence story.\n")
print("Put your author hat one and get ready to help the system generate novel and interesting stories\n")
print("This is a short story about", character_name)
print("The story starts with this opening sentence \n\n'",first_sentence,"'\n")
print("Choose either from the following system generated suggestions for the next sentence in the story or you can enter you own sentence here\n")
print("To choose you just need to enter the digit corresponding to that suggestion\n")
j = 1
story = [first_sentence]
character_list = interf_.find_character_details(first_sentence)
system_generated_sentences = stv.generate_vector(story)
for sent in system_generated_sentences:
print(j, " ",sent)
j += 1
inp = input()
while(1):
        if(len(inp) == 1 and inp.isdigit() and 1 <= int(inp) <= len(system_generated_sentences)):
inp = int(inp)
next_sentence = system_generated_sentences[inp-1]
story.append(next_sentence)
#story = story + str(system_generated_sentences[inp-1])
else:
next_sentence = str(inp)
story.append(next_sentence)
#story = story + " " + str(inp)
j = 1
print("Story so far")
for st in story:
print(st)
sentence = " ".join(story[0:i])
ann_dict = interf_.annotate(sentence)
processed_story = interf_.replace_co_ref(ann_dict, character_list)
system_generated_sentences = stv.generate_vector(processed_story)
for sent in system_generated_sentences:
print(j, " ",sent)
j += 1
if(len(story) == 5):
break
inp = input()
print("\nFantastic! This is an amazing story. You should definitely try your hands on writing more stories, you could be the next Shakespeare")
print("\n1final story: ")
print(" ".join(story))
| null |
CODE/.ipynb_checkpoints/interface-checkpoint.py
|
interface-checkpoint.py
|
py
| 8,587 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "stanfordcorenlp.StanfordCoreNLP",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "skip_thought_vector.skip_thought_vector",
"line_number": 127,
"usage_type": "call"
}
] |
519800689
|
# -*_ coding:utf-8 _*-
import openpyxl
class DoExcel:
# def __init__(self,ExcelName,SheetName):
# #ExcelReadName,SheetReadName,ExcelWriteName,SheetWriteName):
# self.ExcelName = ExcelName
# self.SheetName = SheetName
# # self.ExcelWriteName = ExcelWriteName
# # self.SheetWriteName = SheetWriteName
@staticmethod
def get_top_row(ExcelName,SheetName):
top_row = {}
wb = openpyxl.load_workbook(ExcelName)
sheet = wb[SheetName]
for i in range(1,(sheet.max_column+1)):
top_row[sheet.cell(1,i).value] = i
return top_row,sheet
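    # Illustrative result (assumed header row "id", "module", "description"):
    # get_top_row returns ({'id': 1, 'module': 2, 'description': 3}, sheet),
    # i.e. a header-name -> 1-based column-index map plus the opened sheet.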
def read_excel(self,ExcelReadName,SheetReadName):
"""获取excel内容并返回[{},{},{}]格式"""
excel_list = []
top_row_read,sheet = DoExcel.get_top_row(ExcelReadName,SheetReadName)
for i in range(2,(sheet.max_row+1)):
each_dict = {}
for item in top_row_read.keys():
each_dict[item] = sheet.cell(i,top_row_read[item]).value
excel_list.append(each_dict)
return excel_list
    def write_excel(self,ExcelWriteName,SheetWriteName,Excel_list=None):
        # Avoid a mutable default argument; default to a fresh empty list per call.
        if Excel_list is None:
            Excel_list = []
        top_row_write, _sheet = DoExcel.get_top_row(ExcelWriteName,SheetWriteName)
wb = openpyxl.load_workbook(ExcelWriteName)
sheet = wb.active
        #sheet.cell(row=2, column=1, value=1) writes a value into the given row and column
k = 2
for item in Excel_list:
for i in top_row_write.keys():
sheet.cell(row=k,column=top_row_write[i],value=str(item[i]))
k += 1
wb.save(ExcelWriteName)
wb.close()
if __name__ == "__main__":
list1 = [{'TestResult': '未通过', 'res_json': {'status': 0, 'code': '20110', 'data': None, 'msg': '手机号码已被注册'}, 'real_code': '20110', 'id': 1, 'module': '注册', 'description': '正常注册'}, {'TestResult': '通过', 'res_json': {'status': 0, 'code': '20103', 'data': None, 'msg': '手机号不能为空'}, 'id': 2, 'module': '注册', 'description': '手机号为空'}, {'TestResult': '通过', 'res_json': {'status': 0, 'code': '20109', 'data': None, 'msg': '手机号码格式不正确'}, 'id': 3, 'module': '注册', 'description': '手机号位数不正确'}, {'TestResult': '通过', 'res_json': {'status': 0, 'code': '20110', 'data': None, 'msg': '手机号码已被注册'}, 'id': 4, 'module': '注册', 'description': '手机号已经被注册'}, {'TestResult': '通过', 'res_json': {'status': 0, 'code': '20103', 'data': None, 'msg': '密码不能为空'}, 'id': 5, 'module': '注册', 'description': '密码为空'}, {'TestResult': '通过', 'res_json': {'status': 0, 'code': '20108', 'data': None, 'msg': '密码长度必须为6~18'}, 'id': 6, 'module': '注册', 'description': '密码过短'}]
DoExcel().write_excel("test_data/ExcelResult.xlsx","testCaseWrite",list1)
| null |
homework/homework_1108/DoExcel.py
|
DoExcel.py
|
py
| 2,888 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "openpyxl.load_workbook",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 35,
"usage_type": "call"
}
] |
353183196
|
__author__ = 'salamio'
import configparser
class RConfig(configparser.RawConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
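# Illustrative example (hypothetical setup.cfg, not shipped here):
#   [TestA]
#   class = my.module.ClassA
# as_dict() then returns {'TestA': {'class': 'my.module.ClassA'}}
# (RawConfigParser lower-cases option names; section names keep their case).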
if __name__ == "__main__":
f = RConfig()
f.read("../etc/setup.cfg")
d = f.as_dict()
    print(d)
    print(d['TestA']['class'])
| null |
aliens/PythonProjects/RedshiftDeployment/core/RConfig.py
|
RConfig.py
|
py
| 415 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "configparser.RawConfigParser",
"line_number": 7,
"usage_type": "attribute"
}
] |
393366134
|
from collections import defaultdict, deque
lines = [line.strip() for line in open("../Inputs/day12", "r")]
map = defaultdict(list)
for line in lines:
left, right = line.split("-")
if right != "start":
map[left].append(right)
if left != "start":
map[right].append(left)
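# Illustrative example (assumed input, not the real puzzle file): the lines
#   start-A, A-b, A-end
# build map = {'start': ['A'], 'A': ['b', 'end'], 'b': ['A'], 'end': ['A']};
# no edges point back into "start", so every path leaves it exactly once.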
def singleVisit(map):
validPaths = 0
paths = deque([(["start"], set(), False)])
while paths:
path, smallCaves, doubleVisited = paths.popleft()
for cave in map[path[-1]]:
if cave == 'end':
validPaths += 1
elif cave.islower():
if cave not in smallCaves:
s = smallCaves|{cave}
paths.append((path + [cave], s, doubleVisited))
else:
paths.append((path + [cave], smallCaves, doubleVisited))
return validPaths
def doubleVisit(map):
validPaths = 0
paths = deque([(["start"], set(), False)])
while paths:
path, smallCaves, doubleVisited = paths.popleft()
for cave in map[path[-1]]:
if cave == 'end':
validPaths += 1
elif cave.islower():
if cave not in smallCaves:
s = smallCaves|{cave}
paths.append((path + [cave], s, doubleVisited))
elif not doubleVisited:
paths.append((path + [cave], smallCaves, True))
else:
paths.append((path + [cave], smallCaves, doubleVisited))
return validPaths
print("2021 Day 12, Part 1: " + str(singleVisit(map)))
print("2021 Day 12, Part 2: " + str(doubleVisit(map)))
| null |
2021/AoC2021Day12/AoC2021Day12.py
|
AoC2021Day12.py
|
py
| 1,638 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 31,
"usage_type": "call"
}
] |
328548779
|
# Given a set of n jobs where each job i has a deadline di >=1 and profit pi>=0.
# Only one job can be scheduled at a time. Each job takes 1 unit of time to complete.
# We earn the profit if and only if the job is completed by its deadline.
# The task is to find the subset of jobs that maximizes profit.
import itertools
# from disjoint_set import DisJointSet
counter = itertools.count()
class DisJointSet(object):
def __init__(self, n):
self.parent = [i for i in range(n + 1)]
def find(self, x):
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u, v):
self.parent[v] = u
class Job(object):
def __init__(self, id, deadline, profit):
# self.id = next(counter)
self.id = id
self.dead_line = deadline
self.profit = profit
def __lt__(self, other):
return self.profit > other.profit
def job_sequence(self, jobs):
jobs = sorted(jobs)
max_time_slot = max(jobs, key=lambda x: x.dead_line)
sequence = []
# print(max_time_slot.dead_line)
dsu = DisJointSet(max_time_slot.dead_line)
for job in jobs:
available_slot = dsu.find(job.dead_line)
if available_slot > 0:
dsu.union(dsu.find(available_slot - 1), available_slot)
sequence.append(job.id)
return sequence
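# Illustrative walkthrough (data from the demo below): sorted by profit the
# jobs are a(100,d3), c(27,d3), d(25,d2), b(19,d3), e(15,d3). find(3)=3 ->
# schedule a, union 2<-3; find(3)=2 -> schedule c, union 1<-2; find(2)=1 ->
# schedule d, union 0<-1; b and e both resolve to slot 0 -> dropped.
# Result: ['a', 'c', 'd'].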
jobs = list()
jobs.append(Job('a', 3, 100))
jobs.append(Job('b', 3, 19))
jobs.append(Job('c', 3, 27))
jobs.append(Job('d', 2, 25))
jobs.append(Job('e', 3, 15))
j = Job(None, None, None)
print(j.job_sequence(jobs))
| null |
python/disjoint_set/job_sequencing_problem.py
|
job_sequencing_problem.py
|
py
| 1,659 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.count",
"line_number": 9,
"usage_type": "call"
}
] |
54741425
|
import sys
from cx_Freeze import setup, Executable
from sportorg import config
base = None
if sys.platform == 'win32':
base = 'Win32GUI'
include_files = [config.LOCALE_DIR, config.TEMPLATE_DIR, config.IMG_DIR, config.SOUND_DIR, config.base_dir('LICENSE'),
config.base_dir('changelog.md'), config.base_dir('changelog_ru.md'),
config.base_dir('status_comments.txt'), config.base_dir('regions.txt'), config.base_dir('names.txt'),
config.base_dir('ranking.txt')]
includes = ['atexit', 'codecs']
excludes = ['Tkinter']
options = {
'build_exe': {
'includes': includes,
'excludes': excludes,
"packages": ['idna', 'requests', 'encodings', 'asyncio', 'pywinusb/hid'],
'include_files': include_files
}
}
executables = [
Executable(
'SportOrg.pyw',
base=base,
icon=config.ICON,
shortcutDir=config.NAME.lower(),
copyright='MIT Licence {}'.format(config.NAME)
)
]
setup(
name=config.NAME,
version=config.VERSION.file,
description=config.NAME,
options=options,
executables=executables
)
| null |
setup.py
|
setup.py
|
py
| 1,141 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.platform",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config.LOCALE_DIR",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sportorg.config.TEMPLATE_DIR",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config.IMG_DIR",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config.SOUND_DIR",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config.base_dir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sportorg.config.base_dir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sportorg.config",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sportorg.config.base_dir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sportorg.config",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sportorg.config.base_dir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sportorg.config",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cx_Freeze.Executable",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sportorg.config.ICON",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sportorg.config.NAME.lower",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sportorg.config.NAME",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sportorg.config.NAME",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "cx_Freeze.setup",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sportorg.config.NAME",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sportorg.config.VERSION",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "sportorg.config.NAME",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "sportorg.config",
"line_number": 38,
"usage_type": "name"
}
] |
295819976
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
mnist = tf.keras.datasets.mnist #28*28 images of hand-written digits 0-9
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1) #normalization
x_test = tf.keras.utils.normalize(x_test, axis=1) #normalization
model = tf.keras.models.Sequential() #sequential models, it's a feed-forward like the image we drew
model.add(tf.keras.layers.Flatten()) #using flatten as input layer just to make our lives easier
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu)) #how many unit in the layer, we're gonna use 128 neurons in the layer. activation function is what is gonna make that neuron fire or sort of fire. relu is rectified linear
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax)) #output layer, if it's in the case of classification then it'll have your number of class. activation function using softmax for a probability distribution
#parameter for model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3) #training model
#a neural network doesn't actually attempt to optimize for accuracy; it doesn't try to maximize accuracy, it's always trying to minimize loss
#so the way you calculate loss can make a huge impact, because it's the loss's relationship to accuracy that steers the optimizer
#plt.imshow(x_train[0], cmap = plt.cm.binary)
#plt.show()
#the goal: the model is actually generalizing, rather than memorizing
#calculate validation loss and validation accuracy
val_loss, val_acc = model.evaluate(x_test, y_test)
print(val_loss, val_acc)
model.save('epic_num_reader.model') #save the model
new_model = tf.keras.models.load_model('epic_num_reader.model') #load existing model
predictions = new_model.predict([x_test])
print(predictions)
print(np.argmax(predictions[0])) #print prediction at index 0
plt.imshow(x_test[0])
plt.show()
| null |
sentdex tutorial/tensorflow/trying_tensorflow.py
|
trying_tensorflow.py
|
py
| 2,039 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tensorflow.keras",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.normalize",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.normalize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
}
] |
636782877
|
#!/usr/bin/env python3
#SenseHatLogger. Author: kurtd5105
from sense_hat import SenseHat
import argparse
import sys
import time
from datetime import datetime
from datetime import timedelta
from itertools import product
#Create a dictionary for the representation of the numbers in a pixel array
numbers = {
"0":
[
[0, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]
],
"1":
[
[0, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 1]
],
"2":
[
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 1, 1, 1]
],
"3":
[
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]
],
"4":
[
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]
],
"5":
[
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]
],
"6":
[
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]
],
"7":
[
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]
],
"8":
[
[0, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]
],
"9":
[
[0, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]
],
}
"""
generateNumberGroupings
Generates a grid of 0 and 1 for LED off/on for each possible ordering of numbers of a
given grouping length. The grid will be of size screenDimensions, [rows, cols]. Each
number given will be of size numberDimensions, [rows, cols]. A dictionary is returned,
with the number string as the key and the display grid as its value.
"""
def generateNumberGroupings(numbers, groupingLength, screenDimensions, numberDimensions):
groupings = {}
#For every combination of numbers that are of groupingLength
for group in product(range(10), repeat=groupingLength):
#Create an empty screen
grouping = [[0 for col in range(screenDimensions[1])] for row in range(screenDimensions[0])]
#Copy each number onto the screen in the correct position
for i in range(groupingLength):
for row in range(numberDimensions[0]):
for col in range(numberDimensions[1]):
grouping[row][col + (i * numberDimensions[1])] = numbers[str(group[i])][row][col]
groupings[str(group[0]) + str(group[1])] = list(grouping)
return groupings
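# Illustrative example (dimensions as used in main below): with
# groupingLength=2, screenDimensions=(5, 8) and numberDimensions=(5, 4),
# groupings["42"] is a 5x8 grid of 0/1 with the "4" glyph occupying
# columns 0-3 and the "2" glyph columns 4-7.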
"""
displayMetrics
Uses the Sense Hat to display the current temperature, as well as the hourly temperature
average, hourly pressure average, and hourly humidity average. currTemp is a float,
metric is in the standard
[time, [avgT, minT, maxT], [avgP, minP, maxP], [avgH, minH, maxH]] format, groupings have
the strings of all possible number combinations of int groupingLength as their key and
the display grid as the value. The time each part will be displayed on screen will be
approximately gap seconds (the default is 1 second). Color is an rgb list, defaults to green.
"""
def displayMetrics(sense, currTemp, metric, groupings, groupingLength, rotation, gap=1, color=[0, 255, 0]):
#X10 in the bottom 3 rows
extraDigit = [
[128, 128, 128], [0, 0, 0], [128, 128, 128], [255, 255, 255], [0, 0, 0],
[255, 255, 255], [255, 255, 255], [255, 255, 255],
[0, 0, 0], [128, 128, 128], [0, 0, 0], [255, 255, 255], [0, 0, 0],
[255, 255, 255], [0, 0, 0], [255, 255, 255],
[128, 128, 128], [0, 0, 0], [128, 128, 128], [255, 255, 255], [0, 0, 0],
[255, 255, 255], [255, 255, 255], [255, 255, 255]
]
#T in the bottom 3 rows
t = [
[0, 0, 0], [192, 192, 192], [192, 192, 192], [192, 192, 192], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0], [192, 192, 192], [0, 0, 0], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0], [192, 192, 192], [0, 0, 0], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]
]
#P in the bottom 3 rows
p = [
[0, 0, 0], [192, 192, 192], [192, 192, 192], [0, 0, 0], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [192, 192, 192], [192, 192, 192], [0, 0, 0], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [192, 192, 192], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]
]
#H in the bottom 3 rows
h = [
[0, 0, 0], [192, 192, 192], [0, 0, 0], [192, 192, 192], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [192, 192, 192], [192, 192, 192], [192, 192, 192], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0],
[0, 0, 0], [192, 192, 192], [0, 0, 0], [192, 192, 192], [0, 0, 0], [0, 0, 0],
[0, 0, 0], [0, 0, 0]
]
sense.clear()
sense.set_rotation(rotation)
groups = []
    #Append the whole-number part and then the fractional part, each flagged with whether it is a decimal
groups.append([str(int(currTemp)), False])
groups.append([str(currTemp - int(currTemp))[2:], True])
#Add each metric to the display groups
for m in metric[1]:
groups.append([str(int(m[0])), False])
groups.append([str(m[0] - int(m[0]))[2:], True])
#Set the pressure to ignore the most significant digit, it is probably 1
groups[4][0] = str(int(metric[1][1][0]) % 1000)
overflow = [False for x in range(len(groups))]
for i in range(8):
if groups[i][0] == '':
groups[i][0] = "00"
continue
#Check to see if any group overflows and set its overflow flag and shorten the group
if len(groups[i][0]) > groupingLength:
groups[i][0] = groups[i][0][0:groupingLength]
if i % 2 == 0:
overflow[i] = True
#Add a 0 to the front of a non decimal, or in the back of a decimal if necessary
elif i % 2 == 0:
if len(groups[i][0]) == 1:
groups[i][0] = '0' + groups[i][0]
else:
if len(groups[i][0]) == 1:
groups[i][0] = groups[i][0] + '0'
for i in range(8):
sense.clear()
#Change color accordingly here
#Create a list of r, g, b values for each LED
displayList = [color if groupings[groups[i][0]][row][col] else [0, 0, 0] for row in range(5) for col in range(8)]
#If it's a decimal
if groups[i][1]:
displayList[32] = [255, 255, 255]
#If there is an overflow, add the overflow signal to the screen, and move the thp indicator to the side
if overflow[i]:
if i < 4:
displayList[0] = [255, 0, 0]
elif i < 6:
displayList[8] = [255, 255, 0]
else:
displayList[16] = [0, 0, 255]
displayList.extend(extraDigit)
#If there isn't an overflow, display the thp symbol on the bottom of the screen
else:
if i < 4:
displayList.extend(t)
elif i < 6:
displayList.extend(p)
else:
displayList.extend(h)
sense.set_pixels(displayList)
time.sleep(gap)
sense.clear()
sense.set_rotation(0)
"""
logData
Logs all the data to a data log file, given by the dataPath.
"""
def logData(dataPath, data):
with open(dataPath, 'a') as f:
for point in data:
f.write("{} Temperature: {}; Pressure: {}; Humidity: {};\n".format(*point))
"""
logMetric
Logs the given metric to the metric log file, given by the metricPath.
"""
def logMetric(metricPath, metric):
with open(metricPath, 'a') as f:
f.write("{} ".format(metric[0]))
metric1, metric2, metric3 = metric[1][0], metric[1][1], metric[1][2]
f.write("Temperature avg: {}; min: {}; max: {}; ".format(*metric1))
f.write("Pressure avg: {}; min: {}; max: {}; ".format(*metric2))
f.write("Humidity avg: {}; min: {}; max: {};\n".format(*metric3))
def offHour(timerange, curr):
if timerange[0] > timerange[1]:
if curr.hour >= timerange[0]:
return True
elif curr.hour < timerange[1]:
return True
else:
if curr.hour >= timerange[0] and curr.hour < timerange[1]:
return True
return False
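# Illustrative examples (assumed clock values): with the default off-range
# [23, 8], which wraps past midnight, offHour([23, 8], t) is True at 02:00
# and False at 12:00; for a non-wrapping range like [1, 5], only hours
# 1 <= h < 5 count as off.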
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A Raspberry Pi Sense Hat sensor logger with LED and text file output.")
parser.add_argument("-t", "--timerange", nargs=2, type=int, help="Optional argument to change the time range the LED matrix should be off.")
parser.add_argument(
"-r", "--rotation", nargs=1, type=int,
help="Optional argument to change the LED matrix rotation in degrees. The screen will be rotated to the nearest 90 degree."
)
parser.add_argument("-b", "--bright", action="store_true", help="Optional argument to turn the LED matrix to full brightness instead of low.")
args = parser.parse_args()
t = []
#Setup custom timerange if there was a valid range provided
if args.timerange:
t = args.timerange
for i in t:
if i < 0 or i > 23:
print("Time out of range, setting to default.")
t = [23, 8]
break
else:
t = [23, 8]
rotation = 0
#Setup the rotation if it was provided
if args.rotation:
rotation = int(((round(args.rotation[0]/90, 0) % 4) * 90))
sense = SenseHat()
#Set the LED matrix to bright if the argument was provided
if args.bright:
sense.low_light = False
else:
sense.low_light = True
groupings = generateNumberGroupings(numbers, 2, (5, 8), (5, 4))
now = datetime.now()
target = datetime.now()
#[time, [avgT, minT, maxT], [avgP, minP, maxP], [avgH, minH, maxH]]
metric = [0, [[20, 0, 0], [1000, 0, 0], [50, 0, 0]]]
while True:
data = []
#From t 0 to 59
for i in range(60):
start = datetime.now()
#Print the current time for debug purposes
print(start)
#Take measurements
data.append([
str(start),
round(sense.get_temperature(), 2),
round(sense.get_pressure(), 2),
round(sense.get_humidity(), 2)
])
#Display the current temperature and the current metrics every 2 minutes
if i % 2 == 0:
if not offHour(t, start):
displayMetrics(sense, data[-1][1], metric, groupings, 2, rotation)
#Add a 60 second time delta from the start
target = timedelta(seconds = 60) - (datetime.now() - start)
delay = target.total_seconds()
if delay < 0:
delay = 0
time.sleep(delay)
start = datetime.now()
metrics = [str(start)]
data.append([
str(start),
round(sense.get_temperature(), 2),
round(sense.get_pressure(), 2),
round(sense.get_humidity(), 2)
])
#Calculate metrics here
metric = [str(start), []]
for i in range(1, 4):
metricData = [d[i] for d in data]
metric[1].append([round(sum(metricData) / len(metricData), 2), min(metricData), max(metricData)])
print(metric)
#Log the data and metric to log files
logData(start.strftime("%d-%m-%Y") + "_data.log", data)
logMetric(start.strftime("%d-%m-%Y") + "_metric.log", metric)
target = timedelta(seconds = 60) - (datetime.now() - start)
delay = target.total_seconds()
if delay < 0:
delay = 0
time.sleep(delay)
| null |
logger.py
|
logger.py
|
py
| 12,544 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "itertools.product",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "sense_hat.SenseHat",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 380,
"usage_type": "call"
}
] |
448022421
|
"""
Handler
-------------------
Basic tfs-to-pandas io-functionality.
:author: Jaime
:module: handler
"""
import logging
from collections import OrderedDict
from contextlib import suppress
from os.path import basename, dirname
from typing import Union
import numpy as np
import pandas
from pandas import DataFrame
LOGGER = logging.getLogger(__name__)
HEADER = "@"
NAMES = "*"
TYPES = "$"
COMMENTS = "#"
INDEX_ID = "INDEX&&&"
FLOAT_PARENTS = (float, np.floating)
INT_PARENTS = (int, np.integer, bool, np.bool_)
ID_TO_TYPE = {
"%s": np.str,
"%bpm_s": np.str,
"%le": np.float64,
"%f": np.float64,
"%hd": np.int,
"%d": np.int,
}
DEFAULT_COLUMN_WIDTH = 20
MIN_COLUMN_WIDTH = 10
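# Illustrative TFS layout (assumed content) handled by this module:
#   @ TITLE %s "example"     <- header line
#   * NAME      S            <- column names
#   $ %s        %le          <- column types
#     "BPM.1"   0.5          <- data row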
class TfsDataFrame(pandas.DataFrame):
"""
Class to hold the information of the built Pandas DataFrame,
together with a way of getting the headers of the TFS file.
To get a header value do: data_frame["header_name"] or
data_frame.header_name.
"""
_metadata = ["headers", "indx"]
def __init__(self, *args, **kwargs):
self.headers = {}
with suppress(IndexError, AttributeError):
self.headers = args[0].headers
self.headers = kwargs.pop("headers", self.headers)
self.indx = _Indx(self)
super().__init__(*args, **kwargs)
def __getitem__(self, key: object) -> object:
try:
return super().__getitem__(key)
except KeyError as e:
try:
return self.headers[key]
except KeyError:
raise KeyError(f"{key} is neither in the DataFrame nor in headers.")
except TypeError:
raise e
def __getattr__(self, name: str) -> object:
try:
return super().__getattr__(name)
except AttributeError:
try:
return self.headers[name]
except KeyError:
raise AttributeError(f"{name} is neither in the DataFrame nor in headers.")
@property
def _constructor(self):
return TfsDataFrame
def __str__(self):
return f"{super().__str__().strip()}\nHeaders: {str(self.headers)}\n"
class _Indx(object):
"""
Helper class to mock the metaclass twiss.indx["element_name"]
behaviour.
"""
def __init__(self, tfs_data_frame):
self._tfs_data_frame = tfs_data_frame
def __getitem__(self, key):
name_series = self._tfs_data_frame.NAME
return name_series[name_series == key].index[0]
def read_tfs(tfs_path: str, index: str = None) -> TfsDataFrame:
"""
Parses the TFS table present in tfs_path and returns a custom Pandas DataFrame (TfsDataFrame).
Args:
tfs_path: path to the input TFS file
index: Name of the column to set as index. If not given looks for INDEX_ID-column
Returns:
TfsDataFrame object
"""
LOGGER.debug(f"Reading path: {tfs_path}")
headers = OrderedDict()
column_names = column_types = None
rows_list = []
with open(tfs_path, "r") as tfs_data:
for line in tfs_data:
parts = line.split()
if len(parts) == 0:
continue
if parts[0] == HEADER:
name, value = _parse_header(parts[1:])
headers[name] = value
elif parts[0] == NAMES:
LOGGER.debug("Setting column names.")
column_names = np.array(parts[1:])
elif parts[0] == TYPES:
LOGGER.debug("Setting column types.")
column_types = _compute_types(parts[1:])
elif parts[0] == COMMENTS:
continue
else:
if column_names is None:
raise TfsFormatError("Column names have not been set.")
if column_types is None:
raise TfsFormatError("Column types have not been set.")
parts = [part.strip('"') for part in parts]
rows_list.append(parts)
data_frame = _create_data_frame(column_names, column_types, rows_list, headers)
if index is not None: # Use given column as index
data_frame = data_frame.set_index(index)
else: # Try to find Index automatically
index_column = [c for c in data_frame.columns if c.startswith(INDEX_ID)]
if len(index_column) > 0:
data_frame = data_frame.set_index(index_column)
idx_name = index_column[0].replace(INDEX_ID, "")
if idx_name == "":
idx_name = None # to remove it completely (Pandas makes a difference)
data_frame = data_frame.rename_axis(idx_name)
_validate(data_frame, f"from file {tfs_path:s}")
return data_frame
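# Minimal usage sketch (file path and column name are hypothetical):
#   df = read_tfs("twiss.tfs", index="NAME")
#   df.headers          # OrderedDict of the "@" header lines
#   df["BETX"].max()    # columns behave like a normal pandas column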
def write_tfs(tfs_path: str, data_frame: DataFrame, headers_dict: dict = None,
save_index: Union[str, bool] = False, colwidth: int = DEFAULT_COLUMN_WIDTH):
"""
Writes the DataFrame into tfs_path with the headers_dict as
headers dictionary. If you want to keep the order of the headers, use collections.OrderedDict.
Args:
tfs_path: path to the output TFS file
data_frame: TfsDataFrame or pandas.DataFrame to save
headers_dict: Headers of the data_frame, if empty tries to use data_frame.headers
save_index: bool or string. Default: False
If True, saves the index of the data_frame to a column identifiable by INDEX_ID.
If string, it saves the index of the data_frame to a column named by string.
colwidth: Column width
"""
_validate(data_frame, f"to be written in {tfs_path:s}")
if save_index:
if isinstance(save_index, str):
# saves index into column by name given
idx_name = save_index
else:
# saves index into column, which can be found by INDEX_ID
try:
idx_name = INDEX_ID + data_frame.index.name
except TypeError:
idx_name = INDEX_ID
data_frame.insert(0, idx_name, data_frame.index)
LOGGER.debug(f"Attempting to write file: {basename(tfs_path)} in {dirname(tfs_path)}")
if headers_dict is None: # Tries to get headers from TfsDataFrame
try:
headers_dict = data_frame.headers
except AttributeError:
headers_dict = {}
colwidth = max(MIN_COLUMN_WIDTH, colwidth)
headers_str = _get_headers_str(headers_dict)
colnames_str = _get_colnames_str(data_frame.columns, colwidth)
coltypes_str = _get_coltypes_str(data_frame.dtypes, colwidth)
data_str = _get_data_str(data_frame, colwidth)
with open(tfs_path, "w") as tfs_data:
tfs_data.write("\n".join((
headers_str, colnames_str, coltypes_str, data_str
)))
if save_index:
        # remove the inserted column again
data_frame.drop(data_frame.columns[0], axis=1, inplace=True)
def _get_headers_str(headers_dict):
return "\n".join(_get_header_line(name, headers_dict[name])
for name in headers_dict)
def _get_header_line(name, value):
if not isinstance(name, str):
raise ValueError(f"{name} is not a string")
if isinstance(value, INT_PARENTS):
return f"@ {name} %d {value}"
elif isinstance(value, FLOAT_PARENTS):
return f"@ {name} %le {value}"
elif isinstance(value, str):
return f"@ {name} %s \"{value}\""
else:
raise ValueError(f"{value} does not correspond to recognized types (string, float and int)")
def _get_colnames_str(colnames, colwidth):
fmt = _get_row_fmt_str([None] * len(colnames), colwidth)
return "* " + fmt.format(*colnames)
def _get_coltypes_str(types, colwidth):
fmt = _get_row_fmt_str([str] * len(types), colwidth)
return "$ " + fmt.format(*[_dtype_to_str(type_) for type_ in types])
def _get_data_str(data_frame, colwidth):
if len(data_frame) == 0:
return "\n"
format_strings = " " + _get_row_fmt_str(data_frame.dtypes, colwidth)
return "\n".join(
data_frame.apply(lambda series: format_strings.format(*series), axis=1)
)
def _get_row_fmt_str(dtypes, colwidth):
return " ".join(
"{" + f"{indx:d}:>{_dtype_to_format(type_, colwidth)}" + "}"
for indx, type_ in enumerate(dtypes)
)
class TfsFormatError(Exception):
"""Raised when wrong format is detected in the TFS file."""
pass
def _create_data_frame(column_names, column_types, rows_list, headers):
data = np.array(rows_list) if len(rows_list) else None # case of empty dataframe
data_frame = TfsDataFrame(data=data,
columns=column_names,
headers=headers)
_assign_column_types(data_frame, column_names, column_types)
return data_frame
def _assign_column_types(data_frame, column_names, column_types):
names_to_types = dict(zip(column_names, column_types))
for name in names_to_types:
data_frame[name] = data_frame[name].astype(names_to_types[name])
def _compute_types(str_list):
return [_id_to_type(string) for string in str_list]
def _parse_header(str_list):
type_idx = next((idx for idx, part in enumerate(str_list) if part.startswith("%")), None)
if type_idx is None:
raise TfsFormatError("No data type found in header: '{}'".format(" ".join(str_list)))
name = " ".join(str_list[0:type_idx])
value_str = " ".join(str_list[(type_idx+1):])
return name, _id_to_type(str_list[type_idx])(value_str.strip('"'))
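# Illustrative parse (assumed header tokens): the file line
#   @ DATE %s "2019-01-01"
# reaches this function as ['DATE', '%s', '"2019-01-01"'] and is returned
# as ('DATE', '2019-01-01'); the %-token selects the Python type applied.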
def _id_to_type(type_str):
try:
return ID_TO_TYPE[type_str]
except KeyError:
if type_str.startswith("%") and type_str.endswith("s"):
return str
raise TfsFormatError(f"Unknown data type: {type_str}")
def _dtype_to_str(type_):
if np.issubdtype(type_, np.integer) or np.issubdtype(type_, np.bool_):
return "%d"
elif np.issubdtype(type_, np.floating):
return "%le"
else:
return "%s"
def _dtype_to_format(type_, colsize):
if type_ is None:
return f"{colsize}"
if np.issubdtype(type_, np.integer) or np.issubdtype(type_, np.bool_):
return f"{colsize}d"
if np.issubdtype(type_, np.floating):
return f"{colsize}.{colsize - len('-0.e-000')}g"
return f"{colsize}s"
def _validate(data_frame, info_str=""):
"""
Check if Dataframe contains finite values only
and both indices and columns are unique.
"""
def isnotfinite(x):
try:
return ~np.isfinite(x)
except TypeError: # most likely string
try:
return np.zeros(x.shape, dtype=bool)
except AttributeError: # single entry
return np.zeros(1, dtype=bool)
bool_df = data_frame.apply(isnotfinite)
if bool_df.values.any():
LOGGER.warning(f"DataFrame {info_str:s} contains non-physical values at Index: "
f"{bool_df.index[bool_df.any(axis='columns')].tolist()}")
if not len(set(data_frame.index)) == len(data_frame.index):
raise TfsFormatError("Indices are not Unique.")
if not len(set(data_frame.columns)) == len(data_frame.columns):
raise TfsFormatError("Column names not Unique.")
LOGGER.debug(f"DataFrame {info_str:s} validated.")
| null |
tfs/handler.py
|
handler.py
|
py
| 11,262 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.integer",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.bool_",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.str",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.str",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.int",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "contextlib.suppress",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "numpy.issubdtype",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.integer",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "numpy.bool_",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "numpy.issubdtype",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "numpy.issubdtype",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.integer",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "numpy.bool_",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "numpy.issubdtype",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "numpy.floating",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "numpy.isfinite",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 323,
"usage_type": "call"
}
] |
510142583
|
'''https://www.kaggle.com/crawford/resize-and-save-images-as-hdf5-256x256
This code can be used to convert images into ONE h5 file where the key will be "train_img" (change it in the create_dataset call below if needed). '''
import cv2
import datetime as dt
import h5py
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import numpy as np
import os
import pandas as pd
from glob import glob
start = dt.datetime.now()
# ../input/
PATH = os.path.abspath(os.path.join('/home/msaha6/Downloads/Pathology-GAN/dataset/vgh_nki/', 'he'))
# ../input/sample/images/
SOURCE_IMAGES = os.path.join(PATH, "patches_h224_w224", "training_data")
# ../input/sample/images/*.png
images = glob(os.path.join(SOURCE_IMAGES, "*.png"))
images.sort()
NUM_IMAGES = len(images)
HEIGHT = 224
WIDTH = 224
CHANNELS = 3
SHAPE = (HEIGHT, WIDTH, CHANNELS)
#Now we will write the h5 file
train_shape = (len(images), HEIGHT, WIDTH, CHANNELS)
hf=h5py.File('data.h5', 'w')
hf.create_dataset("train_img", shape=train_shape, maxshape=train_shape, compression='gzip', compression_opts=9)
for i, img in enumerate(images):
s=dt.datetime.now()
img=cv2.imread(images[i])
img= cv2.resize(img, (WIDTH,HEIGHT), interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
hf["train_img"][i, ...] = img[None]
e=dt.datetime.now()
hf.close()
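# Read-back sketch (illustrative; assumes the data.h5 written above):
#   with h5py.File('data.h5', 'r') as hf:
#       first = hf['train_img'][0]   # numpy array of shape (224, 224, 3)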
| null |
read_and_conver_images_to_h5/testing.py
|
testing.py
|
py
| 1,357 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "h5py.File",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "attribute"
}
] |
485377736
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import logging
import threading
from time import sleep
from datetime import datetime, timedelta
import urlparse
import requests
import urllib
from requests.exceptions import ConnectionError
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, NoSuchWindowException
from selenium.webdriver.common.action_chains import ActionChains
from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError, transaction
from digger.models import Shop, Item, Keyword, ItemKeyword
from digger.utils import UtilsMixin
logger = logging.getLogger('digger.items')
DZT = 'dianzhentan'
DTS = 'diantoushi'
class RetryException(Exception):
pass
class IgnoreException(Exception):
pass
keywords_in_process = []
class MyTask(threading.Thread, UtilsMixin):
    help = 'Fetch item ID, name, delisting time, category and owning shop'
def __init__(self, lock):
threading.Thread.__init__(self)
self.logger = logger
self.lock = lock
self.item_browser = self.get_item_browser(diantoushi=True)
self.browser = self.get_index_browser()
self.browser.get("https://www.taobao.com/")
def run(self):
small_times = sorted(list(set(Keyword.objects.values_list('times', flat=True))))[0]
while True:
with self.lock:
if not Keyword.objects.filter(times=small_times).exclude(id__in=keywords_in_process).exists():
break
keyword = Keyword.objects.filter(times=small_times).exclude(id__in=keywords_in_process).order_by('created').first()
if keyword.page_num >= 10:
keyword.page_num = 1
keyword.save()
self.page_num = keyword.page_num
self.keyword = keyword.name
keywords_in_process.append(keyword.id)
self.process_index(self.browser, keyword, 3)
with self.lock:
keywords_in_process.remove(keyword.id)
keyword.times += 1
keyword.save()
self.browser.close()
def open_item(self, url):
try:
self.item_browser.get(url)
except NoSuchWindowException:
try:
self.item_browser.close()
except NoSuchWindowException:
pass
self.item_browser = self.get_item_browser(diantoushi=True)
self.item_browser.get(url)
@transaction.atomic
def get_item(self, url, pk):
if Item.objects.filter(pk=pk).exists():
            logger.debug('Already have info for this item: <%s>', pk)
return
self.open_item(url)
browser = self.item_browser
if 'chaoshi.detail.tmall.com' in browser.current_url:
return
url = browser.current_url
shop = self.get_or_create_shop(browser)
item_id = self.get_item_id(browser)
item_name = self.get_item_name(browser)
category_id = self.get_category_id(browser)
del_time = self.get_del_time(browser)
rate = self.get_rate(browser)
sell = self.get_sell(browser)
Item.objects.create(id=item_id,
url=url,
name=item_name,
shop=shop,
xiajia=self.normalize_del_time(del_time),
cid=category_id,
rate=rate,
sell=sell,
)
def process_page(self):
items_container = self.browser.find_element_by_css_selector('.m-itemlist')
items = items_container.find_elements_by_css_selector('.item')
logger.info('num of item: %s' % len(items))
for index, item in enumerate(items):
self.item_index = index
            logger.info('%s: page %s, item %s', self.keyword, self.page_num, index)
if 'activity' in item.get_attribute('class'):
                logger.debug('%s: page %s, item %s is not a product', self.keyword, self.page_num, index)
continue
item_id = item.find_element_by_css_selector('.title a').get_attribute('data-nid')
name = item.find_element_by_css_selector('.title a').text
            # Skip items from Tmall supermarket (天猫超市)
if '天猫超市' in name:
continue
shop_name = item.find_element_by_css_selector('a.shopname').text
            logger.info('Extracted item info: item <%s>-%s; shop %s', item_id, name, shop_name)
item_url = item.find_element_by_css_selector('.title a').get_attribute('href')
            logger.info('Extracted item URL: %s', item_url)
for i in range(3):
try:
self.get_item(item_url, item_id)
break
except IndexError:
                    logger.exception('Index error occurred')
break
except RetryException:
                    logger.error('Retrying')
sleep(30)
continue
except IgnoreException:
break
except Exception:
logger.exception("发生未知错误,重试")
def process_page_safe(self):
for i in range(10):
try:
self.process_page()
break
except StaleElementReferenceException:
logger.exception("发生页面元素失效错误")
continue
except RetryException:
continue
except:
                logger.exception('Other error occurred, skipping')
break
def go_to_page(self, page_num):
logger.debug("Go to page %s", page_num)
for i in range(60):
try:
elem = self.browser.find_element_by_css_selector('#mainsrp-pager .J_Input')
elem.clear()
elem.send_keys(page_num)
elem.send_keys(Keys.RETURN)
break
except NoSuchElementException:
continue
def process_index(self, browser, keyword, last_page=100):
elem = browser.find_element_by_id('q')
elem.clear()
elem.send_keys(keyword.name)
elem.send_keys(Keys.RETURN)
self.go_to_page(self.page_num)
sleep(5) # TODO use Waits
self.process_page_safe()
while self.page_num < last_page:
self.page_num += 1
self.go_to_page(self.page_num)
sleep(10) # TODO use Waits
keyword.page_num = self.page_num
keyword.save()
self.process_page_safe()
class Command(BaseCommand):
    help = 'Fetch item ID, name, delisting time, category and owning shop'
def add_arguments(self, parser):
parser.add_argument(
'-t',
'--task',
action='store',
dest='task_num',
default=1,
type=int,
)
def handle(self, *args, **options):
lock = threading.Lock()
threads = []
for i in range(options.get('task_num')):
threads.append(MyTask(lock))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
| null |
digger/management/commands/get_items.py
|
get_items.py
|
py
| 7,648 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "digger.utils.UtilsMixin",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Keyword.objects.values_list",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "digger.models.Keyword.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Keyword",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "digger.models.Keyword.objects.filter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "digger.models.Keyword.objects",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Keyword",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "digger.models.Keyword.objects.filter",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "digger.models.Keyword.objects",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Keyword",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchWindowException",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchWindowException",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "digger.models.Item.objects.filter",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "digger.models.Item.objects",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Item",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "digger.models.Item.objects.create",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "digger.models.Item.objects",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "digger.models.Item",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "selenium.common.exceptions.StaleElementReferenceException",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "threading.Lock",
"line_number": 228,
"usage_type": "call"
}
] |
517218316
|
"""
# Copyright 2020 ABHINAV RAWAT
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
This module reads and processes the GPGGA packet received from a GPS module.
It returns useful data like lat, long, time, number of satellites etc.
"""
import serial
class GPS:
def __init__(self, port, baud_rate):
"""
Initialize the serial communication port to access gps module
:param port: port to be used for serial communication.
Use COM1, COM2, COM3 etc in case of windows
Use /dev/ttyUSB0 etc in case of linux based devices
:param baud_rate: Set the appropriate baud rate.
"""
self.gps_serial_port = serial.Serial(port, baud_rate)
def get_lat_long(self):
"""
        This function reads and processes the GPGGA packet & returns lat and long
:return: tuple of lat & long
"""
s = self.gps_serial_port.read(500)
s = s.decode('utf-8')
data = s.splitlines()
for i in range(len(data)):
d = data[i].split(',')
if d[0] == "$GPGGA" and len(d) == 15:
if d[2] == '' or d[4] == '':
return "N/A", "N/A"
else:
lat = float(d[2]) / 100
long = float(d[4]) / 100
return lat, long
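    # Illustrative GPGGA sentence (assumed data, not from a live module):
    #   $GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47
    # splits into 15 fields; d[2]='4807.038' is latitude and d[4]='01131.000'
    # longitude in NMEA ddmm.mmm form, so dividing by 100 yields a dd.mmmm
    # style value rather than true decimal degrees.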
def get_time(self):
"""
        This function reads and processes the GPGGA packet & returns the time value as hh.mm
        :return: float time value as hh.mm
"""
s = self.gps_serial_port.read(500)
s = s.decode('utf-8')
data = s.splitlines()
for i in range(len(data)):
d = data[i].split(',')
if d[0] == "$GPGGA" and len(d) == 15:
if d[1] == '':
return "N/A"
else:
time_val = int(float(d[1]) / 100)
time_val = time_val / 100
return time_val
def get_quality_indicator(self):
"""
        This function reads and processes the GPGGA packet & returns the fix quality indicator
        :return: str value of quality indicator as below:
        1 = Uncorrected coordinate
        2 = Differentially corrected coordinate (e.g., WAAS, DGPS)
        4 = RTK Fix coordinate (centimeter precision)
        5 = RTK Float (decimeter precision)
"""
s = self.gps_serial_port.read(500)
s = s.decode('utf-8')
data = s.splitlines()
for i in range(len(data)):
d = data[i].split(',')
if d[0] == "$GPGGA" and len(d) == 15:
return d[6]
def get_no_of_satellites(self):
"""
        This function reads and processes the GPGGA packet & returns the number of satellites in use
        :return: str value with the number of satellites
"""
s = self.gps_serial_port.read(500)
s = s.decode('utf-8')
data = s.splitlines()
for i in range(len(data)):
d = data[i].split(',')
if d[0] == "$GPGGA" and len(d) == 15:
return d[7]
def get_raw_data(self):
"""
        :return: returns the raw, comma-split fields of the GPGGA packet
"""
s = self.gps_serial_port.read(500)
s = s.decode('utf-8')
data = s.splitlines()
for i in range(len(data)):
d = data[i].split(',')
if d[0] == "$GPGGA" and len(d) == 15:
return d
if __name__ == '__main__':
    gps = GPS(port="COM1", baud_rate=9600)
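    # Hedged usage sketch: assumes a GPS module is attached to COM1 and is
    # streaming NMEA sentences at 9600 baud.
    print(gps.get_lat_long())
    print(gps.get_no_of_satellites())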
| null |
pyembedded/gps_module/gps.py
|
gps.py
|
py
| 4,446 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "serial.Serial",
"line_number": 28,
"usage_type": "call"
}
] |
525891851
|
from django.conf.urls import url, include
from rest_framework import routers
from apps.api.views.lessons import LessonSetViewSet, LessonViewSet, PageViewSet, FavoriteViewSet, \
LogLessonViewSet
from apps.api.views.question import QuestionViewSet, AnswerViewSet, UserAnswerViewSet
from apps.api.views.userprofile import UserViewSet
router = routers.DefaultRouter()
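# NOTE: `base_name` was renamed to `basename` in Django REST Framework 3.9 and
# removed in later releases; this module targets an older DRF version.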
router.register(r'user', UserViewSet, base_name='user')
router.register(r'lesson_set', LessonSetViewSet, base_name='lesson_set')
router.register(r'lesson', LessonViewSet, base_name='lesson')
router.register(r'page', PageViewSet, base_name='page')
router.register(r'favorite', FavoriteViewSet, base_name='favorite')
router.register(r'log_lesson', LogLessonViewSet, base_name='log_lesson')
router.register(r'question', QuestionViewSet, base_name='question')
router.register(r'answer', AnswerViewSet, base_name='answer')
router.register(r'user_answer', UserAnswerViewSet, base_name='user_answer')
urlpatterns = [
url(r'^', include(router.urls)),
]
| null |
apps/api/urls.py
|
urls.py
|
py
| 1,054 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "apps.api.views.userprofile.UserViewSet",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.lessons.LessonSetViewSet",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.lessons.LessonViewSet",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.lessons.PageViewSet",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.lessons.FavoriteViewSet",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.lessons.LogLessonViewSet",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.question.QuestionViewSet",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.question.AnswerViewSet",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "apps.api.views.question.UserAnswerViewSet",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 22,
"usage_type": "call"
}
] |
96914998
|
import os
import sys
from PIL import Image
if __name__ == '__main__':
dataset_path = ['val', 'test']
input_dir_name = 'face_dataset_RGB'
target_dir_name = 'face_dataset_depth_8bit'
result_dir_name = 'result'
for dataset in dataset_path:
count = 1
input_dir_path = os.path.join(os.getcwd(), dataset, input_dir_name)
target_dir_path = os.path.join(os.getcwd(), dataset, target_dir_name)
if not os.path.exists(os.path.join(dataset, result_dir_name)):
os.makedirs(os.path.join(dataset, result_dir_name))
for root, _, input_file_names in os.walk(input_dir_path):
for input_file_name in input_file_names:
if not input_file_name.endswith('.png'): continue
input_file_path = os.path.join(root, input_file_name)
output_file_path = input_file_path.replace('rgb', 'depth').replace(input_dir_name, target_dir_name)
print(input_file_path, output_file_path)
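                # Stitch the RGB frame and its depth map side by side: total width
                # is the sum of both widths, height is the taller of the two.
                # (Assumes RGB filenames contain 'rgb' so the replace() above maps
                # them onto the matching depth files.)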
images = [Image.open(x) for x in [input_file_path, output_file_path]]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save(os.path.join(dataset, result_dir_name, '%d.png' % count))
count += 1
| null |
datasets/preprocess_pandora_dataset.py
|
preprocess_pandora_dataset.py
|
py
| 1,586 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
}
] |
323761261
|
# -*- coding: utf-8 -*-
import datetime
from time import sleep
import os
finished_motd = 'Pomodoro complete! Time for a break.'  # referenced below but previously never defined
def timer(time=25):
now = datetime.datetime.now()
after = now + datetime.timedelta(minutes = time)
while datetime.datetime.now() < after:
sleep(1)
os.system('clear')
now = datetime.datetime.now()
delta = after - now
minutes = delta.seconds//60
seconds = delta.seconds%60
        timer_mod = ' <------Live time: {} ------ time left {}:{:02d} ------>'.format(now.strftime('%H:%M:%S'), minutes, seconds)
print(timer_mod)
print(finished_motd)
if __name__ == '__main__':
timer(time = 1)
| null |
days/01-03-datetimes/pomodoro.py
|
pomodoro.py
|
py
| 648 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
}
] |
85164171
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from numpy.linalg import solve
from scipy.linalg import eigvals, logm, norm
def is_stable(A, domain='z'):
"""Determines if a linear state-space model is stable from eigenvalues of `A`
Parameters
----------
A : ndarray(n,n)
state matrix
domain : str, optional {'z', 's'}
'z' for discrete-time, 's' for continuous-time state-space models
returns
-------
bool
"""
if domain == 'z': # discrete-time
# Unstable if at least one pole outside unit circle
if any(abs(eigvals(A)) > 1):
return False
elif domain == 's': # continuous-time
# Unstable if at least one pole in right-half plane
if any(np.real(eigvals(A)) > 0):
return False
else:
raise ValueError(f"{domain} wrong. Use 's' or 'z'")
return True
def ss2phys(A, B, C, D=None):
"""Calculate state space matrices in physical domain using a similarity
transform T
See eq. (20.10) in
Etienne Gourc, JP Noel, et.al
"Obtaining Nonlinear Frequency Responses from Broadband Testing"
https://orbi.uliege.be/bitstream/2268/190671/1/294_gou.pdf
"""
# Similarity transform
T = np.vstack((C, C @ A))
C = solve(T.T, C.T).T # (C = C*T^-1)
A = solve(T.T, (T @ A).T).T # (A = T*A*T^-1)
B = T @ B
return A, B, C, D, T
def ss2frf(A, B, C, D, freq):
"""Compute frequency response function from state-space parameters
(discrete-time)
Computes the frequency response function (FRF) or matrix (FRM) Ĝ at the
normalized frequencies `freq` from the state-space matrices `A`, `B`, `C`,
and `D`. :math:`G(f) = C*inv(exp(2j*pi*f)*I - A)*B + D`
Parameters
----------
A : ndarray(n,n) state matrix
B : ndarray(n,m) input matrix
C : ndarray(p,n) output matrix
D : ndarray(p,m) feed-through matrix
freq : ndarray(F)
vector of normalized frequencies at which the FRM is computed
(0 < freq < 0.5)
Returns
-------
Gss : ndarray(F,p,m)
frequency response matrix
"""
# Z-transform variable
z = np.exp(2j*np.pi*freq)
In = np.eye(*A.shape)
# Use broadcasting. Much faster than for loop.
Gss = C @ solve((z*In[..., None] - A[..., None]
).transpose((2, 0, 1)), B[None]) + D
return Gss
def discrete2cont(ad, bd, cd, dd, dt, method='zoh', alpha=None):
"""Convert linear system from discrete to continuous time-domain.
This is the inverse of :func:`scipy.signal.cont2discrete`. This will not
work in general, for instance with the ZOH method when the system has
discrete poles at ``0``.
Parameters
----------
    ad, bd, cd, dd : ndarray
        Discrete-time state-space matrices.
dt : ``float``
Time-step used to *undiscretize* ``sys``.
method : ``string``, optional
Method of discretization. Defaults to zero-order hold discretization
(``'zoh'``), which assumes that the input signal is held constant over
each discrete time-step.
alpha : ``float`` or ``None``, optional
Weighting parameter for use with ``method='gbt'``.
Returns
-------
:class:`.LinearSystem`
Continuous linear system (``analog=True``).
See Also
--------
:func:`scipy.signal.cont2discrete`
Examples
--------
Converting a linear system
>>> from nengolib.signal import discrete2cont, cont2discrete
>>> from nengolib import DoubleExp
>>> sys = DoubleExp(0.005, 0.2)
    >>> dsys = cont2discrete(sys, dt=0.1)
    >>> assert sys == discrete2cont(dsys, dt=0.1)
"""
sys = (ad, bd, cd, dd)
if dt <= 0:
raise ValueError("dt (%s) must be positive" % (dt,))
n = ad.shape[0]
m = n + bd.shape[1]
if method == 'gbt':
if alpha is None or alpha < 0 or alpha > 1:
raise ValueError("alpha (%s) must be in range [0, 1]" % (alpha,))
In = np.eye(n)
ar = solve(alpha*dt*ad.T + (1-alpha)*dt*In, ad.T - In).T
M = In - alpha*dt*ar
br = np.dot(M, bd) / dt
cr = np.dot(cd, M)
dr = dd - alpha*np.dot(cr, bd)
elif method in ('bilinear', 'tustin'):
return discrete2cont(*sys, dt, method='gbt', alpha=0.5)
elif method in ('euler', 'forward_diff'):
return discrete2cont(*sys, dt, method='gbt', alpha=0.0)
elif method == 'backward_diff':
return discrete2cont(*sys, dt, method='gbt', alpha=1.0)
elif method == 'zoh':
# see https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
M = np.zeros((m, m))
M[:n, :n] = ad
M[:n, n:] = bd
M[n:, n:] = np.eye(bd.shape[1])
E = logm(M) / dt
ar = E[:n, :n]
br = E[:n, n:]
cr = cd
dr = dd
else:
raise ValueError("invalid method: '%s'" % (method,))
return ar, br, cr, dr
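if __name__ == '__main__':
    # Hedged sanity check (not part of the original module): a small stable
    # discrete-time system, exercised through is_stable and ss2frf.
    A = np.array([[0.5, 0.1], [0.0, 0.3]])
    B = np.array([[1.0], [0.0]])
    C = np.array([[1.0, 0.0]])
    D = np.zeros((1, 1))
    assert is_stable(A, domain='z')  # eigenvalues 0.5 and 0.3 lie inside the unit circle
    G = ss2frf(A, B, C, D, np.linspace(0.01, 0.49, 5))
    print(G.shape)  # expected: (5, 1, 1)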
| null |
pyvib/lti_conversion.py
|
lti_conversion.py
|
py
| 4,954 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "scipy.linalg.eigvals",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.eigvals",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.eye",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.logm",
"line_number": 158,
"usage_type": "call"
}
] |
250976533
|
from ref_caradisiac.spiders.caradisiac import RefModeleSpider, RefMarqueSpider
from scrapy.crawler import CrawlerProcess
import re, io, json, os, time
os.environ.setdefault('SCRAPY_SETTINGS_MODULE', 'ref_caradisiac.settings') #add path to scrapy settings to the project
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
def remove(filename):
if os.path.exists(filename):
os.remove(filename)
BASE_DIR = './'
f = open('modeles.json')
data = json.load(f)
f.close()
#print(data)
process = CrawlerProcess(settings)
for item in data:
    print(item['href'])
    marque = re.search(r'(?<=/auto--)(.*)(?=/)', item['href'])
    if marque:
        marque = marque.group(1)
    else:
        continue
marque = marque.split('/modeles')[0]
if marque is not None:
json_path = os.path.join(BASE_DIR, 'modeles', '%s.json'%marque)
process.settings.set('FEED_URI',json_path)
print(10*'*')
print(marque)
print(10*'*')
remove('%s.json'%marque)
remove(json_path)
process.crawl(RefModeleSpider,marque=marque)
#time.sleep(3)
#process.stop()
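# NOTE: every spider queued above only starts once process.start() runs, so the
# FEED_URI set inside the loop keeps just its last value; writing one feed per
# marque may need CrawlerRunner or per-spider custom_settings instead.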
process.start() # the script will block here until the crawling is finished
| null |
main.py
|
main.py
|
py
| 1,244 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ.setdefault",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "scrapy.utils.project.get_project_settings",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scrapy.crawler.CrawlerProcess",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "ref_caradisiac.spiders.caradisiac.RefModeleSpider",
"line_number": 45,
"usage_type": "argument"
}
] |
255130111
|
import requests
from bs4 import BeautifulSoup
session = requests.Session()
url = "https://en.wikipedia.org/wiki/List_of_Nobel_laureates"
page = session.get(url).text
nobelList = BeautifulSoup(page, "html.parser")
# Print the birthdays of all the nobel laureates
trows = nobelList.find('table', {'class' : ["wikitable", "sortable"]}).findAll('tr')
laurs = []
for r in trows:
for candidate in r.find_all('span', {'class': "vcard"}):
laurs.append(candidate.a)
for l in laurs:
print("Name: %s" % l.contents[0])
link = "https://en.wikipedia.org" + l['href']
pg2 = session.get(link).text
pg2soup = BeautifulSoup(pg2, 'html.parser')
info = pg2soup.find('table', {'class' : ['infobox', 'biography', 'vcard']}).find_all('tr')
    bday = None  # default in case the infobox has no birthday row
    for r in info:
bday = r.find('span', {'class': "bday"})
if bday is not None:
bday = bday.contents[0]
break
print("Birthday:",bday, "\n")
| null |
BS4/wiki_table.py
|
wiki_table.py
|
py
| 939 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.Session",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
}
] |
229058160
|
from flask import Flask, request
app = Flask(__name__)
import urllib
import requests
import json
# flask.ext.* was removed in Flask 1.0; newer installs import MySQL from
# flaskext.mysql (the flask-mysql package) instead.
from flask.ext.mysql import MySQL
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'ronnie2k10'
app.config['MYSQL_DATABASE_DB'] = 'myquotrdb'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
@app.route('/search/<quote>/<uuid>')
def search(quote,uuid):
conn = mysql.connect()
c = conn.cursor()
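    # Reviewer notes on the queries below: user input is interpolated straight
    # into SQL strings (parameterized c.execute(sql, params) calls would prevent
    # injection); MySQL cursors return a row count from execute(), so results
    # come from c.fetchall()/c.fetchone(); `show` is a reserved word in MySQL,
    # hence the backticks; and datetime() is SQLite syntax, so NOW() is used.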
googlesearch = urllib.urlencode({'q': quote})
url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=8&%s' % googlesearch
url2= 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=8&start=9&%s' % googlesearch
search_response = urllib.urlopen(url)
search_results = search_response.read()
results = json.loads(search_results)
data = results['responseData']
hits = data['results']
spliturl=[]
for h in hits:
if 'imdb.com/title/' in h['url']:
spliturl = h['url'].split('/')
if spliturl==[]:
search_response2 = urllib.urlopen(url2)
search_results2 = search_response2.read()
results2 = json.loads(search_results2)
data2 = results2['responseData']
hits2 = data2['results']
for h in hits2:
if 'imdb.com/title/' in h['url']:
spliturl = h['url'].split('/')
    re = requests.get("http://www.omdbapi.com/?i="+spliturl[4]+"&tomatoes=true&plot=full").json()
    c.execute("SELECT * FROM `Show` WHERE imdbID = '"+re['imdbID']+"'")
    returns = c.fetchall()
#Check if the result is an episode
if re['Type']=='episode':
# try:
        if not returns:  # fetchall() returns a tuple, so test truthiness rather than == []
            c.execute("INSERT INTO `Show` (imdbId,title,imdbRating,poster,year,type,actors,genre,count) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,1)",(re["imdbID"],re["Title"],re["imdbRating"],re["Poster"],re["Year"],re["Type"],re["Actors"],re["Genre"]))
            c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,1,NOW())",(uuid,re["imdbID"]))
        #If it is, increase the count
        else:
            c.execute("UPDATE `Show` SET count = count + 1 WHERE imdbId = '"+re['imdbID']+"'")
            c.execute("UPDATE `Show` SET count = count + 1 WHERE imdbId = '"+re['seriesID']+"'")
            c.execute("SELECT * FROM Searched WHERE imdbId='"+re['imdbID']+"' AND deviceId='"+uuid+"'")
            searchReturns = c.fetchall()
            if not searchReturns:
                c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,1,NOW())",(uuid,re["imdbID"]))
                c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,0,NOW())",(uuid,re["seriesID"]))
            else:
                c.execute("UPDATE Searched SET entered_date=NOW() WHERE imdbId = '"+re['imdbID']+"'")
                c.execute("UPDATE Searched SET entered_date=NOW() WHERE imdbId = '"+re['seriesID']+"'")
        c.execute("SELECT * FROM `Show` WHERE imdbId = '"+re['seriesID']+"'")
        returns = c.fetchall()
        if not returns:
            se = requests.get("http://www.omdbapi.com/?i="+re['seriesID']+"&tomatoes=true&plot=full").json()
            c.execute("INSERT INTO `Show` (imdbId,title,imdbRating,poster,year,type,actors,genre,count) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,1)",(se["imdbID"],se["Title"],se["imdbRating"],se["Poster"],se["Year"],se["Type"],se["Actors"],se["Genre"]))
            c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,0,NOW())",(uuid,se["imdbID"]))
# except:
# print 'SQL Query Failed'
else:
# try:
#Check if searched for show is already in Database
        c.execute("SELECT * FROM `Show` WHERE imdbID = '"+re['imdbID']+"'")
        returns = c.fetchall()
        #If not, put it in
        if not returns:
            c.execute("INSERT INTO `Show` (imdbId,title,imdbRating,poster,year,type,actors,genre,count) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,1)",(re["imdbID"],re["Title"],re["imdbRating"],re["Poster"],re["Year"],re["Type"],re["Actors"],re["Genre"]))
            c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,1,NOW())",(uuid,re["imdbID"]))
        #If it is, increase the count
        else:
            c.execute("UPDATE `Show` SET count = count + 1 WHERE imdbId = '"+re['imdbID']+"'")
            c.execute("SELECT * FROM Searched WHERE imdbId='"+re['imdbID']+"' AND deviceId='"+uuid+"'")
            searchReturns = c.fetchall()
            if not searchReturns:
                c.execute("INSERT INTO Searched (deviceId,imdbId,direct,entered_date) VALUES (%s,%s,1,NOW())",(uuid,re["imdbID"]))
            else:
                c.execute("UPDATE Searched SET entered_date=NOW() WHERE imdbId = '"+re['imdbID']+"'")
# except:
# print 'SQL Query Failed'
conn.commit()
conn.close()
return json.dumps(re)
@app.route('/search2')
def search2():
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT * FROM Searched")
    returns = c.fetchall()
conn.close()
return str(returns)
@app.route('/top20')
def top20():
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT * FROM `Show` WHERE type != 'episode' ORDER BY count DESC LIMIT 20")
    returns = c.fetchall()
conn.close()
return json.dumps(returns)
@app.route('/etop20')
def etop20():
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT * FROM `Show` WHERE type='episode' ORDER BY count DESC LIMIT 20")
    returns = c.fetchall()
conn.close()
return json.dumps(returns)
@app.route('/mtop20')
def mtop20():
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT * FROM `Show` WHERE type='movie' ORDER BY count DESC LIMIT 20")
    returns = c.fetchall()
conn.close()
return json.dumps(returns)
@app.route('/stop20')
def stop20():
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT * FROM `Show` WHERE type='series' ORDER BY count DESC LIMIT 20")
    returns = c.fetchall()
conn.close()
return json.dumps(returns)
@app.route('/history/<uuid>')
def history(uuid):
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT title,imdbRating,poster,year,actors,`Show`.imdbId FROM `Show` JOIN Searched ON `Show`.imdbId = Searched.imdbId WHERE direct=1 AND deviceId='"+uuid+"' ORDER BY entered_date DESC")
    returns = c.fetchall()
conn.close()
return json.dumps(returns)
@app.route('/ref/<imdbid>/<uuid>')
def ref(imdbid,uuid):
conn = mysql.connect()
c = conn.cursor()
    c.execute("SELECT genre FROM `Show` WHERE imdbId='"+imdbid+"'")
    genre = c.fetchone()[0]
    mainGenre = genre.split(',')
    c.execute("SELECT * FROM `Show` WHERE type != 'episode' AND genre = '"+mainGenre[0]+"' AND imdbId IN (SELECT imdbId FROM Searched WHERE imdbId != '"+imdbid+"' AND deviceId IN (SELECT deviceId FROM Searched WHERE imdbId = '"+imdbid+"' AND deviceId != '"+uuid+"') GROUP BY imdbId ORDER BY count(imdbId)) LIMIT 3;")
    returns = c.fetchall()
    conn.close()
    return json.dumps(returns)
if __name__ == '__main__':
app.run()
#SELECT * FROM Show WHERE imdbID IN (select imdbId, count(*) as SearchAmount from Searched WHERE imdbID !='"+imdbid+"' AND deviceId IN
#(SELECT deviceId from Searched WHERE imdbID ='"+imdbid+"' AND deviceId !='"+uuid+"') GROUP BY imdbid ORDER BY SearchAmount DESC LIMIT 3);")
| null |
ServerApp/Quotr/home.py
|
home.py
|
py
| 8,045 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "flask.ext.mysql.MySQL",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "urllib.urlencode",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 196,
"usage_type": "call"
}
] |
337202070
|
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch
import numpy as np
from tqdm import tqdm
# from torch_lr_finder import LRFinder
# def __new__(self):
# return self
# def train(trainloader, device, model,EPOCH):
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=3, verbose=True)
# train_losses = []
# train_acc = []
# # pbar = tqdm(trainloader)
# for epoch in range(EPOCH):
# running_loss = 0.0
# correct = 0
# total = 0
# processed = 0
# running_loss_overall = 0.0
# for i, data in enumerate(trainloader, 0):
# # get the inputs
# inputs, labels = data
# inputs, labels = inputs.to(device), labels.to(device)
# # zero the parameter gradients
# optimizer.zero_grad()
# # forward + backward + optimize
# outputs = model(inputs)
# loss = criterion(outputs, labels)
# loss.backward()
# optimizer.step()
# pred = outputs.argmax(dim=1, keepdim=True) # get the index of the max log-probability
# correct += pred.eq(labels.view_as(pred)).sum().item()
# processed += len(data)
# # pbar.set_description(desc= f'Loss={loss.item()} Batch_id={i} Accuracy={100*correct/processed:0.2f}')
# # train_acc.append(100*correct/processed)
# # print statistics
# running_loss += loss.item()
# if i % 2000 == 1999: # print every 2000 mini-batches
# print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
# running_loss_overall += running_loss
# running_loss = 0.0
# # print('Epoch {} completed'.format(epoch))
# # print('Loss: {}. Accuracy: {}'.format(loss.item(), accuracy))
# # print('-'*20)
# # accuracy = 100 * correct / total
# print((running_loss_overall / (i + 1)))
# scheduler.step(100-(running_loss_overall / (i + 1)))
# train_acc.append(100*correct/processed)
# train_losses.append((100-(running_loss_overall / (i + 1))))
# lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
# lr_finder.range_test(trainloader, end_lr=100, num_iter=100)
# lr_finder.plot() # to inspect the loss-learning rate graph
# lr_finder.reset()
# print('Finished Training')
# return model, train_acc, train_losses
# train_losses = []
# test_losses = []
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
test_losses = 0.0
test_acc = 0.0
pred_wrong = []
true_wrong = []
image = []
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
_, predicted = torch.max(output.data, 1)
preds = predicted.cpu().numpy()
tar = target.cpu().numpy()
preds = np.reshape(preds,(len(preds),1))
tar = np.reshape(tar,(len(preds),1))
for i in range(len(preds)):
# pred.append(preds[i])
# true.append(target[i])
if(preds[i]!=tar[i]):
pred_wrong.append(preds[i])
true_wrong.append(tar[i])
image.append(data[i])
test_loss /= len(test_loader.dataset)
test_losses = test_loss
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test_acc = 100. * correct / len(test_loader.dataset)
return image,true_wrong,pred_wrong,test_acc,test_losses
def train( model, device, train_loader,test_loader, EPOCH):
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, nesterov=True, weight_decay= 0.0001)
    # epochs must match the EPOCH argument, otherwise scheduler.step() overruns
    # the scheduler's total_steps partway through training (5 warm-up epochs assumed).
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=0.008, pct_start=5/EPOCH, epochs=EPOCH, steps_per_epoch=len(train_loader))
train_losses = []
train_acc = []
test_losses = []
test_acc = []
for epoch in range(EPOCH):
correct = 0
processed = 0
pbar = tqdm(train_loader)
model.train()
for batch_idx, (data, target) in enumerate(pbar):
# get samples
data, target = data.to(device), target.to(device)
# Init
optimizer.zero_grad()
            # In PyTorch, we need to set the gradients to zero before starting to do backpropagation because PyTorch accumulates the gradients on subsequent backward passes.
# Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly.
# Predict
y_pred = model(data)
# Calculate loss
# regularization_loss = 0
# for param in model.parameters():
# regularization_loss += torch.sum(abs(param))
# classify_loss = criterion(y_pred,target)
loss = F.nll_loss(y_pred, target)
#loss = classify_loss + LAMDA * regularization_loss
# train_losses.append(loss)
# Backpropagation
loss.backward()
optimizer.step()
scheduler.step()
# Update pbar-tqdm
pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
processed += len(data)
pbar.set_description(desc= f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
# train_acc.append(100*correct/processed)
train_losses.append(loss.item())
train_acc.append(100*correct/processed)
img,true_wrong,pred_wrong,tst_acc ,tst_loss = test(model, device, test_loader)
test_losses.append(tst_loss)
test_acc.append(tst_acc)
# lr_finder = LRFinder(model, optimizer, criterion, device)
# lr_finder.range_test(train_loader, end_lr=100, num_iter=100)
# lr_finder.plot() # to inspect the loss-learning rate graph
# lr_finder.reset()
return train_losses, train_acc, model,img,true_wrong,pred_wrong,test_acc,test_losses
def validate(testloader, device, model):
pred_wrong = []
true_wrong = []
image = []
dataiter = iter(testloader)
    images, labels = next(dataiter)  # dataiter.next() is Python 2 syntax
images, labels = images.to(device), labels.to(device)
outputs = model(images)
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
# print(predicted)
# print(labels)
preds = predicted.cpu().numpy()
target = labels.cpu().numpy()
preds = np.reshape(preds,(len(preds),1))
target = np.reshape(target,(len(preds),1))
for i in range(len(preds)):
# pred.append(preds[i])
# true.append(target[i])
if(preds[i]!=target[i]):
pred_wrong.append(preds[i])
true_wrong.append(target[i])
image.append(images[i])
# if(predicted != labels):
# pred_wrong.append(predicted)
# true_wrong.append(labels)
# image.append(data[i])
print('Accuracy of the network on the 10000 test images: %2d %%' % ((100 * correct) / total))
return image,true_wrong,pred_wrong,
# def validate(testloader, device, model):
# dataiter = iter(testloader)
# images, labels = dataiter.next()
# images, labels = images.to(device), labels.to(device)
# outputs = model(images)
# correct = 0
# total = 0
# with torch.no_grad():
# for data in testloader:
# images, labels = data
# images, labels = images.to(device), labels.to(device)
# outputs = model(images)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# print('Accuracy of the network on the 10000 test images: %2d %%' % ((100 * correct) / total))
def classValidation(testloader, device, model, classes):
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
            for i in range(len(labels)):  # iterate over the actual batch size rather than a hard-coded 4
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
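# Hedged usage sketch (loaders, model, device and classes are assumptions of the
# caller, not defined in this module):
#   train_losses, train_acc, model, img, tw, pw, test_acc, test_losses = \
#       train(model, device, trainloader, testloader, EPOCH=24)
#   classValidation(testloader, device, model, classes)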
| null |
train_and_validate/train_and_validate.py
|
train_and_validate.py
|
py
| 10,102 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.no_grad",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.max",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.optim.lr_scheduler.OneCycleLR",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "torch.optim",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.nll_loss",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 236,
"usage_type": "call"
}
] |
159687290
|
import unittest
from synthesis import PerformanceHistorySynthesizer
from sklearn.model_selection import ParameterGrid
import numpy as np
class Test(unittest.TestCase):
def setUp(self):
pass
#def tearDown(self):
# pass
def testPerformanceHistorySynthesizer(self):
'''
Testing different parameter configurations, sanity check
'''
pgrid = {
"n_revisions": np.arange(1, 2000, 500),
"n_features": np.arange(10, 1200, 500),
"p_influential": np.linspace(0.001, 0.1, 5),
"n_changepoints": np.arange(1, 50, 10),
"p_geom": np.linspace(0.7, 0.99, 6),
"noise": np.linspace(0.001, 1, 5),
"seed": np.arange(0, 100, 25)
}
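        # NOTE: this grid expands to 4*3*5*5*6*5*4 = 36,000 parameter
        # combinations, so the sanity check below can take a while to run.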
pgrid = ParameterGrid(pgrid)
for i, param in enumerate(pgrid):
PerformanceHistorySynthesizer(
param["n_revisions"],
param["n_features"],
param["p_influential"],
param["n_changepoints"],
param["p_geom"],
param["noise"],
param["seed"],
)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| null |
change_point_identification/synthesis_test.py
|
synthesis_test.py
|
py
| 1,281 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.ParameterGrid",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "synthesis.PerformanceHistorySynthesizer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 45,
"usage_type": "call"
}
] |
341546158
|
from django.core.management.base import BaseCommand, CommandError
import csv
from starcatalogue.models import Star, FoldedLightcurve
class Command(BaseCommand):
help = 'Imports folded lightcurve data (results_total.dat)'
def add_arguments(self, parser):
parser.add_argument('file', nargs=1, type=open)
def handle(self, *args, **options):
r = csv.reader(options['file'][0], delimiter=' ', skipinitialspace=True)
imported_total = 0
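        # Column layout assumed by the parsing below: fields 1-2 form the
        # SuperWASP ID, 3 = period number, 4 = period length, 5 = sigma,
        # 6 = chi squared, and 7 = a flag (rows with a non-zero flag are skipped).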
for count, row in enumerate(r):
try:
if row[7] != '0':
continue
superwasp_id = "".join(row[1:3])
period_number = int(row[3])
period_length = float(row[4])
sigma = float(row[5])
chi_squared = float(row[6])
except IndexError:
print('Warning: Skipping row {} due to IndexError'.format(count))
continue
try:
star = Star.objects.get(superwasp_id=superwasp_id)
lightcurve = FoldedLightcurve.objects.get(
star=star,
period_number=period_number,
)
except (Star.DoesNotExist, FoldedLightcurve.DoesNotExist):
continue
lightcurve.period_length = period_length
lightcurve.sigma = sigma
lightcurve.chi_squared = chi_squared
lightcurve.save()
imported_total += 1
self.stdout.write("Total imported: {}".format(imported_total))
| null |
starcatalogue/management/commands/importlightcurves.py
|
importlightcurves.py
|
py
| 1,572 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "starcatalogue.models.Star.objects.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "starcatalogue.models.Star.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "starcatalogue.models.Star",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "starcatalogue.models.FoldedLightcurve.objects.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "starcatalogue.models.FoldedLightcurve.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "starcatalogue.models.FoldedLightcurve",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "starcatalogue.models.Star.DoesNotExist",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "starcatalogue.models.Star",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "starcatalogue.models.FoldedLightcurve.DoesNotExist",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "starcatalogue.models.FoldedLightcurve",
"line_number": 36,
"usage_type": "name"
}
] |
350558898
|
from django.urls import path
from . import views
app_name = "life"
urlpatterns = [
path("list/", views.DiaryListView.as_view(), name="list"),
path("create/", views.DiaryCreateView.as_view(), name="create"),
path("list/<int:pk>/", views.DiaryDetailView.as_view(), name="detail"),
path("list/<int:pk>/update/", views.DiaryUpdateView.as_view(), name="update"),
path("list/<int:pk>/delete/", views.DiaryDeleteView.as_view(), name="delete"),
]
| null |
life/urls.py
|
urls.py
|
py
| 462 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
}
] |
510274535
|
from domain.application import Application
from domain.model import Model
import json
import os
from os import listdir
from os.path import isfile, join
def parse_applications(app_path: str):
"""
Function that parse app_path file.
app_path (String): Path to a json file.
"""
applications = {}
with open(app_path) as f:
data = json.load(f)
args = ["short_name", "name", "path", "description", "type"]
allowed_types = set(["image", "text"])
# If applications attribute doesn't exist finish immediately
if "applications" not in data:
return {}
for app in data["applications"]:
validJson = True
# Check if "app" has all the requirements
for arg in args:
if arg not in app:
print("Missing argument {} in file {}".format(arg, app_path))
validJson = False
if arg == "type" and app[arg] not in allowed_types:
print("The type {} is not allowed. Error found in file {}".format(app[arg], app_path))
validJson = False
# Create a new "Application" with the values given
if validJson:
if app["short_name"] in applications:
print("There is already an application with the name: {}".format(app["short_name"]))
                elif 'show' in app and not app['show']:
print("The show value is set to false. Not showing this application")
else:
applications[app["short_name"].replace(" ", "")] = Application(app["name"], app["description"], app["path"], app['type'])
return applications
def parse_models(applicationList: dict):
"""
Given a dictionary of "Applications" instanciate the models related for each "application"
applicationList (Dictionary): dictionary with the "Applications"
"""
for app in list(applicationList.keys()):
parse_application_model(applicationList[app])
if len(applicationList[app].models) == 0:
print("{} doesn't have any model associated. Removing this application...".format(applicationList[app].name))
applicationList.pop(app, None)
def parse_application_model(application: Application):
"""
Given an "Application" instanciate the models found in "Application.models_path"
application (Application): application
"""
files = [f for f in listdir(application.models_path) if isfile(join(application.models_path, f))]
args = ["name", "description", "model_info", "file_format"]
for model_path in files:
with open(application.models_path + "/"+ model_path) as f:
data = json.load(f)
validJson = True
# Check if the model fulfill all the requirements
for arg in args:
if arg not in data:
print("Missing argument {} in file {}. The model is not going to be included".format(arg, application.models_path + "/"+ model_path))
validJson = False
# Add the model to the application
if validJson:
application.add_model(Model(data["name"].replace(" ", ""), data["name"], data["description"], data["model_info"], data["file_format"]))
def parse_config(config_path: str):
"""
Given the path of the configuration file returns a dictionary with all the elements.
config_path(String): Path to a json file.
"""
with open(config_path) as f:
data = json.load(f)
compulsory_args = ["models_path", "random_pictures_path", "random_texts_path", "upload_folder"]
non_compulsory_args = ["port", "number_of_pictures_to_show", "number_of_texts_to_show"]
error_arguments = ["number_of_random_pictures"]
# Compulsory arguments
for arg in compulsory_args:
if arg not in data:
raise FileNotFoundError("Missing argument '{}' in '{}'".format(arg, config_path))
if arg == "upload_folder":
if not (data[arg].startswith("/static") or data[arg].startswith("static")):
raise FileNotFoundError("Argument '{}' needs to be inside 'static' folder. Actual path: {}".format(arg, data[arg]))
else:
if data[arg][-1] == "/":
data[arg] = data[arg][:-1]
# Create folder if needed
if not os.path.exists(data[arg]):
print("Creating folder: {}".format(data[arg]))
os.makedirs(data[arg])
else:
# Remove files in the folder
for f in listdir(data[arg]):
if isfile(join(data[arg], f)):
os.remove(join(data[arg], f))
if arg == "random_pictures_path":
if not (data[arg].startswith("/static") or data[arg].startswith("static")):
raise FileNotFoundError("Argument '{}' needs to be inside 'static' folder. Actual path: {}".format(arg, data[arg]))
else:
if data[arg][-1] == "/":
data[arg] = data[arg][:-1]
elif arg == "random_texts_path":
if not (data[arg].startswith("/static") or data[arg].startswith("static")):
raise FileNotFoundError("Argument '{}' needs to be inside 'static' folder. Actual path: {}".format(arg, data[arg]))
else:
if data[arg][-1] == "/":
data[arg] = data[arg][:-1]
# Non compulsory arguments
for arg in non_compulsory_args:
if arg not in data:
if arg == "port":
data["port"] = 5000
elif arg == "number_of_pictures_to_show":
data["number_of_pictures_to_show"] = 4
elif arg == "number_of_texts_to_show":
data['number_of_texts_to_show'] = 5
# Error arguments
for arg in error_arguments:
if arg in data:
raise FileNotFoundError("The argument '{}' in '{}' cannot be used".format(arg, config_path))
return data
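# Hedged usage sketch (file names are assumptions, not part of this module):
#   config = parse_config("config.json")
#   applications = parse_applications("applications.json")
#   parse_models(applications)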
| null |
util/parse_json.py
|
parse_json.py
|
py
| 6,019 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "domain.application.Application",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "domain.application.Application",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "domain.model.Model",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 120,
"usage_type": "call"
}
] |
104746798
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from openpyxl import Workbook
import sqlserver
cursor_sql = sqlserver.get_cursor()
sql = " select contrato, lote.datacadastro , "\
" (SELECT nomefantasia FROM FILIAL emp WHERE emp.idfilial= lote.idfilial) as nome_filial, "\
" (SELECT TOP 1 CD.Nome "\
" FROM LOTE_ENDERECO LE "\
" INNER JOIN CIDADE CD ON CD.IDCIDADE = LE.IDCIDADE "\
" WHERE LE.IDLOTE = lote.IDLOTE AND LE.TIPOENDERECO = 1 ) AS cidade_origem, "\
" (SELECT TOP 1 CD.Nome "\
" FROM LOTE_ENDERECO LE "\
" INNER JOIN CIDADE CD ON CD.IDCIDADE = LE.IDCIDADE "\
" WHERE LE.IDLOTE = lote.IDLOTE AND LE.TIPOENDERECO = 2 ) AS cidade_destino, "\
" PROD.DESCRICAO AS produto "\
" from lote "\
" INNER JOIN PRODUTO PROD ON PROD.IDPRODUTO = lote.IDPRODUTO "\
" where lote.ativo = 1 and lote.status = 1 and ocultarloteappmobile = 1 "\
" order by lote.idfilial "
print(sql)
dados_sql = cursor_sql.execute(sql)
wb = Workbook()
ws = wb.active
list_row = []
list_row.append('Contrato')
list_row.append('Data cadastro')
list_row.append('Filial')
list_row.append('Cidade origem')
list_row.append('Cidade destino')
list_row.append('Produto')
ws.append(list_row)
for row in dados_sql:
list_row = []
list_row.append(row[0])
    list_row.append(row[1].strftime('%d/%m/%Y'))  # zero-padded day/month/year
list_row.append(row[2])
list_row.append(row[3])
list_row.append(row[4])
list_row.append(row[5])
ws.append(list_row)
wb.save("LotesOcultos.xlsx")
| null |
rel_lotesocultos.py
|
rel_lotesocultos.py
|
py
| 1,492 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlserver.get_cursor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 26,
"usage_type": "call"
}
] |
55979723
|
##
## B8IT106 Tools for Data Analytics CA_TWO
## October 2019
## Added to GitHub - October 8th 2019
## October9 Branch Created
##
## Module Imports for Machine Learning in Python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
##
def MainProg_CATwo():
# Set PyCharm Display Option
# This is done to improve console display
    # for use in documented screen shots
    desired_width = 400
    pd.set_option('display.width', desired_width)
    np.set_printoptions(linewidth=desired_width)
    pd.set_option('display.max_columns', 15)
# Identify file to be read into dataset
filename = "Spruce.csv"
# Set up file identifier for use in Console Print statements
dataDescription = "Spruce Dataset"
# Read CSV file and return dataset
df_spruce = ReadDataframe(filename)
# Display some basic initial statistics about dataset
# This data will be used to inform follow up data cleansing actions
DisplayBasicDataFrameInfo(df_spruce, dataDescription)
df_FinalSpruce = PreSplitDataManipulation(df_spruce, dataDescription)
X, Y = CreateLableAndFeatureSet(df_FinalSpruce, dataDescription)
X_train, X_test, Y_train, Y_test, X_Scaled = CreateTrainingAndTestData(X, Y, dataDescription, df_FinalSpruce)
#TuneRandomForestAlgorithm(X_train, Y_train)
# Determined by algorithm tuning process
best_estimator = 550
ImplementTunedRandomForestAlgorithm(X_train, X_test, Y_train, Y_test, best_estimator, X)
X = CreateRevisedFeatureSet(X)
## --- Rinse / Repeat ##
X_train, X_test, Y_train, Y_test, X_Scaled = CreateTrainingAndTestData(X, Y, dataDescription, df_FinalSpruce)
#####
# ---- Call RandomForest with revised FeatureSet
ImplementTunedRandomForestAlgorithm(X_train, X_test, Y_train, Y_test, best_estimator, X)
### ---- Implement PCA Visualisation and K-Means Clustering
x_pca = ImplementPCAVisualisation(X_Scaled, Y, dataDescription)
ImplementK_MeansClustering(X_Scaled, x_pca, dataDescription)
def CreateTrainingAndTestData(X, Y, dataDescription, origDataset):
X_Scaled = NormaliseTrainingData(X, dataDescription)
X_train, X_test, Y_train, Y_test = SplitDatasetIntoTrainAndTestSets(X_Scaled, Y, dataDescription, origDataset)
X_train, Y_train = ImplementOverSampling(X_train, Y_train)
return X_train, X_test, Y_train, Y_test, X_Scaled
def ReadDataframe(filename):
print("\n\tReading {} file..\n".format(filename))
# Read CSV file into panda dataframe
df = pd.read_csv(filename)
# Return the panda dataframe read in from the CSV file
return df
def DisplayBasicDataFrameInfo(dataset, datasetDescription):
print("\n\t{} Dataset Head Rows : \n".format(datasetDescription))
print(dataset.head())
print("\n\t{} Dataset Dimensions : \n".format(datasetDescription))
print(dataset.shape)
print("\n\t{} Dataset Datatypes : \n".format(datasetDescription))
print(dataset.dtypes)
print("\n\t{} Dataset 'Info()' : \n".format(datasetDescription))
print(dataset.info())
print("\n\t{} Dataset 'Describe()' : \n".format(datasetDescription))
print(dataset.describe())
def ConvertTreeType(column):
# Converting Categorical features into Numerical features
if column == 'Spruce':
return 1
else:
return 0
def PreSplitDataManipulation(dataset, datasetDescription):
# Check for Null Values
print("\n\tChecking for Null Values in {} Dataset - Result : {}\n".format(datasetDescription, dataset.isnull().values.any()))
# Pause
anykey = input("\nPress any key..")
# Check for Duplicates
numOfDuplicatedRows = dataset.duplicated().value_counts()
print("\n\tChecking for Duplicate Rows in {} Dataset - Result : {}\n\n".format(datasetDescription, numOfDuplicatedRows))
# Pause
anykey = input("\nPress any key..")
# Converting Categorical features into Numerical features - most algorithms need numeric values
# Just one column - the 'Tree Type' needs to be converted from a Categorical Values
# This is the target variable and a 'Spruce' is assigned a value of '1', and 'Other' is assigned
# a value of '0'
# Display the first two rows after conversion of 'Tree Type'
print("\nCategorical {} Dataset Head Rows Prior to Tree Type conversion : \n".format(datasetDescription))
print(dataset.head(2))
dataset['Tree_Type'] = dataset['Tree_Type'].apply(ConvertTreeType)
final_data = dataset
# Display the first two rows after conversion of 'Tree Type'
print("\nConverted Categorical {} Dataset Head Rows : \n".format(datasetDescription))
print(final_data.head(2))
# Pause
anykey = input("Press any key..")
# Display the change in datatype for 'Tree Type'
print("\nConverted Categorical {} Dataset Datatypes : \n".format(datasetDescription))
print(final_data.dtypes)
# Pause
anykey = input("Press any key..")
# Pre-Split Data Preparation
# Hidden missing values - check the zeroes - we already checked for NULL
#print(final_data.head(10))
#Elevation Slope Horizontal_Distance_To_Hydrology Vertical_Distance_To_Hydrology Horizontal_Distance_To_Roadways Horizontal_Distance_To_Fire_Points
SpruceFeatureCheckListForZeroValues = ['Elevation','Slope','Horizontal_Distance_To_Hydrology','Vertical_Distance_To_Hydrology','Horizontal_Distance_To_Roadways','Horizontal_Distance_To_Fire_Points']
print("\n\t# Rows in {1} dataframe {0}".format(len(final_data), datasetDescription))
# It would not seem logical that any of the first six colums in the Spruce dataset have a zero value
# This loop checks if there are any zero values
# If there were any zero values the user would determine the appropriate follow up action
for feature in SpruceFeatureCheckListForZeroValues:
print("\n\t# zero value rows in column {1}: {0}".format(len(final_data.loc[final_data[feature] == 0]),feature))
# Pause
anykey = input("Press any key..")
# Drop rows?
# Check for Correlation after all features converted to numeric
CheckDatasetForCorrelation(final_data, datasetDescription)
return final_data
def CheckDatasetForCorrelation(dataset, dataDescription):
print("\n\tCheck {} Dataset For any Correlation between features (Categorical features converted into Numerics): \n".format(dataDescription))
# Correlation analysis - a graphical representation of possible correlation of data
    sns.heatmap(dataset.corr(), annot=True, fmt='.2f')
    plt.show()  # seaborn draws on the current matplotlib figure; show it explicitly
# Pause
anykey = input("Press any key..")
def CreateLableAndFeatureSet(final_data, dataDescription):
# Check distribution of survived and died in cleaned dataset
# Check that the observation of any outcome is not too rare
num_obs = len(final_data)
num_true = len(final_data.loc[final_data['Tree_Type'] == 1]) # Spruce Tree = True
num_false = len(final_data.loc[final_data['Tree_Type'] == 0]) # Spruce Tree = False
print("Number of Spruce Tree Types : {0} ({1:2.2f}%)\n".format(num_true, (num_true/num_obs) * 100))
print("Number of Other Tree Types : {0} ({1:2.2f}%)\n".format(num_false, (num_false/num_obs) * 100))
# Dividing dataset into label and feature sets
X = final_data.drop('Tree_Type', axis = 1) # Features
Y = final_data['Tree_Type'] # Labels
print("\n\tDimentions of Label and Feature Dataset for {}".format(dataDescription))
print(X.shape)
print(Y.shape)
print("\n\tFeatured + Labeled - {} Dataset Head Rows X + Y : \n".format(dataDescription))
print(X.head(2))
print(Y.head(2))
# Pause
#anykey = input("Press any key..")
return X, Y
def NormaliseTrainingData(X, dataDescription):
print("\n\tScaling the Feature dataset..")
# Normalizing numerical features so that each feature has mean 0 and variance 1
feature_scaler = StandardScaler()
X_scaled = feature_scaler.fit_transform(X)
#print("\nPre-Scaled Features - {} Dataset Head Rows : \n".format(dataDescription))
#print(X.head(3))
# Pause
#anykey = input("Press any key..")
#print("\nPost-Scaled Features - {} Dataset Head Rows : \n".format(dataDescription))
#print(X_scaled.view())
# Pause
#anykey = input("Press any key..")
return X_scaled
def SplitDatasetIntoTrainAndTestSets(X_Scaled, Y, dataDescription, final_data):
# Dividing dataset into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X_Scaled, Y, test_size = 0.3, random_state = 100)
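    # NOTE: passing stratify=Y to train_test_split would guarantee the class
    # proportions verified below; a plain random split only approximates them.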
print("\n\t{} Training Set Shape : \n".format(dataDescription))
print(X_train.shape)
print("\n\t{} Test Set Shape : \n".format(dataDescription))
print(X_test.shape)
# Need to check if we have the desired 70 / 30 split in Train and Test Data
print("\n\t{0:0.2f}% in {1} training set".format( (len(X_train)/len(final_data.index)) * 100, dataDescription))
print("\n\t{0:0.2f}% in {1} test set".format((len(X_test)/len(final_data.index)) * 100, dataDescription))
# Verifying predicted value was split correctly - according to proportion in original dataset
print("\n")
print("\tOriginal True : {0} ({1:0.2f}%)".format(len(final_data.loc[final_data['Tree_Type'] == 1]), (len(final_data.loc[final_data['Tree_Type'] == 1])/len(final_data.index)) * 100.0))
print("\tOriginal False : {0} ({1:0.2f}%)".format(len(final_data.loc[final_data['Tree_Type'] == 0]), (len(final_data.loc[final_data['Tree_Type'] == 0])/len(final_data.index)) * 100.0))
print("\n")
print("\tTraining True : {0} ({1:0.2f}%)".format(len(Y_train[Y_train[:] == 1]), (len(Y_train[Y_train[:] == 1])/len(Y_train) * 100.0)))
print("\tTraining False : {0} ({1:0.2f}%)".format(len(Y_train[Y_train[:] == 0]), (len(Y_train[Y_train[:] == 0])/len(Y_train) * 100.0)))
print("\n")
print("\tTest True : {0} ({1:0.2f}%)".format(len(Y_test[Y_test[:] == 1]), (len(Y_test[Y_test[:] == 1])/len(Y_test) * 100.0)))
print("\tTest False : {0} ({1:0.2f}%)".format(len(Y_test[Y_test[:] == 0]), (len(Y_test[Y_test[:] == 0])/len(Y_test) * 100.0)))
print("\n")
return X_train, X_test, Y_train, Y_test
def ImplementOverSampling(X_train,Y_train):
# Implementing Oversampling to balance the dataset; SMOTE stands for Synthetic Minority Oversampling TEchnique
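    # SMOTE does not simply duplicate minority rows: each synthetic sample is an
    # interpolation x_new = x_i + u * (x_nn - x_i), with u ~ U(0, 1), between a
    # minority point x_i and one of its k nearest minority-class neighbours x_nn.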
print("Number of observations in each class before oversampling (training data): \n", pd.Series(Y_train).value_counts())
smote = SMOTE(random_state = 101)
    X_train,Y_train = smote.fit_resample(X_train,Y_train)
print("Number of observations in each class after oversampling (training data): \n", pd.Series(Y_train).value_counts())
# Pause
# anykey = input("Press any key..")
return X_train,Y_train
def TuneRandomForestAlgorithm(X_train, Y_train):
"""
In the below GridSearchCV(), scoring parameter should be set as follows:
scoring = 'accuracy' when you want to maximize prediction accuracy
scoring = 'recall' when you want to minimize false negatives
scoring = 'precision' when you want to minimize false positives
scoring = 'f1' when you want to balance false positives and false negatives (place equal emphasis on minimizing both)
"""
# Tuning the random forest parameter 'n_estimators' using Grid Search
    rfc = RandomForestClassifier(criterion='entropy', max_features='sqrt', random_state=1)
scoreOptions = ['accuracy','recall','precision','f1']
#grid_param = {'n_estimators': [100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700]}
grid_param = {'n_estimators': [10,20]}
print("\n\tRunning Grid Search Cross Validation tuning..")
for score in scoreOptions:
gd_sr = GridSearchCV(estimator=rfc, param_grid=grid_param, scoring=score, cv=5)
print("\n\tScore paramter set to : {}".format(score))
# Execute the fit function on the Training Data
gd_sr.fit(X_train, Y_train)
# Determine and display the optimum hyperparameter
best_parameters = gd_sr.best_params_
print("\n\tOptimum parameter is : {}".format(best_parameters))
#print(best_parameters)
best_result = gd_sr.best_score_ # Mean cross-validated score of the best_estimator
print("\n\tOptimum scoring result is : {}".format(best_result))
#print(best_result)
# Pause
anykey = input("Press any key..")
def ImplementTunedRandomForestAlgorithm(X_train, X_test, Y_train, Y_test, best_estimator, X):
# Building random forest using the tuned parameter
    #rfc = RandomForestClassifier(n_estimators=400, criterion='entropy', max_features='sqrt', random_state=1)
    rfc = RandomForestClassifier(n_estimators=best_estimator, criterion='entropy', max_features='sqrt', random_state=1)
rfc.fit(X_train,Y_train)
# Rate the importance of the features to guide the creation of a more targeted feature set for the algorithm
featimp = pd.Series(rfc.feature_importances_, index=list(X)).sort_values(ascending=False)
print("\n\tList of features in dataset by importance to prediction model : \n")
print(featimp)
# Pause
# anykey = input("Press any key..")
Y_pred = rfc.predict(X_test)
print("\n\tPrediction Accuracy: ", metrics.accuracy_score(Y_test, Y_pred))
# Displaying a Confusion Matrix
# Text on screen
print("\n\tConfusion Matrix\n")
print("{0}".format(metrics.confusion_matrix(Y_test, Y_pred)))
print("\n")
print("\n\tClassification Report\n")
print(metrics.classification_report(Y_test, Y_pred))
# Pause
# anykey = input("Press any key..")
conf_mat = metrics.confusion_matrix(Y_test, Y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(conf_mat,annot=True)
plt.title("Confusion_matrix")
plt.xlabel("Predicted Class")
plt.ylabel("Actual class")
plt.show()
print('Confusion matrix: \n', conf_mat)
print('TP: ', conf_mat[1,1])
print('TN: ', conf_mat[0,0])
print('FP: ', conf_mat[0,1])
print('FN: ', conf_mat[1,0])
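    # A small cross-check (sketch): derive the headline metrics directly from
    # the matrix. Assumes binary labels {0, 1} and that each class appears in
    # the predictions; values should agree with the report printed above.
    tp, tn = conf_mat[1, 1], conf_mat[0, 0]
    fp, fn = conf_mat[0, 1], conf_mat[1, 0]
    precision = tp / (tp + fp)  # of predicted Spruce, how many really are
    recall = tp / (tp + fn)     # of actual Spruce, how many were found
    print('Precision: {0:0.4f}  Recall: {1:0.4f}  F1: {2:0.4f}'.format(
        precision, recall, 2 * precision * recall / (precision + recall)))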
def CreateRevisedFeatureSet(dataset):
    # Selecting features with higher significance and redefining the feature set
X = dataset[['Elevation', 'Horizontal_Distance_To_Roadways', 'Horizontal_Distance_To_Fire_Points', 'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Hydrology','Slope','Soil_Type20','Soil_Type21','Soil_Type9','Soil_Type27','Soil_Type36']]
# Check for Correlation after reduced feature set created
CheckDatasetForCorrelation(X, "Reduced Spruce Trees Feature Set")
return X
def ImplementPCAVisualisation(X_Scaled, Y, dataDescription):
# Implementing PCA to visualize dataset
print("\n\tThe Implementation of PCA Visualisation...\n")
pca = PCA(n_components = 2)
pca.fit(X_Scaled)
x_pca = pca.transform(X_Scaled)
print(pca.explained_variance_ratio_)
print(sum(pca.explained_variance_ratio_))
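    # explained_variance_ratio_[i] is lambda_i / sum(lambda_j): the fraction of
    # total variance captured by component i, so the sum printed above is the
    # fraction of variance this 2-component projection retains.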
# Pause
# anykey = input("Press any key..")
plt.figure(figsize = (8,6))
plt.scatter(x_pca[:,0], x_pca[:,1], c=Y, cmap='plasma')
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.show()
return x_pca
def ImplementK_MeansClustering(X_Scaled, x_pca, dataDescription):
    # Implementing K-Means Clustering on the dataset and visualizing clusters
print("\n\tThe Implementation of K-Means Clustering and Visualisation...\n")
# Finding the number of clusters (K)
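    # Inertia is the within-cluster sum of squared distances to the nearest
    # centroid; it always decreases as K grows, so we look for the "elbow"
    # where the marginal drop flattens out rather than for the minimum itself.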
inertia = []
for i in range(1,11):
kmeans = KMeans(n_clusters = i, random_state = 100)
kmeans.fit(X_Scaled)
inertia.append(kmeans.inertia_)
plt.plot(range(1, 11), inertia)
plt.title('The Elbow Plot')
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.show()
kmeans = KMeans(n_clusters = 2)
kmeans.fit(X_Scaled)
print(kmeans.cluster_centers_)
plt.figure(figsize = (8,6))
plt.scatter(x_pca[:,0], x_pca[:,1], c=kmeans.labels_, cmap='plasma')
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.show()
MainProg_CATwo()
| null |
IrisML/CA2.py
|
CA2.py
|
py
| 15,582 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.set_option",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "imblearn.over_sampling.SMOTE",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 365,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 374,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 376,
"usage_type": "name"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 432,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 433,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 434,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 436,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.KMeans",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 441,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 445,
"usage_type": "name"
}
] |
312817033
|
import matplotlib.pyplot as plt
import numpy as np
indata = np.loadtxt("indata.dat")
time_size = indata[0]
grid_size = indata[1]
end_time = indata[2]
periodic_start_fig = plt.figure()
plt.title("Periodic 2D-rossby wave at T = 0")
periodic_start = np.loadtxt("benchmarks/2dperstart.dat")
X = np.linspace(0,1,len(periodic_start))
Y = np.linspace(0,1,len(periodic_start))
plt.contourf(Y,X,periodic_start,20,cmap= "RdYlBu")
plt.xlabel("X-extent - Dimensionless")
plt.ylabel("Y-extent - Dimensionless")
cb = plt.colorbar()
cb.set_label("Wave amplitude - Dimensionless")
periodic_slutt_fig = plt.figure()
plt.title("Periodic 2D-rossby wave at T = 150")
periodic_end = np.loadtxt("benchmarks/2dperlast.dat")
plt.contourf(Y,X,periodic_end,20,cmap= "RdYlBu")
plt.xlabel("X-extent - Dimensionless")
plt.ylabel("Y-extent - Dimensionless")
cb = plt.colorbar()
cb.set_label("Wave amplitude - Dimensionless")
periodic_start_fig.savefig("figures/2dstart.png")
periodic_slutt_fig.savefig("figures/2dend.png")
| null |
project5/plot3.py
|
plot3.py
|
py
| 997 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
}
] |
623181890
|
from nltk.corpus import stopwords
class Exhaustive:
""" Exhaustive (statistical) method for text summarization """
def __init__(self, **kwargs):
self.text = None
self.wfreq = {}
self.tokens = []
self.wstop = set(stopwords.words("english"))
if "tokens" in kwargs.keys() and "text" in kwargs.keys():
raise ValueError(
"Error: Both tokens and text specified. Specify only one.")
if "tokens" in kwargs.keys():
self.tokens = []
for line in kwargs["tokens"]:
self.tokens += [line.split(" ")]
assert type(self.tokens) in [list, tuple]
else:
self.text = kwargs["text"]
#assert type(self.text) == str
self.__TokenizePara(delim="।")
"""
@kwargs:
@term = term for which we need to calculate wt. freq.
"""
def __GetWeightedFreq(self, **kwargs):
"""
Returns the weighted frequency of the word specified.
Weighted frequency is calculated as:
wf = freq(wx)/max(freq(wi))
"""
if len(self.wfreq) == 0:
self.__PopulateFreq()
word = kwargs["term"]
if word.lower() in self.wstop or word.isdigit() or len(word) == 0:
return 0
if word.lower() not in self.wfreq.keys() and word.lower() not in self.wstop:
raise ValueError("Invalid word {0} specified".format(word))
return self.wfreq[word.lower()] / max(self.wfreq.values())
"""
@kwargs:
@k = k sentences to pick.
"""
def KTopRanks(self, **kwargs):
""" Returns the top "k" sentences based on the exh. method chosen """
if "k" not in kwargs.keys():
raise ValueError("Error: Missing arg \"k\"")
k = kwargs["k"]
if k > len(self.tokens):
raise ValueError("Error: dimm of k is greater than \"text\"")
if len(self.wfreq) == 0:
self.__PopulateFreq()
idx=0
arr = {}
for lines in self.tokens:
swt = 0
line = str()
for word in lines:
line += "{0} ".format(word)
swt += self.__GetWeightedFreq(term=word)
arr[line] = (swt,idx)
arr = sorted(arr.items(), key=lambda x: x[1], reverse=True)
arr=arr[:k]
arr=sorted(arr,key=lambda x:x[1][1])
return arr[:k]
def __PopulateFreq(self):
""" Builds the hashmap - words & their frequencies. """
for item in self.tokens:
for word in item:
if word not in self.wstop and len(word) > 0 and word.isdigit() == False:
if word.lower() in self.wfreq:
self.wfreq[word.lower()] += 1
else:
self.wfreq[word.lower()] = 1
"""
@kwargs:
    @delim = delimiter to split on
"""
def __TokenizePara(self, **kwargs):
""" Tokenize the paragraph based on the specified delim. """
if len(self.tokens) != 0:
raise ValueError("Error: dimm of tokens is not 0")
lines = list(filter(None, self.text.split(kwargs["delim"])))
for line in lines:
arr = []
for word in line.split(" "):
if len(word) > 0 and word != "\n":
arr.append(word.lower())
self.tokens.append(arr)
self.tokens = list(filter(None, self.tokens))
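# A minimal usage sketch (not part of the class; the sample text below is made
# up for illustration). The constructor splits on the danda delimiter and
# KTopRanks(k=1) returns the single highest-weighted sentence with its score.
if __name__ == "__main__":
    sample = "cats sit on mats। dogs chase cats on mats। birds fly।"
    summarizer = Exhaustive(text=sample)
    for sentence, (weight, order) in summarizer.KTopRanks(k=1):
        print(order, sentence.strip(), "->", round(weight, 3))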
| null |
Algorithms/weightedfreq/Hindi/first.py
|
first.py
|
py
| 3,478 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 10,
"usage_type": "name"
}
] |
491332647
|
import numpy as np
import argparse
import json
import time
from torch import nn
from torch import optim
import torch.utils.data as tdata
import torch.nn.functional as F
import os
import cv2
import torch
import torchvision
WIDTH = 1920
HEIGHT = 1208
path = "/local/temporary/audi/camera/"
# path_pic = "/local/temporary/audi/camera/camera/cam_front_center/"
path_pic = "audi/camera/camera/cam_front_center/"
path_labels = "labels/"
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def load_data():
pics = []
labels = []
im_w = 480
im_h = 302
i = 0
st1 = time.time()
for name in sorted(os.listdir(path_pic)):
img = cv2.imread(os.path.join(path_pic, name))
img = cv2.resize(img, (im_w, im_h))
pics.append(img)
i += 1
if i == 100:
            # TODO: remove this break; it only caps the data to speed up testing
break
pics = np.asarray(pics)
elapsed1 = time.time() - st1
print("time to get pictures: ",elapsed1, "s")
i = 0
st2 = time.time()
for name in sorted(os.listdir(path_labels)):
f = open(path_labels + name, "rb")
labels.append(json.load(f)['Angle'])
f.close()
i += 1
if i == 100:
            # TODO: remove this break; it only caps the data to speed up testing
break
labels = np.asarray(labels)
elapsed2 = time.time() - st2
print("time to get labels: ",elapsed2, "s")
return pics, labels
class My_CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=2, padding=0)
        # single regression output: the predicted steering angle
        self.fc1 = nn.Linear(239*3*150, 1)
    def forward(self, xb):
        xb = F.relu(self.conv1(xb))
        xb = xb.view(xb.size(0), -1)  # flatten to (batch, 3*150*239)
        xb = self.fc1(xb)
        return xb
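# Shape check for fc1 (a sketch; assumes the 302x480 resize above and NCHW
# input): with kernel 3, stride 2 and no padding, H_out = (302-3)//2 + 1 = 150
# and W_out = (480-3)//2 + 1 = 239, so the flattened size is 3*150*239.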
class Dataset(tdata.Dataset):
def __init__(self, pics, labels):
super().__init__()
self.pics = pics
self.labels = labels
def __len__(self):
return self.pics.shape[0]
    def __getitem__(self, i):
        # HWC uint8 -> CHW float in [0, 1], matching what nn.Conv2d expects
        pic = torch.from_numpy(self.pics[i].transpose(2, 0, 1)).float() / 255
        # shape (1,) so it matches the model's (batch, 1) output
        label = torch.tensor([self.labels[i]], dtype=torch.float32)
        return pic, label
def loss_batch(model, loss_function, data, labels, opt = None):
    loss = loss_function(model(data), labels)
    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()
    return loss.item(), len(data)
def get_loader(bs = 8):
data, labels = load_data()
border = int(data.shape[0]*4/5)
data_train, data_val = np.split(data, [border])
labels_train, labels_val = np.split(labels, [border])
dataset_tr = Dataset(data_train, labels_train)
dataset_val = Dataset(data_val, labels_val)
trn_loader = tdata.DataLoader(dataset_tr, batch_size = bs, shuffle = True)
val_loader = tdata.DataLoader(dataset_val, batch_size = bs*2)
return trn_loader, val_loader
def parse_args():
    parser = argparse.ArgumentParser('Simple steering-angle regressor')
    parser.add_argument('--learning_rate', '-lr', default=0.00001, type=float)
    parser.add_argument('--epochs', '-e', default=30, type=int)
    parser.add_argument('--batch_size', '-bs', default=8, type=int)
    return parser.parse_args()
def fit(epochs, train_dl, val_dl, model, opt, loss_fun):
    for epoch in range(epochs):
        for data, label in train_dl:
            data = data.to(dev)
            label = label.to(dev)
            loss_batch(model, loss_fun, data, label, opt)
    # with torch.no_grad():
    #TODO evaluate loss in training
def evaluate(val_dl, model, epoch, loss_function):
with torch.no_grad():
acc = 0
processed = 0
for data, labels in val_dl:
data = data.to(dev)
labels = labels.to(dev)
value, num = loss_batch(model, loss_function, data, labels)
acc = (num*value + processed * acc)/(num + processed)
processed += num
print(acc)
#just squared error averaged over the validation set
def main():
args = parse_args()
loss_fun = nn.MSELoss()
#trn_loader, val_loader = get_loader()
model = My_CNN()
model = model.to(dev)
# opt = torch.optim.Adam(model.parameters(), args.learning_rate)
if __name__ == "__main__":
main()
| null |
first_model.py
|
first_model.py
|
py
| 4,168 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.cuda.is_available",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.split",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.split",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 136,
"usage_type": "name"
}
] |
293270471
|
import os
import numpy as np
import cv2
import random
def make_data(filepath ):
fo = open(os.path.join(filepath, "lables.txt"), "w", encoding="UTF-8")
for i in range(0, 10):
picture_names = os.listdir(os.path.join(filepath, str(i)))
for name in picture_names:
fo.write(name + " " + str(i) + "\n")
fo.close()
def make_batch(filepath ,batch_size):
    fi = open(os.path.join(filepath, 'labels.txt'), "r")
lines = fi.readlines()
indexes = np.arange(len(lines))
random.shuffle(indexes)
batch_X = []
batch_y = []
for index in indexes:
sp = lines[index].strip().split()
assert len(sp) == 2
image = cv2.imread(os.path.join(filepath, sp[1], sp[0]))
image = cv2.resize(image, dsize=(227, 227))/255
batch_X.append(image)
batch_y.append(int(sp[1]))
if batch_size == len(batch_X):
yield np.array(batch_X), np.array(batch_y)
batch_X = []
batch_y = []
if len(batch_X) > 0:
yield np.array(batch_X), np.array(batch_y)
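# A minimal usage sketch (paths follow the hypothetical CIFAR-10 layout in the
# commented-out main below). make_batch is a generator, so batches are built
# lazily:
# for batch_X, batch_y in make_batch("./CIFAR-10-data/train", 32):
#     print(batch_X.shape, batch_y.shape)  # (32, 227, 227, 3) (32,)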
# if __name__ == "__main__":
# train_path = "./CIFAR-10-data/train"
# test_path = "./CIFAR-10-data/test"
# make_data(test_path) # 对训练数据或者测试数据生成一个label.txt的文件,该文档存储的是:图片名称+图片分类标签
| null |
data_process.py
|
data_process.py
|
py
| 1,383 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
}
] |
220255642
|
from beaker.middleware import SessionMiddleware
from bottle import Bottle, request, HTTPResponse
from jumpscale.packages.threebot_deployer.bottle.utils import (
list_threebot_solutions,
stop_threebot_solution,
delete_threebot_solution,
)
from jumpscale.loader import j
from jumpscale.packages.auth.bottle.auth import SESSION_OPTS, login_required, get_user_info
from jumpscale.packages.marketplace.bottle.models import UserEntry
from jumpscale.core.base import StoredFactory
from jumpscale.core.exceptions import exceptions
app = Bottle()
@app.route("/api/threebots/list")
@login_required
def list_threebots() -> str:
user_info = j.data.serializers.json.loads(get_user_info())
threebots = list_threebot_solutions(user_info["username"])
return j.data.serializers.json.dumps({"data": threebots})
@app.route("/api/threebots/stop", method="POST")
@login_required
def stop_threebot() -> str:
data = j.data.serializers.json.loads(request.body.read())
user_info = j.data.serializers.json.loads(get_user_info())
if "password" not in data or "uuid" not in data:
return HTTPResponse(
j.data.serializers.json.dumps({"error": "invalid body. missing keys"}),
status=400,
headers={"Content-Type": "application/json"},
)
try:
stop_threebot_solution(owner=user_info["username"], solution_uuid=data["uuid"], password=data["password"])
except (exceptions.Permission, exceptions.Validation):
return HTTPResponse(
j.data.serializers.json.dumps({"error": "invalid secret"}),
status=401,
headers={"Content-Type": "application/json"},
)
return j.data.serializers.json.dumps({"data": True})
@app.route("/api/threebots/destroy", method="POST")
@login_required
def destroy_threebot() -> str:
data = j.data.serializers.json.loads(request.body.read())
user_info = j.data.serializers.json.loads(get_user_info())
if "password" not in data or "uuid" not in data:
return HTTPResponse(
j.data.serializers.json.dumps({"error": "invalid body. missing keys"}),
status=400,
headers={"Content-Type": "application/json"},
)
try:
delete_threebot_solution(owner=user_info["username"], solution_uuid=data["uuid"], password=data["password"])
except (exceptions.Permission, exceptions.Validation):
return HTTPResponse(
j.data.serializers.json.dumps({"error": "invalid secret"}),
status=401,
headers={"Content-Type": "application/json"},
)
return j.data.serializers.json.dumps({"data": True})
@app.route("/api/allowed", method="GET")
@login_required
def allowed():
user_factory = StoredFactory(UserEntry)
user_info = j.data.serializers.json.loads(get_user_info())
tname = user_info["username"]
explorer_url = j.core.identity.me.explorer.url
instances = user_factory.list_all()
for name in instances:
user_entry = user_factory.get(name)
if user_entry.tname == tname and user_entry.explorer_url == explorer_url and user_entry.has_agreed:
return j.data.serializers.json.dumps({"allowed": True})
return j.data.serializers.json.dumps({"allowed": False})
@app.route("/api/accept", method="GET")
@login_required
def accept():
user_factory = StoredFactory(UserEntry)
user_info = j.data.serializers.json.loads(get_user_info())
tname = user_info["username"]
explorer_url = j.core.identity.me.explorer.url
if "testnet" in explorer_url:
explorer_name = "testnet"
elif "devnet" in explorer_url:
explorer_name = "devnet"
elif "explorer.grid.tf" in explorer_url:
explorer_name = "mainnet"
else:
return HTTPResponse(
j.data.serializers.json.dumps({"error": f"explorer {explorer_url} is not supported"}),
status=500,
headers={"Content-Type": "application/json"},
)
user_entry = user_factory.get(f"{explorer_name}_{tname.replace('.3bot', '')}")
if user_entry.has_agreed:
return HTTPResponse(
j.data.serializers.json.dumps({"allowed": True}), status=200, headers={"Content-Type": "application/json"}
)
else:
user_entry.has_agreed = True
user_entry.explorer_url = explorer_url
user_entry.tname = tname
user_entry.save()
return HTTPResponse(
j.data.serializers.json.dumps({"allowed": True}), status=201, headers={"Content-Type": "application/json"}
)
app = SessionMiddleware(app, SESSION_OPTS)
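# A hedged client-side sketch (hypothetical host and session cookie; the route
# prefix depends on how the package is mounted). Each POST endpoint expects a
# JSON body carrying "uuid" and "password", e.g.:
# curl -X POST https://<host>/api/threebots/stop \
#      -H "Content-Type: application/json" -b "beaker.session.id=<session>" \
#      -d '{"uuid": "<solution-uuid>", "password": "<secret>"}'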
| null |
jumpscale/packages/threebot_deployer/bottle/solutions.py
|
solutions.py
|
py
| 4,604 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bottle.Bottle",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.get_user_info",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "jumpscale.packages.threebot_deployer.bottle.utils.list_threebot_solutions",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.login_required",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "bottle.request.body.read",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bottle.request.body",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "bottle.request",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.get_user_info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.threebot_deployer.bottle.utils.stop_threebot_solution",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "jumpscale.core.exceptions.exceptions.Permission",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.core.exceptions.exceptions",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "jumpscale.core.exceptions.exceptions.Validation",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.login_required",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "bottle.request.body.read",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "bottle.request.body",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "bottle.request",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.get_user_info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.threebot_deployer.bottle.utils.delete_threebot_solution",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "jumpscale.core.exceptions.exceptions.Permission",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.core.exceptions.exceptions",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "jumpscale.core.exceptions.exceptions.Validation",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.login_required",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "jumpscale.core.base.StoredFactory",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "jumpscale.packages.marketplace.bottle.models.UserEntry",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.get_user_info",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.core",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.login_required",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "jumpscale.core.base.StoredFactory",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "jumpscale.packages.marketplace.bottle.models.UserEntry",
"line_number": 88,
"usage_type": "argument"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.loads",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.get_user_info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.core",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "bottle.HTTPResponse",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data.serializers.json.dumps",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "jumpscale.loader.j.data",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "jumpscale.loader.j",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.login_required",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "beaker.middleware.SessionMiddleware",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "jumpscale.packages.auth.bottle.auth.SESSION_OPTS",
"line_number": 122,
"usage_type": "argument"
}
] |
537324419
|
'''
@author: katiepfleger
'''
from __future__ import print_function
import sqlite3
import webbrowser
from urllib.parse import urlencode
import requests
from logout import check_input
CONN = sqlite3.connect("bpm.db")
C = CONN.cursor()
CLIENT_ID = '482102fb45cb45fdb465ef73801f4665'
CLIENT_SECRET = 'dc7269e0e5a84e71b9b27f857055b41f'
REDIRECT_URI = 'https://localhost/'
SCOPES = ('user-library-modify user-library-read user-read-playback-state '
          'user-read-currently-playing user-modify-playback-state user-read-recently-played '
          'playlist-read-private playlist-modify-public playlist-modify-private '
          'playlist-read-collaborative')
def welcome():
'''
Performs the user authentication and login flow
'''
login_result = C.execute('SELECT COUNT(*) FROM Credentials').fetchone()[0]
if login_result == 1:
print('Welcome back to BPM!')
return True
else:
print('Welcome to BPM!')
print('Do you have a BPM account?')
print('0:\tYes')
print('1:\tNo')
has_account = input()
check_input(has_account)
if has_account == '0':
user_id = get_username()
if user_id:
authenticate()
get_code(user_id)
CONN.close()
return True
else:
return False
else:
print('Do you have a Spotify account?')
print('0:\tYes')
print('1:\tNo')
has_spotify = input()
check_input(has_spotify)
if has_spotify == '0':
user_id = create_username()
authenticate()
get_code(user_id)
CONN.close()
return True
else:
print('Please sign up for a Spotify account and return.\n')
return False
def get_username():
'''
Checks for existing BPM users.
'''
username = input('Enter your BPM username: ')
check_input(username)
try:
user_id = C.execute("SELECT U.id FROM User U WHERE U.username='%s'" \
% username).fetchone()
if user_id:
return user_id[0]
else:
print('There are no users with that username.')
return create_username()
except ValueError:
        print('There are no users with that username. '
              'Would you like to try again or create a new account?')
print('0:\tTry Again')
print('1:\tCreate New Account')
if input() == '0':
return get_username()
else:
print('Do you have a Spotify account?')
print('0:\tYes')
print('1:\tNo')
has_spotify = input()
check_input(has_spotify)
if has_spotify == '0':
return create_username()
else:
print('Please sign up for a Spotify account and return.\n')
return False
def create_username():
'''
Creates new BPM user.
'''
username = input('Please enter a username for your new BPM account\n')
check_input(username)
try:
C.execute('INSERT INTO User(username) VALUES (?);', (username,))
CONN.commit()
user_id = C.execute('SELECT U.id FROM User U WHERE U.username=?;', \
(username,)).fetchone()[0]
return user_id
except ValueError:
print('That username is already taken. Please enter a different username.\n')
return create_username()
def authenticate():
'''
Authenticates current user with the BPM app in Spotify.
'''
auth_req = "https://accounts.spotify.com/authorize" + \
"?redirect_uri=" + REDIRECT_URI + \
"&scope=" + SCOPES + \
"&client_id=" + CLIENT_ID + \
"&response_type=code"
auth_req.replace(":", "%3A").replace("/", "%2F").replace(" ", "+")
webbrowser.open(auth_req)
def get_code(user_id):
'''
Prompts user to enter validation code that they are redirected to.
'''
print("Please copy and paste your validation code from the browser: ")
auth_code = input()
check_input(auth_code)
finish_auth(user_id, auth_code)
def finish_auth(user_id, auth_code):
'''
Adds user authentication tokens to Credentials.
'''
resp = requests.post("https://accounts.spotify.com/api/token",
data={"grant_type": "authorization_code",
"redirect_uri": REDIRECT_URI,
"code": auth_code},
auth=(CLIENT_ID, CLIENT_SECRET))
resp.raise_for_status()
resp_json = resp.json()
access_token = resp_json["access_token"]
refresh_token = resp_json["refresh_token"]
expires_in = resp_json["expires_in"]
C.execute('INSERT INTO Credentials(user_id, access_token, refresh_token, expires_in) \
VALUES (?, ?, ?, ?);', (user_id, access_token, refresh_token, expires_in))
CONN.commit()
def get_current_user_token():
'''
Retrieves current user's access token.
'''
local_conn = sqlite3.connect("bpm.db")
c = local_conn.cursor()
access_token = c.execute('SELECT C.access_token FROM Credentials C').fetchone()[0]
local_conn.close()
return access_token
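# A minimal sketch of the standard OAuth token-refresh flow (not called
# anywhere else in this file; it assumes the Credentials schema used above
# and the standard Spotify token endpoint).
def refresh_access_token(user_id, refresh_token):
    '''
    Exchanges a refresh token for a new access token and stores it.
    '''
    resp = requests.post("https://accounts.spotify.com/api/token",
                         data={"grant_type": "refresh_token",
                               "refresh_token": refresh_token},
                         auth=(CLIENT_ID, CLIENT_SECRET))
    resp.raise_for_status()
    resp_json = resp.json()
    access_token = resp_json["access_token"]
    local_conn = sqlite3.connect("bpm.db")
    c = local_conn.cursor()
    c.execute('UPDATE Credentials SET access_token=?, expires_in=? WHERE user_id=?;',
              (access_token, resp_json["expires_in"], user_id))
    local_conn.commit()
    local_conn.close()
    return access_token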
| null |
src/auth.py
|
auth.py
|
py
| 5,237 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "webbrowser.open",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "logout.check_input",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 158,
"usage_type": "call"
}
] |
106623030
|
# -*- coding: utf-8 -*-
'''
@File : linkagePage.py
@Time : 2018/10/23 14:18
@Author : Chenzd
@Project : Linkage page class
@Software: PyCharm
'''
import random
import time
from page.LeeBus.curtain.curtain_editPage import Curtain_editPage
from page.basePage import BasePage
from selenium.webdriver.common.by import By
from page.LeeBus.common import Common
from public.configLog import Logger
logger = Logger(logger='page.LeeBus.linkage.linkagePage').getlog()
class LinkagePage(BasePage):
    # Color-tunable light
    tvSettingTime = (By.ID, 'com.leelen.luxdomo:id/tvSettingTime')
    titleAddIv = (By.ID, 'com.leelen.luxdomo:id/titleAddIv')
    linkage_edit_name = (By.ID, 'com.leelen.luxdomo:id/linkage_edit_name')
    linkage_meet_conditions_txt = (By.ID, 'com.leelen.luxdomo:id/linkage_meet_conditions_txt')
    # Any condition
    linkage_meet_oneCondition = (By.ID, 'com.leelen.luxdomo:id/linkage_meet_oneCondition')
    # All conditions
    linkage_meet_allConditions = (By.ID, 'com.leelen.luxdomo:id/linkage_meet_allConditions')
    # Add condition
    linkage_meet_conditions_add = (By.ID, 'com.leelen.luxdomo:id/linkage_meet_conditions_add')
    # Add device
    linkage_device_add = (By.ID, 'com.leelen.luxdomo:id/linkage_device_add')
    # Add scene
    linkage_scene_add = (By.ID, 'com.leelen.luxdomo:id/linkage_scene_add')
    # Names of all sensors that can be added as conditions
    linkage_add_condition_listView_name = (By.ID, 'com.leelen.luxdomo:id/linkage_add_condition_listView_name')
    # Second-level screen for generic sensor selection
    other_device_listView_status = (By.ID, 'com.leelen.luxdomo:id/other_device_listView_status')
    # Second-level screen for the fingerprint lock
    finger_print_lock_all_name = (By.ID, 'com.leelen.luxdomo:id/finger_print_lock_all_name')
    # Temperature sensor second-level screen
    # Temperature above
    linkage_temperature_highText = (By.ID, 'com.leelen.luxdomo:id/linkage_temperature_highText')
    # Set temperature
    linkage_temperature_high_value = (By.ID, 'com.leelen.luxdomo:id/linkage_temperature_high_value')
    # Temperature below
    linkage_temperature_lowText = (By.ID, 'com.leelen.luxdomo:id/linkage_temperature_lowText')
    # Set temperature
    linkage_temperature_value = (By.ID, 'com.leelen.luxdomo:id/linkage_temperature_value')
    # Humidity sensor second-level screen
    # Humidity above
    linkage_humidity_high_choose = (By.ID, 'com.leelen.luxdomo:id/linkage_humidity_high_choose')
    # Set humidity (above)
    linkage_humidity_high_value = (By.ID, 'com.leelen.luxdomo:id/linkage_humidity_high_value')
    # Humidity below
    linkage_humidity_low_choose = (By.ID, 'com.leelen.luxdomo:id/linkage_humidity_low_choose')
    # Set humidity (below)
    linkage_humidity_value = (By.ID, 'com.leelen.luxdomo:id/linkage_humidity_value')
    # Select all devices
    add_device_group_choice = (By.ID, 'com.leelen.luxdomo:id/add_device_group_choice')
    # Randomly pick one scene
    link_add_scene_list_item_name = (By.ID, 'com.leelen.luxdomo:id/link_add_scene_list_item_name')
    # Linkage delete button
    linkage_add_delete = (By.ID, 'com.leelen.luxdomo:id/linkage_add_delete')
    # Device delete button -- light
    device_deleteBtn = (By.ID, 'com.leelen.luxdomo:id/btnDelete')
    # Device delete button -- central air conditioner
    center_conditioner_delete = (By.ID, 'com.leelen.luxdomo:id/center_conditioner_delete')
    # Device delete button -- devices other than lights
    device_setting_delete = (By.ID, 'com.leelen.luxdomo:id/device_setting_delete')
    # Device delete button -- floor heating
    floorHeartDeleteBt = (By.ID, 'com.leelen.luxdomo:id/floorHeartDeleteBt')
    # Device delete button -- color-tunable light
    tvDelete = (By.ID, 'com.leelen.luxdomo:id/tvDelete')
    # Trash-can icon
    link_add_scene_list_item_delete = (By.ID, 'com.leelen.luxdomo:id/link_add_scene_list_item_delete')
    # Device name
    scene_add_device_type = (By.ID, 'com.leelen.luxdomo:id/scene_add_device_type')
    tvMsg = (By.ID, 'com.leelen.luxdomo:id/tvMsg')
    tvSecond = (By.ID, 'com.leelen.luxdomo:id/tvSecond')
    allopenBtn = (By.ID, 'com.leelen.luxdomo:id/deviceIv')
    linkage_name_list = []  # stores the names of created linkages
    # Enter the linkage page and randomly edit the name
    def enter_linkage(self):
        time.sleep(2)
        self.swipe_left()
    def back_top(self):
        while not self.is_element_exist(self.linkage_edit_name):
            self.swipe_down()
    def add_linkage(self):
        self.click(self.titleAddIv)
        self.input(self.linkage_edit_name, Common(self.driver).random_name())
        self.linkage_name_list.append(self.getText(self.linkage_edit_name))
    # Check whether the linkage was created successfully
    def check_creat_linkage(self):
        time.sleep(3)
        self.swipe_up()
        name = self.findAU('text(\"' + self.linkage_name_list[-1] + '\")')
        suc = 0
        if name:
            print('Result: linkage created successfully')
        else:
            suc = 1
            print('Result: linkage creation failed')
        # self.linkage_name_list = []  # reset to empty so later runs are unaffected
        return suc
    def back_scenePage(self):
        time.sleep(2)
        self.swipe_rigth()
    # Tap the plus sign -- add a linkage condition
    def linkage_conditions_add(self):
        self.click(self.linkage_meet_conditions_add)
    # Choose a linkage condition at random
    def linkage_conditions(self):
        eles = self.get_elements(self.linkage_add_condition_listView_name)
        ran = random.randint(0, len(eles)-1)
        conditions_text = self.getText_element(eles[ran])
        print('Selected condition sensor: [' + conditions_text + ']')
        self.click_element(eles[ran])
    # Operations on the temperature sensor second-level screen
    def temperature_sensor(self, first_id, temperature_value):
        text = self.getText(first_id)
        temper_text1 = self.getText(temperature_value)
        print('Temperature shown before setting: ' + temper_text1 + '°C')
        self.click(temperature_value)
        self.swipeControl(533, 1019, 533, 692)
        self.click(Curtain_editPage.sureBtn)
        temper_text = self.getText(temperature_value)
        print('Temperature shown after setting: ' + temper_text + '°C')
        self.click(first_id)
        print('Selected ' + text + temper_text + '°C')
    # Operations on the humidity sensor second-level screen
    def humidity_sensor(self, first_id, temperature_value):
        self.click(first_id)
        temper_text1 = self.getText(temperature_value)
        print('Humidity shown before setting: ' + temper_text1 + '%')
        self.click(temperature_value)
        self.swipeControl(533, 1019, 533, 692)
        self.click(Curtain_editPage.sureBtn)
        temper_text = self.getText(temperature_value)
        print('Humidity shown after setting: ' + temper_text + '%')
        if first_id == self.linkage_humidity_high_choose:
            print('Selected humidity above ' + temper_text + '%')
        else:
            print('Selected humidity below ' + temper_text + '%')
    # Decide what to do after entering the second-level screen
    def linkage_conditions_next(self):
        if self.is_element_exist(self.finger_print_lock_all_name):  # fingerprint lock screen: tap cancel and pick again
            self.click(Common.cancelTv)
            self.linkage_conditions()
            self.linkage_conditions_next()
        elif self.is_element_exist(self.linkage_temperature_highText):  # temperature sensor screen: run the temperature flow
            list_ele = [self.linkage_temperature_highText, self.linkage_temperature_lowText]
            ran = random.randint(0, len(list_ele)-1)
            if ran == 0:
                self.temperature_sensor(self.linkage_temperature_highText, self.linkage_temperature_high_value)
            else:
                self.temperature_sensor(self.linkage_temperature_lowText, self.linkage_temperature_value)
        elif self.is_element_exist(self.linkage_humidity_high_choose):  # humidity sensor screen: run the humidity flow
            list_ele = [self.linkage_humidity_high_choose, self.linkage_humidity_low_choose]
            ran = random.randint(0, len(list_ele)-1)
            if ran == 0:
                self.humidity_sensor(self.linkage_humidity_high_choose, self.linkage_temperature_high_value)
            else:
                self.humidity_sensor(self.linkage_humidity_low_choose, self.linkage_temperature_value)
        else:  # generic sensor second-level screen
            eles = self.get_elements(self.other_device_listView_status)
            ran = random.randint(0, len(eles)-1)
            text = self.getText_element(eles[ran])
            print('Selected: ' + text)
            self.click_element(eles[ran])
            Common(self.driver).save_def()
    # Add device -- tap the plus sign
    def add_device_btn(self):
        self.click(self.linkage_device_add)
    def linkage_devices_add(self):
        self.click(self.add_device_group_choice)
        print('Linkage-controlled devices: [select all]')
        Common(self.driver).save_def()
    # Add scene -- tap the plus sign and pick a scene at random
    def linkage_scenes_add(self):
        while not self.is_element_exist(self.linkage_scene_add):
            self.swipe_up()
        self.click(self.linkage_scene_add)
        eles = self.get_elements(self.link_add_scene_list_item_name)
        ran = random.randint(0, len(eles)-1)
        text = self.getText_element(eles[ran])
        print('Selected linkage-controlled scene: [' + text + ']')
        self.click_element(eles[ran])
        Common(self.driver).save_def()
    # Edit the trigger condition
    def edit_conditions(self):
        text1 = self.getText(self.linkage_meet_conditions_txt)
        print('Condition before editing: ' + text1)
        self.click(self.linkage_meet_conditions_txt)
        self.click(self.linkage_meet_oneCondition)
        text2 = self.getText(self.linkage_meet_conditions_txt)
        print('Condition after editing: ' + text2)
        suc = 0
        if text2 != text1:
            print('Condition edited successfully')
        else:
            suc = 1
            print('Condition unchanged after editing')
        return suc
    # Compare the content of the delete confirmation dialog
    def confirm_alert(self, msg_alert):
        suc = 0
        if self.is_element_exist(self.tvMsg):
            msg = self.getText(self.tvMsg)
            message = msg.strip().replace('\n', '')
            print('Dialog content: [%s]' % message)
            self.click(self.tvSecond)
            if msg_alert == message:
                print('Result: dialog content is correct')
            else:
                suc = 1
                print('Result: dialog content is wrong')
        return suc
    # Pick a device at random and delete it from its second-level screen
    def delete_device_enter(self, msg_alert):
        eles = self.get_elements(self.scene_add_device_type)
        ran = random.randint(0, len(eles)-1)
        text_before = self.getText_element(eles[ran])
        self.click_element(eles[ran])
        time.sleep(1)
        if self.is_element_exist(self.device_deleteBtn):
            self.click(self.device_deleteBtn)
        elif self.is_element_exist(self.center_conditioner_delete):
            self.click(self.center_conditioner_delete)
        elif self.is_element_exist(self.device_setting_delete):
            self.click(self.device_setting_delete)
        elif self.is_element_exist(self.tvDelete):
            self.click(self.tvDelete)
        else:
            self.click(self.floorHeartDeleteBt)
        suc = self.confirm_alert(msg_alert)
        text_after = self.getText_element(eles[ran])
        if text_before != text_after:
            print('Result: deletion succeeded')
        else:
            suc = 1
            print('Result: deletion failed')
        return suc
    # Pick a device at random and delete it from the device list (long press)
    def delete_device_list(self, msg_alert):
        eles = self.get_elements(self.scene_add_device_type)
        ran = random.randint(0, len(eles) - 1)
        text_before = self.getText_element(eles[ran])
        time.sleep(1)
        self.longPress(eles[ran])
        suc = self.confirm_alert(msg_alert)
        text_after = self.getText_element(eles[ran])
        if text_before != text_after:
            print('Result: deletion succeeded')
        else:
            suc = 1
            print('Result: deletion failed')
        return suc
    # Delete a scene from the linkage
    def delete_scene_list(self, msg_alert):
        if self.is_element_exist(self.link_add_scene_list_item_delete):
            self.click(self.link_add_scene_list_item_delete)
        suc = self.confirm_alert(msg_alert)
        if self.is_element_exist(self.link_add_scene_list_item_delete):
            suc = 1
            print('Result: failed to delete the scene from the linkage')
        else:
            print('Result: deleted the scene from the linkage successfully')
        return suc
    # Delete the entire linkage
    def delete_linkage(self):
        while not self.is_element_exist(self.linkage_add_delete):
            self.swipe_up()
        self.click(self.linkage_add_delete)
if __name__ == '__main__':
    msg = ' After deletion, the linkage can no longer control this device\nDelete it? '
    print(msg.strip().replace('\n',''))
| null |
page/LeeBus/linkage/linkagePage.py
|
linkagePage.py
|
py
| 13,018 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "public.configLog.Logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "page.basePage.BasePage",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "page.LeeBus.common.Common",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "page.LeeBus.curtain.curtain_editPage.Curtain_editPage.sureBtn",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "page.LeeBus.curtain.curtain_editPage.Curtain_editPage",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "page.LeeBus.curtain.curtain_editPage.Curtain_editPage.sureBtn",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "page.LeeBus.curtain.curtain_editPage.Curtain_editPage",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "page.LeeBus.common.Common.cancelTv",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "page.LeeBus.common.Common",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "page.LeeBus.common.Common",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "page.LeeBus.common.Common",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "page.LeeBus.common.Common",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 271,
"usage_type": "call"
}
] |
307872856
|
'''
Module that contains the two main data types
`target_extraction.data_types.TargetText` and
`target_extraction.data_types.TargetTextCollection` where the later is a
container for the former.
classes:
1. `target_extraction.data_types.TargetText`
2. `target_extraction.data_types.TargetTextCollection`
'''
from collections.abc import MutableMapping
from collections import OrderedDict, Counter
import copy
import json
from pathlib import Path
from typing import Optional, List, Tuple, Iterable, NamedTuple, Any, Callable
from typing import Union, Dict
from target_extraction.tokenizers import is_character_preserving, token_index_alignment
from target_extraction.data_types_util import Span
class TargetText(MutableMapping):
'''
This is a data structure that inherits from MutableMapping which is
essentially a python dictionary.
    The following are the default keys that are in all `TargetText`
    objects; additional items can be added through __setitem__ but the
    default items cannot be deleted.
1. text - The text associated to all of the other items
2. text_id -- The unique ID associated to this object
    3. targets -- List of all target words that occur in the text. A special
                  placeholder of None (python None value) can exist where the
                  target does not exist but a related Category does; in that
                  case the related span is Span(0, 0). This type of special
                  placeholder is in place for the SemEval 2016 Restaurant
                  dataset, which links the categories to the targets, though
                  not all categories have related targets, thus None.
4. spans -- List of Span NamedTuples where each one specifies the start and
end of the respective targets within the text.
    5. target_sentiments -- List specifying the sentiment of the respective
targets within the text.
6. categories -- List of categories that exist in the data which may or
       may not link to the targets (this is dataset specific). NOTE:
depending on the dataset and how it is parsed the category can exist
but the target does not as the category is a latent variable, in
these cases the category and category sentiments will be the same size
which would be a different size to the target and target sentiments
size. E.g. can happen where the dataset has targets and categories
       but they do not map to each other in a one to one manner, e.g. the
       SemEval 2014 restaurant dataset, where some samples contain
categories but no targets. Another word for category can be aspect.
7. category_sentiments -- List of the sentiments associated to the
categories. If the categories and targets map to each other then
this will be empty and you will only use the target_sentiments.
Methods:
1. to_json -- Returns the object as a dictionary and then encoded using
json.dumps
2. tokenize -- This will add a new key `tokenized_text` to this TargetText
instance that will store the tokens of the text that is associated to
this TargetText instance.
3. pos_text -- This will add a new key `pos_tags` to this TargetText
instance. This key will store the pos tags of the text that is
associated to this Target Text instance.
4. force_targets -- Does not return anything but modifies the `spans` and
`text` values as whitespace is prefixed and suffixed the target unless
the prefix or suffix is whitespace. NOTE that this is the only method
that currently can change the `spans` and `text` key values after they
have been set.
5. sequence_labels -- Adds the `sequence_labels` key to this TargetText
       instance which can be used to train a machine learning algorithm to
detect targets.
    6. get_sequence_spans -- The span indices from the sequence labels given,
       assuming that the sequence labels are in BIO format.
7. one_sample_per_span -- This returns a similar TargetText instance
where the new instance will only contain one target per span.
Static Functions:
1. from_json -- Returns a TargetText object given a json string. For
example the json string can be the return of TargetText.to_json.
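
    Example (an illustrative sketch; the values are made up):

        sample = TargetText(text='The laptop case was great', text_id='0',
                            targets=['laptop case'], spans=[Span(4, 15)],
                            target_sentiments=['positive'])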
'''
def _check_is_list(self, item: List[Any], item_name: str) -> None:
'''
This will check that the argument given is a List and if not will raise
a TypeError.
:param item: The argument that is going to be checked to ensure it is a
list.
:param item_name: Name of the item. This is used within the raised
error message, if an error is raised.
:raises TypeError: If any of the items are not of type List.
'''
type_err = f'{item_name} should be a list not {type(item)} {item}'
if not isinstance(item, list):
raise TypeError(type_err)
def sanitize(self) -> None:
'''
This performs a check on all of the lists that can be given at
object construction time to ensure that the following conditions are
met:
1. The target, spans and target_sentiments lists are all of the same
size if set.
2. The categories and the category_sentiments lists are all of the
same size if set.
Further more it checks the following:
1. If targets or spans are set then both have to exist.
        2. If targets and spans are set, each span's text must match the
           associated target word(s), e.g. if the target is `barry davies` in
           `today barry davies went` then the spans should be [Span(6, 18)]
:raises ValueError: If any of the above conditions are not True.
'''
def length_mis_match(lists_to_check: List[Any],
text_id_msg: str) -> None:
length_mismatch_msg = 'The following lists do not match '\
f'{lists_to_check}'
list_lengths = [len(_list) for _list in lists_to_check
if _list is not None]
current_list_size = -1
for list_length in list_lengths:
if current_list_size == -1:
current_list_size = list_length
else:
if current_list_size != list_length:
raise ValueError(text_id_msg + length_mismatch_msg)
targets = self._storage['targets']
target_sentiments = self._storage['target_sentiments']
spans = self._storage['spans']
categories = self._storage['categories']
category_sentiments = self._storage['category_sentiments']
text_id = self._storage['text_id']
text_id_msg = f'Text id that this error refers to {text_id}\n'
# Checking the length mismatches for the two different lists
length_mis_match([targets, target_sentiments, spans], text_id_msg)
length_mis_match([categories, category_sentiments], text_id_msg)
# Checking that if targets are set than so are spans
if targets is not None and spans is None:
spans_none_msg = f'If the targets are a list: {targets} then spans'\
f' should also be a list and not None: {spans}'
raise ValueError(text_id_msg + spans_none_msg)
# Checking that the words Spans reference in the text match the
# respective target words. Edge case is the case of None targets which
# should have a Span value of (0, 0)
if targets is not None:
text = self._storage['text']
for target, span in zip(targets, spans):
if target is None:
target_span_msg = 'As the target value is None the span '\
'it refers to should be of value '\
f'Span(0, 0) and not {span}'
if span != Span(0, 0):
raise ValueError(text_id_msg + target_span_msg)
else:
start, end = span.start, span.end
text_target = text[start:end]
target_span_msg = 'The target the spans reference in the '\
f'text: {text_target} does not match '\
f'the target in the targets list: {target}'
if text_target != target:
raise ValueError(text_id_msg + target_span_msg)
def __init__(self, text: str, text_id: str,
targets: Optional[List[str]] = None,
spans: Optional[List[Span]] = None,
target_sentiments: Optional[List[Union[int, str]]] = None,
categories: Optional[List[str]] = None,
category_sentiments: Optional[List[Union[int, str]]] = None):
# Ensure that the arguments that should be lists are lists.
self._list_argument_names = ['targets', 'spans', 'target_sentiments',
'categories', 'category_sentiments']
self._list_arguments = [targets, spans, target_sentiments, categories,
category_sentiments]
names_arguments = zip(self._list_argument_names, self._list_arguments)
for argument_name, list_argument in names_arguments:
if list_argument is None:
continue
self._check_is_list(list_argument, argument_name)
temp_dict = dict(text=text, text_id=text_id, targets=targets,
spans=spans, target_sentiments=target_sentiments,
categories=categories,
category_sentiments=category_sentiments)
self._protected_keys = set(['text', 'text_id', 'targets', 'spans'])
self._storage = temp_dict
self.sanitize()
def __getitem__(self, key: str) -> Any:
'''
:returns: One of the values from the self._storage dictionary. e.g.
if the key is `text` it will return the string representing
the text associated to this object.
'''
return self._storage[key]
def __iter__(self) -> Iterable[str]:
'''
        Returns an iterator over the keys in self._storage, which are the
        following strings by default; additional keys can be added:
1. text
2. text_id
3. targets
4. spans
5. target_sentiments
6. categories
7. category_sentiments
:returns: The keys in self._storage
'''
return iter(self._storage)
def __len__(self) -> int:
'''
:returns: The number of items in self._storage.
'''
return len(self._storage)
def __repr__(self) -> str:
'''
        :returns: The String a user sees when the instance is printed or
                  displayed within an interpreter.
'''
return f'TargetText({self._storage})'
def __eq__(self, other: 'TargetText') -> bool:
'''
Two TargetText instances are equal if they both have the same `text_id`
value.
:param other: Another TargetText object that is being compared to this
TargetText object.
:returns: True if they have the same `text_id` value else False.
'''
if not isinstance(other, TargetText):
return False
elif self['text_id'] != other['text_id']:
return False
return True
def __delitem__(self, key: str) -> None:
'''
Given a key that matches a key within self._storage or self.keys()
it will delete that key and value from this object.
NOTE: Currently 'text', 'text_id', 'spans', and 'targets' are keys
that cannot be deleted.
:param key: Key and its respective value to delete from this object.
'''
if key in self._protected_keys:
            raise KeyError('Cannot delete a key that is protected, list of'
                           f' protected keys: {self._protected_keys}')
del self._storage[key]
def __setitem__(self, key: str, value: Any) -> None:
'''
        Given a key and a respective value it will either change that current
        key's value to the one given here or create a new key with that value.
NOTE: Currently 'text', 'text_id', 'spans', and 'targets' are keys
that cannot be changed.
:param key: Key to be added or changed
:param value: Value associated to the given key.
'''
if key in self._protected_keys:
            raise KeyError('Cannot change a key that is protected, list of'
                           f' protected keys: {self._protected_keys}')
# If the key value should be a list ensure that the new value is a
# list as well.
if key in self._list_argument_names:
self._check_is_list(value, key)
self._storage[key] = value
self.sanitize()
def to_json(self) -> str:
'''
        Required as TargetText is not json serializable due to the 'spans'.
:returns: The object as a dictionary and then encoded using json.dumps
'''
return json.dumps(self._storage)
def _shift_spans(self, num_shifts: int, target_span: Span) -> None:
'''
This only affects the current state of the TargetText attributes.
        The attribute this affects is the `spans` attribute.
NOTE: This is only used within self.force_targets method.
        :param num_shifts: The number of whitespace characters being added
                           around the target at target_span. 1 if it is
                           just a prefix or suffix space added, 2 if both,
                           or 0 if none.
        :param target_span: The target span that is having extra whitespace
                            added either as a prefix or a suffix.
'''
relevant_span_indexs: List[int] = []
target_span_end = target_span.end
for span_index, other_target_span in enumerate(self['spans']):
if other_target_span == target_span:
continue
elif other_target_span.start > target_span_end:
relevant_span_indexs.append(span_index)
for relevant_span_index in relevant_span_indexs:
start, end = self['spans'][relevant_span_index]
start += num_shifts
end += num_shifts
self._storage['spans'][relevant_span_index] = Span(start, end)
def force_targets(self) -> None:
'''
:NOTE: As this affects the following attributes `spans` and `text` it
therefore has to modify these through self._storage as both of these
attributes are within self._protected_keys.
Does not return anything but modifies the `spans` and `text` values
as whitespace is prefixed and suffixed the target unless the prefix
or suffix is whitespace.
Motivation:
        Ensure that the target tokens are not within another separate String
        e.g. target = `priced` but the sentence is `the laptop;priced is high`
        and the tokenizer splits on whitespace: it will not have `priced` separated,
        therefore the BIO tagging is not deterministic, thus force will add
        whitespace around the target word e.g. `the laptop; priced`. This was
        mainly added for the TargetText.sequence_labels method.
'''
for span_index in range(len(self['spans'])):
text = self['text']
last_token_index = len(text) - 1
span = self['spans'][span_index]
prefix = False
suffix = False
start, end = span
if start != 0:
if text[start - 1] != ' ':
prefix = True
if end < last_token_index:
if text[end] != ' ':
suffix = True
text_before = text[:start]
text_after = text[end:]
target = text[start:end]
if prefix and suffix:
self._storage['text'] = f'{text_before} {target} {text_after}'
self._shift_spans(2, span)
self._storage['spans'][span_index] = Span(start + 1, end + 1)
elif prefix:
self._storage['text'] = f'{text_before} {target}{text_after}'
self._shift_spans(1, span)
self._storage['spans'][span_index] = Span(start + 1, end + 1)
elif suffix:
self._storage['text'] = f'{text_before}{target} {text_after}'
self._shift_spans(1, span)
def tokenize(self, tokenizer: Callable[[str], List[str]],
perform_type_checks: bool = False) -> None:
'''
This will add a new key `tokenized_text` to this TargetText instance
that will store the tokens of the text that is associated to this
TargetText instance.
        For a set of tokenizers that are definitely compatible see
target_extraction.tokenizers module.
Ensures that the tokenization is character preserving.
:param tokenizer: The tokenizer to use tokenize the text for each
TargetText instance in the current collection
:param perform_type_checks: Whether or not to perform type checks
to ensure the tokenizer returns a List of
Strings
:raises TypeError: If the tokenizer given does not return a List of
Strings.
:raises ValueError: This is raised if the TargetText instance contains
empty text.
:raises ValueError: If the tokenization is not character preserving.
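        Example (an illustrative sketch, using whitespace tokenization):
            target_text.tokenize(str.split)
            # target_text['tokenized_text'] is then e.g.
            # ['The', 'laptop', 'case', 'was', 'great']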
'''
text = self['text']
tokenized_text = tokenizer(text)
if perform_type_checks:
if not isinstance(tokenized_text, list):
                raise TypeError('The return type of the tokenizer function '
f'{tokenizer} should be a list and not '
f'{type(tokenized_text)}')
for token in tokenized_text:
if not isinstance(token, str):
                    raise TypeError('The return type of the tokenizer function '
f'{tokenizer} should be a list of Strings'
f' and not a list of {type(token)}')
if len(tokenized_text) == 0:
raise ValueError('There are no tokens for this TargetText '
f'instance {self}')
if not is_character_preserving(text, tokenized_text):
raise ValueError('The tokenization method used is not character'
f' preserving. Original text `{text}`\n'
f'Tokenized text `{tokenized_text}`')
self['tokenized_text'] = tokenized_text
def pos_text(self, tagger: Callable[[str], Tuple[List[str], List[str]]],
perform_type_checks: bool = False) -> None:
'''
This will add a new key `pos_tags` to this TargetText instance.
This key will store the pos tags of the text that is associated to
this Target Text instance. NOTE: It will also replace the current
tokens in the `tokenized_text` key with the tokens produced
from the pos tagger.
        For a set of pos taggers that are definitely compatible see
target_extraction.pos_taggers module. The pos tagger will have to
produce both a list of tokens and pos tags.
:param tagger: POS tagger.
:param perform_type_checks: Whether or not to perform type checks
to ensure the POS tagger returns a
tuple containing two lists both containing
Strings.
:raises TypeError: If the POS tagger given does not return a Tuple
:raises TypeError: If the POS tagger given does not return a List of
Strings for both the tokens and the pos tags.
:raises TypeError: If the POS tagger tokens or pos tags are not lists
:raises ValueError: If the POS tagger return is not a tuple of length
2
:raises ValueError: This is raised if the Target Text text is empty
:raises ValueError: If the number of pos tags for this instance
does not have the same number of tokens that has
been generated by the tokenizer function.
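        Example (an illustrative sketch; the tags are made up): a compatible
        tagger would map 'The laptop' to (['The', 'laptop'], ['DET', 'NOUN']);
        after calling pos_text, `pos_tags` holds ['DET', 'NOUN'] and
        `tokenized_text` holds ['The', 'laptop'].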
'''
text = self['text']
tokens_pos_tags = tagger(text)
if perform_type_checks:
if not isinstance(tokens_pos_tags, tuple):
raise TypeError('The return type for the pos tagger should be'
f' a tuple not {type(tokens_pos_tags)}')
if len(tokens_pos_tags) != 2:
raise ValueError('The return of the POS tagger should be a '
f'tuple of length 2 not {len(tokens_pos_tags)}')
if not isinstance(tokens_pos_tags[0], list):
                raise TypeError('The return type of the tagger function '
f'{tagger} should be a list and not '
f'{type(tokens_pos_tags[0])} for the tokens')
if not isinstance(tokens_pos_tags[1], list):
                raise TypeError('The return type of the tagger function '
f'{tagger} should be a list and not '
f'{type(tokens_pos_tags[1])} for the POS tags')
for name, tags in [('tokens', tokens_pos_tags[0]),
('pos_tags', tokens_pos_tags[1])]:
for tag in tags:
if not isinstance(tag, str):
                        raise TypeError('The return type of the tagger function '
f'{tagger} should be a list of Strings'
f' and not a list of {type(tag)} for '
f'the {name}')
tokens, pos_tags = tokens_pos_tags
num_pos_tags = len(pos_tags)
if len(pos_tags) == 0:
raise ValueError('There are no tags for this TargetText '
f'instance {self}')
num_tokens = len(tokens)
if num_tokens != num_pos_tags:
raise ValueError(f'Number of POS tags {pos_tags} should be the '
f'same as the number of tokens {tokens}')
self['pos_tags'] = pos_tags
self['tokenized_text'] = tokens
def sequence_labels(self) -> None:
'''
Adds the `sequence_labels` key to this TargetText instance which can
        be used to train a machine learning algorithm to detect targets.
The `force_targets` method might come in useful here for training
and validation data to ensure that more of the targets are not
affected by tokenization error as only tokens that are fully within
the target span are labelled with `B` or `I` tags.
Currently the only sequence labels supported is IOB-2 labels for the
targets only. Future plans look into different sequence label order
e.g. IOB see link below for more details of the difference between the
two sequence, of which there are more sequence again.
https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
:raises KeyError: If the current TargetText has not been tokenized.
:raises ValueError: If two targets overlap the same token(s) e.g
`Laptop cover was great` if `Laptop` and
                            `Laptop cover` are two separate targets this should
                            raise a ValueError as a token should only be
associated to one target.
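        Example (illustrative): for the text `The laptop case was great` with
        target span Span(4, 15) (`laptop case`) and whitespace tokens, the
        resulting labels would be ['O', 'B', 'I', 'O', 'O'].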
'''
text = self['text']
if 'tokenized_text' not in self:
raise KeyError(f'Expect the current TargetText {self} to have '
'been tokenized using the self.tokenize method.')
self.sanitize()
tokens = self['tokenized_text']
sequence_labels = ['O' for _ in range(len(tokens))]
# This is the case where there are no targets thus all sequence labels
# are `O`
if self['spans'] is None or self['targets'] is None:
self['sequence_labels'] = sequence_labels
return
target_spans: List[Span] = self['spans']
tokens_index = token_index_alignment(text, tokens)
for target_span in target_spans:
target_span_range = list(range(*target_span))
same_target = False
for sequence_index, token_index in enumerate(tokens_index):
token_start, token_end = token_index
token_end = token_end - 1
if (token_start in target_span_range and
token_end in target_span_range):
if sequence_labels[sequence_index] != 'O':
err_msg = ('Cannot have two sequence labels for one '
f'token, text {text}\ntokens {tokens}\n'
                                   f'token indices {tokens_index}\nTarget '
f'spans {target_spans}')
raise ValueError(err_msg)
if same_target:
sequence_labels[sequence_index] = 'I'
else:
sequence_labels[sequence_index] = 'B'
same_target = True
self['sequence_labels'] = sequence_labels
def _key_error(self, key: str) -> None:
'''
:param key: The key to check for within this TargetText instance.
:raises KeyError: If the key given does not exist within this
TargetText instance.
'''
        if key not in self:
            raise KeyError(f'Requires that this TargetText instance {self}'
                           f' contains the key `{key}`')
def get_sequence_spans(self, sequence_key: str) -> List[Span]:
'''
The following sequence label tags are supported: IOB-2. These are the
tags that are currently generated by `sequence_labels`
:param sequence_key: Key to sequence labels such as a BIO sequence
labels. Example key name would be `sequence_labels`
after `sequence_labels` function has been called
                             or more appropriately `predicted_sequence_labels`
when you have predicted sequence labels.
        :returns: The span indices from the sequence labels given, assuming that
the sequence labels are in BIO format.
:raises ValueError: If the sequence labels that are contained in the
sequence key value contain values other than
`B`, `I`, or `O`.
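        Example (illustrative): given the text `The laptop case` with tokens
        `['The', 'laptop', 'case']` and labels `['O', 'B', 'I']`, this would
        return `[Span(4, 15)]`.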
'''
def found_target(start_index: int, end_index: int) -> Span:
if end_index == 0:
raise ValueError(f'The end index {end_index} of '
f'a span cannot be 0. sequence label key used '
f'{sequence_key}\nTargetText {self}')
            return Span(start_index, end_index)
        # The number of tokens, sequence labels, and token text indices should
        # all be the same; they are if the `sequence_labels` function is used
tokens = self['tokenized_text']
token_text_indexs = token_index_alignment(self['text'], tokens)
sequence_labels = self[sequence_key]
same_target = False
start_span_index = 0
end_span_index = 0
sequence_spans: List[Span] = []
for text_index, sequence_label in zip(token_text_indexs,
sequence_labels):
if sequence_label == 'B':
                if same_target:
sequence_span = found_target(start_span_index, end_span_index)
sequence_spans.append(sequence_span)
same_target = False
start_span_index = 0
end_span_index = 0
same_target = True
start_span_index = text_index[0]
end_span_index = text_index[1]
elif sequence_label == 'I':
end_span_index = text_index[1]
elif sequence_label == 'O':
if same_target:
sequence_span = found_target(start_span_index, end_span_index)
sequence_spans.append(sequence_span)
same_target = False
start_span_index = 0
end_span_index = 0
else:
raise ValueError('Sequence labels should be `B` `I` or `O` '
f'and not {sequence_label}. Sequence label '
f'key used {sequence_key}\nTargetText {self}')
if end_span_index != 0:
sequence_spans.append(Span(start_span_index, end_span_index))
return sequence_spans
def one_sample_per_span(self, remove_empty: bool = False) -> 'TargetText':
'''
This returns a similar TargetText instance where the new instance
will only contain one target per span.
This is for the cases where you can have a target e.g. `food` that has
a different related category attached to it e.g.
TargetText(text=`$8 and there is much nicer, food, all of it great and
continually refilled.`, text_id=`1`,
targets=[`food`, `food`, `food`],
categories=[`style`, `quality`, `price`],
target_sentiments=[`pos`,`pos`,`pos`],
spans=[Span(27, 31),Span(27, 31),Span(27, 31)])
As we can see the targets and the categories are linked, this is only
really the case in SemEval 2016 datasets from what I know currently.
In the example case above it will transform it to the following:
TargetText(text=`$8 and there is much nicer, food, all of it great and
continually refilled.`, text_id=`1`,
targets=[`food`],spans=[Span(27,31)])
This type of pre-processing is perfect for the Target Extraction
task.
:param remove_empty: If the TargetText instance contains any None
targets then these will be removed along with
their respective Spans.
:returns: This returns a similar TargetText instance where the new
instance will only contain one target per span.
'''
text = self['text']
text_id = self['text_id']
targets: List[str] = []
spans: List[Span] = []
if self['spans'] is None:
return TargetText(text=text, text_id=text_id)
current_spans = self['spans']
unique_spans = set(current_spans)
spans = sorted(unique_spans, key=lambda x: x[0])
temp_spans: List[Span] = []
for span in spans:
targets_text = text[span.start: span.end]
if span.start == 0 and span.end == 0 and remove_empty:
continue
else:
temp_spans.append(span)
targets.append(targets_text)
spans = temp_spans
return TargetText(text=text, text_id=text_id,
targets=targets, spans=spans)
@staticmethod
def from_json(json_text: str) -> 'TargetText':
'''
This is required as the 'spans' are Span objects which are not json
        serializable and are required for TargetText, therefore this handles
that special case.
This function is also required as we have had to avoid using the
        __setitem__ method and add objects via the _storage dictionary
underneath so that we could add values to this object that are not
within the constructor like `tokenized_text`. To ensure that it is
        compatible with the TargetText concept we call the `TargetText.sanitize`
method at the end.
:param json_text: JSON representation of TargetText
(can be from TargetText.to_json)
:returns: A TargetText object
:raises KeyError: If within the JSON representation there is no
`text` or `text_id` key.
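        Example (illustrative round trip): for any TargetText `t`,
        `TargetText.from_json(t.to_json()) == t`, as equality is based on
        the `text_id` value.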
'''
json_target_text = json.loads(json_text)
        if 'text' not in json_target_text or 'text_id' not in json_target_text:
raise KeyError('The JSON text given does not contain a `text`'
f' or `text_id` field: {json_target_text}')
target_text = TargetText(text=json_target_text['text'],
text_id=json_target_text['text_id'])
for key, value in json_target_text.items():
if key == 'text' or key == 'text_id':
continue
if key == 'spans':
                if value is None:
target_text._storage[key] = None
else:
all_spans = []
for span in value:
all_spans.append(Span(*span))
target_text._storage[key] = all_spans
else:
target_text._storage[key] = value
target_text.sanitize()
return target_text
class TargetTextCollection(MutableMapping):
'''
This is a data structure that inherits from MutableMapping which is
essentially a python dictionary, however the underlying storage is a
OrderedDict therefore if you iterate over it, the iteration will always be
in the same order.
This structure only contains TargetText instances.
Methods:
    1. to_json -- Writes each TargetText instance as a dictionary using its
    own to_json function on a new line within the returned String. The
    returned String is not json compatible but if split by new line it is and
    is also compatible with the from_json method of TargetText.
2. add -- Wrapper around __setitem__. Given as an argument a TargetText
instance it will be added to the collection.
3. to_json_file -- Saves the current TargetTextCollection to a json file
which won't be strictly json but each line in the file will be and each
line in the file can be loaded in from String via TargetText.from_json.
Also the file can be reloaded into a TargetTextCollection using
TargetTextCollection.load_json.
4. tokenize -- This applies the TargetText.tokenize method across all
of the TargetText instances within the collection.
5. pos_text -- This applies the TargetText.pos_text method across all of
the TargetText instances within the collection.
6. sequence_labels -- This applies the TargetText.sequence_labels
method across all of the TargetText instances within the collection.
7. force_targets -- This applies the TargetText.force_targets method
across all of the TargetText instances within the collection.
8. exact_match_score -- Recall, Precision, and F1 score in a Tuple.
All of these measures are based on exact span matching rather than the
matching of the sequence label tags, this is due to the annotation spans
not always matching tokenization therefore this removes the tokenization
error that can come from the sequence label measures.
9. samples_with_targets -- Returns all of the samples that have target
spans as a TargetTextCollection.
10. target_count -- A dictionary of target text as key and values as the
number of times the target text occurs in this TargetTextCollection
11. one_sample_per_span -- This applies the TargetText.one_sample_per_span
method across all of the TargetText instances within the collection to
create a new collection with those new TargetText instances within it.
12. sanitize -- This applies the TargetText.sanitize function to all of
    the TargetText instances within this collection, effectively ensures
that all of the instances follow the specified rules that TargetText
instances should follow.
Static Functions:
1. from_json -- Returns a TargetTextCollection object given the json like
String from to_json. For example the json string can be the return of
TargetTextCollection.to_json.
2. load_json -- Returns a TargetTextCollection based on each new line in
the given json file.
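
    Example (an illustrative sketch; the names and file path are made up):

        collection = TargetTextCollection([sample], name='train')
        collection.to_json_file(Path('train.json'))
        reloaded = TargetTextCollection.load_json(Path('train.json'))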
'''
def __init__(self, target_texts: Optional[List['TargetText']] = None,
name: Optional[str] = None) -> None:
'''
:param target_texts: A list of TargetText instances to add to the
collection.
:param name: Name to call the collection.
'''
self._storage = OrderedDict()
if target_texts is not None:
for target_text in target_texts:
self.add(target_text)
if name is None:
name = ''
self.name = name
def add(self, value: 'TargetText') -> None:
'''
        Wrapper around __setitem__. Instead of having to add the value the
        usual way, finding the instance's 'text_id' and setting this
        container's key to that value, it does this for you.
e.g. performs self[value['text_id']] = value
:param value: The TargetText instance to store in the collection
'''
self[value['text_id']] = value
def to_json(self) -> str:
'''
        Required as TargetTextCollection is not json serializable due to the
        'spans' in the TargetText instances.
        :returns: The object as a String of dictionaries separated by new
                  lines, where each line is a TargetText instance.
'''
json_text = ''
for index, target_text_instance in enumerate(self.values()):
if index != 0:
json_text += '\n'
target_text_instance: TargetText
json_text += target_text_instance.to_json()
return json_text
@staticmethod
def from_json(json_text: str, **target_text_collection_kwargs
) -> 'TargetTextCollection':
'''
Required as the json text is expected to be the return from the
        self.to_json method. This string is not parseable by a standard json
decoder.
:param json_text: This is expected to be a dictionary like object for
each new line in this text
:param target_text_collection_kwargs: Key word arguments to give to
the TargetTextCollection
constructor.
:returns: A TargetTextCollection based on each new line in the given
                  text, where each line is parseable by the TargetText.from_json method.
'''
if json_text.strip() == '':
return TargetTextCollection(**target_text_collection_kwargs)
target_text_instances = []
for line in json_text.split('\n'):
target_text_instances.append(TargetText.from_json(line))
return TargetTextCollection(target_text_instances,
**target_text_collection_kwargs)
@staticmethod
def load_json(json_fp: Path, **target_text_collection_kwargs
) -> 'TargetTextCollection':
'''
Allows loading a dataset from json. Where the json file is expected to
be output from TargetTextCollection.to_json_file as the file will be
a json String on each line generated from TargetText.to_json.
:param json_fp: File that contains json strings generated from
TargetTextCollection.to_json_file
:param target_text_collection_kwargs: Key word arguments to give to
the TargetTextCollection
constructor.
:returns: A TargetTextCollection based on each new line in the given
json file.
'''
target_text_instances = []
with json_fp.open('r') as json_file:
for line in json_file:
if line.strip():
target_text_instance = TargetText.from_json(line)
target_text_instances.append(target_text_instance)
return TargetTextCollection(target_text_instances,
**target_text_collection_kwargs)
def to_json_file(self, json_fp: Path) -> None:
'''
Saves the current TargetTextCollection to a json file which won't be
strictly json but each line in the file will be and each line in the
file can be loaded in from String via TargetText.from_json. Also the
file can be reloaded into a TargetTextCollection using
TargetTextCollection.load_json.
:param json_fp: File path to the json file to save the current data to.
'''
with json_fp.open('w+') as json_file:
for index, target_text_instance in enumerate(self.values()):
target_text_instance: TargetText
target_text_string = target_text_instance.to_json()
if index != 0:
target_text_string = f'\n{target_text_string}'
json_file.write(target_text_string)
def tokenize(self, tokenizer: Callable[[str], List[str]]) -> None:
'''
This applies the TargetText.tokenize method across all of
the TargetText instances within the collection.
        For a set of tokenizers that are definitely compatible see
target_extraction.tokenizers module.
Ensures that the tokenization is character preserving.
:param tokenizer: The tokenizer to use tokenize the text for each
TargetText instance in the current collection
:raises TypeError: If the tokenizer given does not return a List of
Strings.
:raises ValueError: This is raised if any of the TargetText instances
in the collection contain an empty string.
:raises ValueError: If the tokenization is not character preserving.
'''
for index, target_text_instance in enumerate(self.values()):
if index == 0:
target_text_instance.tokenize(tokenizer, True)
else:
target_text_instance.tokenize(tokenizer, False)
def pos_text(self, tagger: Callable[[str], List[str]]) -> None:
'''
This applies the TargetText.pos_text method across all of
the TargetText instances within the collection.
        For a set of pos taggers that are definitely compatible see
target_extraction.pos_taggers module.
:param tagger: POS tagger.
:raises TypeError: If the POS tagger given does not return a List of
Strings.
:raises ValueError: This is raised if any of the TargetText instances
in the collection contain an empty string.
:raises ValueError: If the Target Text instance has not been tokenized.
:raises ValueError: If the number of pos tags for a Target Text instance
does not have the same number of tokens that has
been generated by the tokenizer function.
'''
for index, target_text_instance in enumerate(self.values()):
if index == 0:
target_text_instance.pos_text(tagger, True)
else:
target_text_instance.pos_text(tagger, False)
def force_targets(self) -> None:
'''
This applies the TargetText.force_targets method across all of the
TargetText instances within the collection.
'''
for target_text_instance in self.values():
target_text_instance.force_targets()
def sequence_labels(self) -> None:
'''
This applies the TargetText.sequence_labels method across all of
the TargetText instances within the collection.
:raises KeyError: If the current TargetText has not been tokenized.
:raises ValueError: If two targets overlap the same token(s) e.g
`Laptop cover was great` if `Laptop` and
                            `Laptop cover` are two separate targets this should
                            raise a ValueError as a token should only be
associated to one target.
'''
for target_text_instance in self.values():
target_text_instance.sequence_labels()
def exact_match_score(self,
predicted_sequence_key: str = 'predicted_sequence_labels'
) -> Tuple[float, float, float,
Dict[str, List[Tuple[str, Span]]]]:
'''
Just for clarification we use the sequence label tags to find the
        predicted spans. However, even a perfect sequence label
        score does not mean you will have a perfect exact span score,
as the tokenizer used for the sequence labelling might not align
perfectly with the annotated spans.
The False Positive mistakes, False Negative mistakes, and correct
        True Positive Dictionary keys are those names with the values being a
List of Tuples where the Tuple is made up of the TargetText instance ID
and the Span that was incorrect (FP) or not tagged (FN) or correct (TP).
Example of this is as follows:
{`FP`: [('1', Span(0, 4))], 'FN': [], 'TP': []}
:param predicted_sequence_key: Key of the predicted sequence labels
within this TargetText instance.
:returns: Recall, Precision, and F1 score, False Positive mistakes,
False Negative mistakes, and correct True Positives in a
Dict. All of these measures are based on exact span matching
rather than the matching of the sequence label tags,
this is due to the annotation spans not always matching
tokenization therefore this removes the tokenization
error that can come from the sequence label measures.
:raises KeyError: If there are no predicted sequence label key
within this TargetText.
:raises ValueError: If the predicted or true spans contain multiple
spans that have the same span e.g.
[Span(4, 15), Span(4, 15)]
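        Example (illustrative): with one correctly predicted span, one
        spurious prediction, and no missed spans (TP=1, FP=1, FN=0) this
        would return recall 1.0, precision 0.5, an F1 of roughly 0.67, and
        the error analysis dictionary.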
'''
# tp = True Positive count
tp = 0.0
num_pred_true = 0.0
num_actually_true = 0.0
fp_mistakes: List[Tuple[str, Span]] = []
fn_mistakes: List[Tuple[str, Span]] = []
correct_tp: List[Tuple[str, Span]] = []
for target_text_index, target_text_instance in enumerate(self.values()):
if target_text_index == 0:
keys_to_check = ['spans',
f'{predicted_sequence_key}']
for key in keys_to_check:
target_text_instance._key_error(key)
predicted_spans = target_text_instance.get_sequence_spans(predicted_sequence_key)
# Add to the number of predicted true and actually true
predicted_spans: List[Span]
num_pred_true += len(predicted_spans)
true_spans: List[Span] = target_text_instance['spans']
if true_spans is None:
true_spans = []
num_actually_true += len(true_spans)
# This should be impossible to get to
if len(predicted_spans) != len(set(predicted_spans)):
raise ValueError(f'Predicted spans {predicted_spans} contain'
f' multiple of the same predicted span. '
f'TargetText: {target_text_instance}')
# This is possible
if len(true_spans) != len(set(true_spans)):
raise ValueError(f'True spans {true_spans} contain'
f' multiple of the same true span. '
f'TargetText: {target_text_instance}')
text_id = target_text_instance['text_id']
true_spans = set(true_spans)
for predicted_span in predicted_spans:
if predicted_span in true_spans:
tp += 1
correct_tp.append((text_id, predicted_span))
else:
fp_mistakes.append((text_id, predicted_span))
for true_span in true_spans:
if true_span not in predicted_spans:
fn_mistakes.append((text_id, true_span))
error_analysis_dict = {'FP': fp_mistakes, 'FN': fn_mistakes,
'TP': correct_tp}
if tp == 0.0:
return 0.0, 0.0, 0.0, error_analysis_dict
recall = tp / num_actually_true
precision = tp / num_pred_true
f1 = (2 * precision * recall) / (precision + recall)
return recall, precision, f1, error_analysis_dict
def samples_with_targets(self) -> 'TargetTextCollection':
'''
:returns: All of the samples that have targets as a
TargetTextCollection for this TargetTextCollection.
:raises KeyError: If either `spans` or `targets` does not exist in
one or more of the TargetText instances within this
                          collection. These keys are protected keys, thus they
                          should always exist; this is just a warning in case
                          you have worked around the protected keys.
'''
sub_collection = TargetTextCollection()
for target_text in self.values():
if target_text['spans'] and target_text['targets']:
sub_collection.add(target_text)
return sub_collection
def target_count(self) -> Dict[str, int]:
'''
:returns: A dictionary of target text as key and values as the number
of times the target text occurs in this TargetTextCollection
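                  e.g. {'laptop case': 2, 'food': 1} (illustrative values)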
'''
target_count: Dict[str, int] = Counter()
for target_text in self.values():
if target_text['spans']:
text = target_text['text']
for span in target_text['spans']:
target = text[span.start: span.end]
target_count.update([target])
return dict(target_count)
def one_sample_per_span(self, remove_empty: bool = False
) -> 'TargetTextCollection':
'''
This applies the TargetText.one_sample_per_span method across all of the
TargetText instances within the collection to create a new collection
with those new TargetText instances within it.
:param remove_empty: If the TargetText instance contains any None
targets then these will be removed along with
their respective Spans.
:returns: A new TargetTextCollection that has samples that come
from this collection but has had the
TargetText.one_sample_per_span method applied to it.
'''
new_collection = TargetTextCollection()
for target_text in self.values():
new_collection.add(target_text.one_sample_per_span(remove_empty=remove_empty))
return new_collection
def sanitize(self) -> None:
'''
This applies the TargetText.sanitize function to all of
        the TargetText instances within this collection, effectively ensures
that all of the instances follow the specified rules that TargetText
instances should follow.
'''
for target_text in self.values():
target_text.sanitize()
def __setitem__(self, key: str, value: 'TargetText') -> None:
'''
Will add the TargetText instance to the collection where the key
should be the same as the TargetText instance 'text_id'.
:param key: Key to be added or changed
:param value: TargetText instance associated to this key. Where the
key should be the same value as the TargetText instance
'text_id' value.
'''
if not isinstance(value, TargetText):
raise TypeError('The value should be of type TargetText and not '
f'{type(value)}')
text_id = value['text_id']
if text_id != key:
raise ValueError(f'The value `text_id`: {text_id} should be the '
f'same value as the key: {key}')
# We copy it to stop any mutable objects from changing outside of the
# collection
value_copy = copy.deepcopy(value)
self._storage[key] = value_copy
def __delitem__(self, key: str) -> None:
'''
Given a key that matches a key within self._storage or self.keys()
it will delete that key and value from this object.
:param key: Key and its respective value to delete from this object.
'''
del self._storage[key]
def __eq__(self, other: 'TargetTextCollection') -> bool:
'''
Two TargetTextCollection instances are equal if they both have
the same TargetText instances within it.
:param other: Another TargetTextCollection object that is being
compared to this TargetTextCollection object.
:returns: True if they have the same TargetText instances within it.
'''
if not isinstance(other, TargetTextCollection):
return False
if len(self) != len(other):
return False
for key in self.keys():
if key not in other:
return False
return True
def __repr__(self) -> str:
'''
        :returns: The String a user sees when the instance is printed or
                  displayed within an interpreter.
'''
rep_text = 'TargetTextCollection('
for key, value in self.items():
rep_text += f'key: {key}, value: {value}'
break
if len(self) > 1:
rep_text += '...)'
else:
rep_text += ')'
return rep_text
def __len__(self) -> int:
'''
:returns: The number of TargetText instances in the collection.
'''
return len(self._storage)
def __iter__(self) -> Iterable[str]:
'''
        Returns an iterator over the TargetText instances' 'text_id's that
are stored in this collection. This is an ordered iterator as the
underlying dictionary used to store the TargetText instances is an
OrderedDict in self._storage.
        :returns: The 'text_id's of the TargetText instances stored in this
collection
'''
return iter(self._storage)
def __getitem__(self, key: str) -> 'TargetText':
'''
:returns: A TargetText instance that is stored within this collection.
'''
return self._storage[key]
| null |
target_extraction/data_types.py
|
data_types.py
|
py
| 56,357 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.abc.MutableMapping",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "target_extraction.tokenizers.is_character_preserving",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 424,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 424,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 424,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 528,
"usage_type": "name"
},
{
"api_name": "target_extraction.tokenizers.token_index_alignment",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 583,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 578,
"usage_type": "name"
},
{
"api_name": "target_extraction.tokenizers.token_index_alignment",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 595,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 595,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 562,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 562,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 656,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 657,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 657,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 665,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 665,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "collections.abc.MutableMapping",
"line_number": 720,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 775,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 775,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 776,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 782,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 847,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 871,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 889,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 889,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 914,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 914,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1000,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 1000,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 1000,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1001,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 1001,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 1001,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1002,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 1002,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 1002,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1012,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 1012,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 1015,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 1015,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 965,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 966,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 966,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 966,
"usage_type": "name"
},
{
"api_name": "target_extraction.data_types_util.Span",
"line_number": 966,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 1073,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 1073,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 1068,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 1133,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 1187,
"usage_type": "name"
}
] |
426385174
|
"""
import matplotlib.pyplot as plt
x_values = [1, 2, 3, 4, 5]
y_values = [1, 4, 9, 16, 25]
plt.scatter(x_values, y_values, s=100)
# Set the chart title and label the axes
plt.title("Square Number", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Square of Value", fontsize=14)
# Set the size of the tick labels
plt.tick_params(axis='both', which='major', labelsize=14)
plt.show()
"""
import matplotlib.pyplot as plt
x_values = list(range(1, 1001))
y_values = [x**2 for x in x_values]
plt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Blues, edgecolor='none', s=40)
# plt.show()
# plt.savefig('squares_plot.png', bbox_inches='tight')
plt.savefig('squares_plot2.png')
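# --- hedged sketch, not part of the original script ---
# The points are colormapped by y value, so a colorbar would make the Blues
# scale readable; both calls below are stable pyplot API and are left
# commented out, matching the commented-out alternatives above.
# plt.colorbar(label='Square of Value')
# plt.axis([0, 1100, 0, 1100000])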
| null |
matplotlib_demo/scatter_squares.py
|
scatter_squares.py
|
py
| 692 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
}
] |
124439770
|
#import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import pickle
from operator import itemgetter
__author__ = 'fuatbasik'
plt.rcParams['font.size'] = 25
plt.rcParams['ytick.labelsize'] = 22
plt.rcParams['xtick.labelsize'] = 22
plt.rcParams['legend.fontsize'] = 20
#plt.rcParams['axes.titlesize'] = 23
plt.rcParams['lines.markersize'] = 16
plt.rcParams['figure.subplot.left'] = 0.1
plt.rcParams['figure.subplot.bottom'] = 0.15
markers = ['k+-', 'mo-', 'c*-', 'rx-', 'g^-', 'bd-']
results = pickle.load(open('/Users/fuatbasik/Documents/s3tm/s3tm_experiments/learningTimes/learningTimeResults.txt'))
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
x = sorted(results[0]["time"])
for i, result in enumerate(results):
y = []
for key, value in sorted(result["time"].iteritems(), key=itemgetter(0), reverse=False):
y.append(value )
ax.errorbar(x, y,fmt=markers[i+1], label=result["label"], markerfacecolor='none')
ax.legend(loc='best')
ax.set_xlabel("number of tweets")
ax.set_ylabel("seconds")
ax.set_yscale("log", basey=2)
ax.set_xscale("log", basex=10)
ax.xaxis.set_ticks(x)
plt.show()
#plt.savefig("../docs/learningTimes.png")
| null |
code/draw/drawLearningTimes.py
|
drawLearningTimes.py
|
py
| 1,184 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "operator.itemgetter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
297492134
|
import pysat.solvers
import pysat.formula
solvers = ['cadical',
'glucose30',
'glucose41',
'lingeling',
'maplechrono',
'maplecm',
'maplesat',
'minicard',
'minisat22',
'minisat-gh']
clauses = ([1, 2, 3], [-1, 2], [-2])
def test_solvers():
cnf = pysat.formula.CNF()
for clause in clauses:
cnf.append(clause)
for solverName in solvers:
with pysat.solvers.Solver(name=solverName) as solver:
solver.append_formula(cnf)
assert(solver.solve())
assert(solver.get_model() in [[-1, -2, 3]])
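def test_solver_assumptions():
# Hedged companion test, not in the original file: pysat solvers also accept
# assumption literals via solve(assumptions=...), exercised here on the same
# clause set (its unique model is [-1, -2, 3]).
cnf = pysat.formula.CNF(from_clauses=list(clauses))
for solverName in solvers:
with pysat.solvers.Solver(name=solverName, bootstrap_with=cnf.clauses) as solver:
assert(solver.solve(assumptions=[3]))
# forcing 3 false makes [1, 2, 3] unsatisfiable once [-2] and [-1, 2] fire
assert(not solver.solve(assumptions=[-3]))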
| null |
test/test_sat.py
|
test_sat.py
|
py
| 660 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pysat.solvers.formula.CNF",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pysat.solvers.formula",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pysat.solvers",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pysat.solvers.solvers.Solver",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pysat.solvers.solvers",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pysat.solvers",
"line_number": 25,
"usage_type": "name"
}
] |
202505081
|
# -*- coding: utf-8 -*-
#This is just a proof of concept with probably a lot of bugs, use at your own risks!
#It creates a new tab in the MS Word ribbon, with buttons calling docCleaner scripts from inside Word
#To uninstall it, launch it with the --unregister argument
#You can also remove it from Word (in the "Developer" tab, look for COM Addins > Remove)
#Inspired by the Excel addin provided in the win32com module demos, and the "JJ Word Addin" (I don't remember where I get it, but thanks!)
import os
import sys
#Defining Pythonpath
scriptdir, script = os.path.split(__file__)
pkgdir = os.path.join(scriptdir, 'pkgs')
sys.path.insert(0, pkgdir)
os.environ['PYTHONPATH'] = pkgdir + os.pathsep + os.environ.get('PYTHONPATH', '')
os.environ['PYTHONHOME'] = ""
from tkinter import *
import win32com
win32com.__path__
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants, Dispatch
import win32com.client
import win32ui
import win32con
from PIL import Image
import mimetypes
#from guess_language import guess_language
import locale
import gettext
import simplejson
import tempfile
import shutil
import csv
#import doccleaner
from doccleaner import doccleaner
#from doccleaner import imageConv
locale.setlocale(locale.LC_ALL, '')
user_locale = locale.getlocale()[0]
def checkIfDocx(filepath):
if mimetypes.guess_type(filepath)[0] == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
return True
else:
return False
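#Hedged illustration, not in the original: checkIfDocx(r"C:\docs\report.docx")
#returns True because mimetypes maps the .docx extension to the OOXML
#wordprocessingml MIME type; for unknown extensions guess_type returns
#(None, None), so anything else yields False.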
#TODO : localization
def init_localization():
'''prepare l10n'''
print(locale.setlocale(locale.LC_ALL,""))
locale.setlocale(locale.LC_ALL, '') # use user's preferred locale
# take first two characters of country code
loc = locale.getlocale()
#filename = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "lang", "messages_%s.mo" % locale.getlocale()[0][0:2])
filename = os.path.join("lang", "messages_{0}.mo").format(locale.getlocale()[0][0:2])
try:
print("Opening message file {0} for locale {1}".format(filename, loc[0]))
#If the .mo file is badly generated, this line will return an error message: "LookupError: unknown encoding: CHARSET"
trans = gettext.GNUTranslations(open(filename, "rb"))
except IOError:
print("Locale not found. Using default messages")
trans = gettext.NullTranslations()
trans.install()
def load_json(filename):
f = open(filename, "r")
data = f.read()
f.close()
return simplejson.loads(data)
def getInterfaceTranslation():
reader = csv.reader(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "interface_translations.csv")))
translation_dict = {}
headers = next(reader)[1:]
for row in reader:
temp_dict = {}
name = row[0]
values = []
for x in row[1:]:
values.append(str(x).encode('windows-1252').decode('utf-8'))
for i in range(len(values)):
if values[i]:
temp_dict[headers[i]] = values[i]
translation_dict[name] = temp_dict
return translation_dict
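#Hedged illustration, not in the original: with a CSV shaped like
# tag,en_label,fr_label
# styleTitle,Apply Title style,Appliquer le style Titre
#getInterfaceTranslation() would return
#{'styleTitle': {'en_label': 'Apply Title style', 'fr_label': 'Appliquer le style Titre'}}
#(column names here are illustrative; only the first column is fixed as the key)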
class WordAddin:
#wd = win32com.client.Dispatch("Word.Application")
wd = win32com.client.GetActiveObject("Word.Application")
wc = win32com.client.constants
#Convert translations csv to nested dictionary: http://stackoverflow.com/questions/11102326/python-csv-to-nested-dictionary
#TODO: dynamic name for the directory
#wd = win32com.client.Dispatch("Word.Application")
#see list of MS Office language codes (MsoLanguageID Enumeration): http://msdn.microsoft.com/en-us/library/aa432635%28v=office.12%29.aspx
#Check if MS Word is in french
if wd.Application.Language in (1036, 11276, 3084, 12300, 15372, 5132, 13324, 6156, 14348, 8204, 10252, 7180, 9228):
wd_language = "fr"
#in spanish
elif wd.Application.Language in (2058, 1034, 11274, 16394, 13322, 9226, 5130, 7178, 12298, 17418, 4106, 18442, 3082, 19466, 6154, 15370, 10250, 20490, 14346, 8202):
#wd_language = "es"
wd_language = "en"
#If not, we'll use buttons in english for the customized ribbon
else:
wd_language = "en"
# def __init__(self):
# self.jsonConf = load_json(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'winword_addin.json'))
def docFormat(self, filePath):
#check the format of the document, stock it in a variable
form = {
#docx
('application/vnd.openxmlformats-officedocument.wordprocessingml.document'):(".docx", 12),
#doc
('application/vnd.ms-word', 'application/doc', 'application/vnd.msword', 'appl/text', 'application/winword', 'application/word', 'application/x-msw6', 'application/x-msword', 'application/msword'):(".doc", 0),
#odt
('application/vnd.oasis.opendocument.text', 'application/x-vnd.oasis.opendocument.text'):(".odt", 23),
#rtf
('application/rtf', 'application/x-rtf', 'text/rtf', 'text/richtext', 'application/x-soffice'):(".rtf", 6)
}
#mimetypes.guess_type returns a tuple or a string
if type(mimetypes.guess_type(filePath, strict=True)) == tuple:
#if it's a tuple, the mimetype is the first element in the tuple
docMimetype = mimetypes.guess_type(filePath, strict=True)[0]
elif type(mimetypes.guess_type(filePath, strict=True)) == str:
docMimetype = mimetypes.guess_type(filePath, strict=True)
for key in form.keys():
if docMimetype in key:
documentFormat = form[key]
documentExtension = documentFormat[0]
documentSaveFormat = documentFormat[1]
break
else:
try:
documentExtension = mimetypes.guess_extension(docMimetype, strict=True)
documentSaveFormat = self.wd.ActiveDocument.SaveFormat
except:
documentFormat = "other"
return (docMimetype, documentExtension, documentSaveFormat)
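#Hedged example, not in the original: for "report.odt" docFormat returns
#('application/vnd.oasis.opendocument.text', '.odt', 23), 23 being Word's
#wdFormatOpenDocumentText save constant, alongside the 12/0/6 codes mapped above.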
def apply_style(self,tag):
try:
#Applying style
self.wd.Selection.Style = self.wd.ActiveDocument.Styles(tag)
except:
#If style does not exist -> create it, then apply it
self.wd.ActiveDocument.Styles.Add(tag)
self.wd.Selection.Style = self.wd.ActiveDocument.Styles(tag)
# def ConvertImages(self, ctrl):
#
# #Creating a word object inside a wd variable
# wd = win32com.client.Dispatch("Word.Application")
# wc = win32com.client.constants
# #If document is not docx, convert it
# initialPath = wd.ActiveDocument.FullName
# initialExtension = self.docFormat(wd.ActiveDocument.FullName)[1]
# initialSaveFormat = self.docFormat(wd.ActiveDocument.FullName)[2]
# if initialExtension != ".docx":
# wd.ActiveDocument.SaveAs(FileName = wd.ActiveDocument.Name + '.docx',
# FileFormat = wc.wdFormatXMLDocument )
# try:
# #Check if the file is not a new one (unsaved)
# if os.path.isfile(wd.ActiveDocument.FullName) == True:
# #Before processing the doc, let's save the user's last modifications
# #TODO: does not work correctly
# wd.ActiveDocument.Save()
#
# originDoc = wd.ActiveDocument.FullName #:Puts the path of the current file in a variable
# tmp_dir = tempfile.mkdtemp() #:Creates a temp folder, which will contain the temp docx files necessary for processing
#
# #Creates a temp doc,
# newDoc = os.path.join(tmp_dir, "~" + wd.ActiveDocument.Name) #:Creates a temporary file (newDoc), which will be the docCleaner output
#
#
#
#
#
# #If there is more than one XSL sheet, we'll have to make consecutive processings
#
#
#
# img_arguments = ['--input', str(originDoc),
# '--output', str(newDoc)
# ]
#
# imageConv.main(img_arguments)
#
#
#
# #Opening the temp file
# wd.Documents.Open(newDoc)
#
# #Copying the temp file content to the original doc
# #To do this, never use the MSO Content.Copy() and Content.Paste() methods, because :
# # 1) It would overwrite important data the user may have copied to the clipboard.
# # 2) Other programs, like antiviruses, may use the clipboard simultaneously, which would result in a big mess for the user.
# #Instead, use the Content.FormattedText function, it's simple, and takes just one line of code:
# wd.Documents(originDoc).Content.FormattedText = wd.Documents(newDoc).Content.FormattedText
# #Closing and removing the temp document
# wd.Documents(newDoc).Close()
# os.remove(newDoc)
#
# #Saving the changes
## if initialExtension != "docx":
## print("bla")
## else:
# wd.ActiveDocument.Save()
#
# #Removing the whole temp folder
# try:
# shutil.rmtree(tmp_dir)
# except:
# #TODO: What kind of error would be possible when removing the temp folder? How to handle it?
# pass
#
# else:
# win32ui.MessageBox("You need to save the file before launching this script!"
# ,"Error",win32con.MB_OK)
#
# except Exception as e:
#
# tb = sys.exc_info()[2]
# #TODO: writing the error to a log file
# win32ui.MessageBox(str(e) + "\n" +
# str(tb.tb_lineno)+ "\n" +
# str(newDoc)
# ,"Error",win32con.MB_OKCANCEL)
#
# def GetLanguage(self,ctrl):
# wd = win32com.client.Dispatch("Word.Application")
# #tests = {}
# for paragraph in self.wd.ActiveDocument.Paragraphs:
# win32ui.MessageBox(str(paragraph),"Error",win32con.MB_OK)
# if str(paragraph.Style) == "Normal":
# tests['paraNormal'] = True
# elif str(paragraph.Style) in ['Titre', 'Title']:
# tests['title'] = True
# elif str(paragraph.Style) in ['langue', 'Language']:
# tests['lang'] = True
# elif str(paragraph.Style) == "Pagination":
# if re.match("([0-9]{1,})(?:[0-9]{2,})", str(paragraph) ) is not None:
# win32ui.MessageBox(str("ok"),"Error",win32con.MB_OK)
# else:
# win32ui.MessageBox(str("no"),"Error",win32con.MB_OK)
# wdStory = 6
# self.wd.Selection.HomeKey(Unit=wdStory)
# self.wd.Selection.Find.Text = ""
# self.wd.Selection.Find.Text
# self.wd.Selection.Find.Execute()
#docContent = self.wd.ActiveDocument.Content()
# language = guess_language(docContent)
#
# #styles = ['','','','']
# wdStory = 6
# self.wd.Selection.HomeKey(Unit=wdStory)
# try:
#self.wd.ActiveDocument.Selection.Find(Text="liste")
#1: find the paragraph styled as "language" and replace it with the detected value
#2:
# except:
# print("bla")
#win32ui.MessageBox(str(language),"Error",win32con.MB_OK)
#win32ui.MessageBox(docContent,"Error",win32con.MB_OK)
def removeBookmarks(self,tag):
try:
for bookmark in self.wd.ActiveDocument.Bookmarks:
bookmark.Delete()
except Exception as e:
print(e)
def do(self,tag):
#Get the current working dir. We'll need it to get the path to interface_translations.csv, if the user switches languages after applying a processing
initialWorkingDir = os.getcwd()
try:
#Check if the file is not a new one (unsaved)
if os.path.isfile(self.wd.ActiveDocument.FullName) == True:
#Before processing the doc, let's save the user's last modifications
#TODO: does not work correctly
initialPath = self.wd.ActiveDocument.FullName
initialExtension = self.docFormat(initialPath)[1]
initialSaveFormat = self.docFormat(initialPath)[2]
if initialExtension != ".docx":
self.wd.ActiveDocument.SaveAs(FileName = os.path.join(tempfile.mkdtemp(), self.wd.ActiveDocument.Name + '.docx'),
FileFormat = 12) #12 = wdFormatXMLDocument = .docx -> see https://msdn.microsoft.com/en-us/library/office/ff839952.aspx
else:
self.wd.ActiveDocument.Save()
originDoc = self.wd.ActiveDocument.FullName #:Puts the path of the current file in a variable
tmp_dir = tempfile.mkdtemp() #:Creates a temp folder, which will contain the temp docx files necessary for processing
#TODO: If the document is in another format than docx, convert it temporarily to docx
#At the processing's end, we'll have to convert it back to its original format, so we need to store this information
transitionalDoc = originDoc #:Creates a temp transitional doc, which will be used if we need to make consecutive XSLT processings. #E.g..: original doc -> xslt processing -> transitional doc -> xslt processing -> final doc -> copying to original doc
newDoc = os.path.join(tmp_dir, "~" + self.wd.ActiveDocument.Name) #:Creates a temporary file (newDoc), which will be the docCleaner output
jj = 0 #:This variable will be increased by one for each XSL processing defined in the json file
#Then, we take the current active document as input, the temp doc as output
for button in jsonConf["buttons"]:
if button["tag"] == tag:
for xsl in button["xsl"]:
if jj > 0:
#If there is more than one XSL sheet, we'll have to make consecutive processings
newDocName, newDocExtension = os.path.splitext(newDoc)
transitionalDoc = newDoc
newDoc = newDocName + str(jj)+ newDocExtension
dc_arguments = ['--input', str(transitionalDoc),
'--output', str(newDoc),
'--transform', os.path.join(os.path.dirname(doccleaner.__file__),
"docx", xsl["XSLname"] )
]
for param in ["subfile", "XSLparameter"]:
if xsl[param] != 0:
if param == "subfile":
str_param = os.path.join(os.path.dirname(doccleaner.__file__),
"docx", xsl[param])
else:
str_param = xsl[param]
dc_arguments.extend( ( '--' + param, str_param)) #",".join ( str_param ) ))
doccleaner.main(dc_arguments)
jj += 1
#Opening the temp file
self.wd.Documents.Open(newDoc)
#Copying the temp file content to the original doc
#To do this, never use the MSO Content.Copy() and Content.Paste() methods, because :
# 1) It would overwrite important data the user may have copied to the clipboard.
# 2) Other programs, like antiviruses, may use the clipboard simultaneously, which would result in a big mess for the user.
#Instead, use the Content.FormattedText function, it's simple, and takes just one line of code:
self.wd.Documents(originDoc).Content.FormattedText = self.wd.Documents(newDoc).Content.FormattedText
#Closing and removing the temp document
self.wd.Documents(newDoc).Close()
os.remove(newDoc)
#Saving the changes
if initialExtension != ".docx":
self.wd.ActiveDocument.SaveAs(FileName = initialPath,
)
else:
self.wd.ActiveDocument.Save()
#Removing the whole temp folder
try:
shutil.rmtree(tmp_dir)
except:
#TODO: What kind of error would be possible when removing the temp folder? How to handle it?
pass
else:
win32ui.MessageBox("You need to save the file before launching this script!"
,"Error",win32con.MB_OK)
except Exception as e:
tb = sys.exc_info()[2]
#TODO: writing the error to a log file
win32ui.MessageBox(str(e) + "\n" +
str(tb.tb_lineno)+ "\n" #+
#str(newDoc)
,"Error",win32con.MB_OKCANCEL)
os.chdir(initialWorkingDir)
# def GetScreenTip(self,ctrl):
# return self.translation_dict[ctrl. Tag][self.wd_language+"_screentip"]
#
# def GetLabel(self,ctrl):
# try:
# label_id = ctrl. Tag
# except:
# label_id = ctrl. Id
# return self.translation_dict[str(label_id)][self.wd_language+"_label"]
class Interface(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("Assistant de stylage")
self.pack()
#def getCurrentDoc():
def defineButton(wdobj, action, ref,txt,cmd,pos,width,imageFile):
#Defining button options in a dictionary
button_options = {}
#If there is an image for the button, we need to define some additional arguments, and a different height and width
if imageFile != None:
image = PhotoImage(file=imageFile)
button_options["image"] = image
button_options['compound'] = LEFT
button_options['height'] = 40
button_options['width'] = 40
else:
button_options['height'] = 1
button_options['width'] = width
button_options["text"] = txt
#button_options["textvariable"] =
#button_options["command"] = lambda: wdobj.do(tag=cmd)
button_options["command"] = lambda: getattr(wdobj,action)(tag=cmd)
button_options['fg'] = 'Black'
button_options['bg'] = "#eff1f3"
button_options['justify'] = LEFT
b = Button(ref,
button_options
)
if imageFile != None:
b.image=image
b.grid(row=pos, column=1, sticky=(N+S+E+W), padx=5 )
return(b)
#TODO: to redefine each button, each "button" object must be stored in a dictionary
def redefineButtons(ref, buttonList,lang):
translation = getInterfaceTranslation()
if lang == "fr":
ref.title("Assistant de stylage")
else:
ref.title("OpenEdition's copyediting macros")
for button in buttonList:
#retrieve the tag linked to the button -> stored in a tuple next to the button object
button[0].configure(text=translation[button[1]][lang+"_label"])
def returnLanguage(WordObj):
#see list of MS Office language codes (MsoLanguageID Enumeration): http://msdn.microsoft.com/en-us/library/aa432635%28v=office.12%29.aspx
#Check if MS Word is in french
if WordObj.wd.Application.Language in (1036, 11276, 3084, 12300, 15372, 5132, 13324, 6156, 14348, 8204, 10252, 7180, 9228):
language = "fr"
#in spanish
elif WordObj.wd.Application.Language in (2058, 1034, 11274, 16394, 13322, 9226, 5130, 7178, 12298, 17418, 4106, 18442, 3082, 19466, 6154, 15370, 10250, 20490, 14346, 8202):
#wd_language = "es"
language = "en"
#If not, we'll use buttons in english
else:
language = "en"
return language
def generateMenu(appPath, WordObj, itemsNumber,confFile):
wd_language = returnLanguage(WordObj)
root = Tk()
scriptPath = os.path.dirname(os.path.realpath(__file__))
root.iconbitmap(default=os.path.join(scriptPath, 'favicon.ico'))#os.path.join(appPath, 'favicon.bmp'))
if wd_language == "fr":
root.title('Assistant de stylage')
else:
root.title("OpenEdition's copyediting macros")
root.rowconfigure((0,1), weight=1) # make buttons stretch when
root.columnconfigure((1,1), weight=1) # when window is resized
#root.resizable(1,0)
#root.bind("<Configure>", resize)
#Load the interface translation dictionary
translation = getInterfaceTranslation()
x =0
buttonList = []
for button in confFile["buttons"]:
x +=1
if button["image"] != None:
imagePath = os.path.join(os.path.dirname(os.path.realpath(__file__)), button["image"])
else:
imagePath = button["image"]
buttonList.append( (defineButton(WordObj,
action=button["action"],
ref=root,
txt=translation[button["tag"]][wd_language+"_label"], #Get the label from the translation dictionary
cmd=button["tag"],
pos=x,
width=70,
imageFile=imagePath
),
button["tag"])
)
#Buttons to switch the interface language on demand.
enButton = Button(root,text="English",command=lambda: redefineButtons(root, buttonList,'en'))
enButton.grid(row=1,column=2)
frButton = Button(root,text="French",command=lambda: redefineButtons(root, buttonList,'fr'))
frButton.grid(row=2,column=2)
root.mainloop()
def main():
#init_localization()
path = os.path.dirname(os.path.realpath(__file__))
global jsonConf
jsonConf = load_json(os.path.join(path, 'winword_addin.json'))
generateMenu(
appPath=path,
WordObj = WordAddin() ,
itemsNumber = len(jsonConf["buttons"]),
confFile= jsonConf
)
if __name__ == '__main__':
main()
| null |
dc_winword/wordaddin.py
|
wordaddin.py
|
py
| 24,079 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.path.split",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.pathsep",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "win32com.__path__",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "locale.setlocale",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "locale.LC_ALL",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "locale.getlocale",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "locale.setlocale",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "locale.LC_ALL",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "locale.setlocale",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "locale.LC_ALL",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "locale.getlocale",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "locale.getlocale",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "gettext.GNUTranslations",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "gettext.NullTranslations",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "win32com.client.GetActiveObject",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "win32com.client",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "win32com.client",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_type",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "mimetypes.guess_extension",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "doccleaner.doccleaner.__file__",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "doccleaner.doccleaner",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "doccleaner.doccleaner.__file__",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "doccleaner.doccleaner",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "doccleaner.doccleaner.main",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "doccleaner.doccleaner",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "win32ui.MessageBox",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "win32con.MB_OK",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "win32ui.MessageBox",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "win32con.MB_OKCANCEL",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 532,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 533,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 533,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 551,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 577,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 579,
"usage_type": "attribute"
}
] |
514916179
|
import webbrowser
from python_helper import Constant as c
from python_helper import log, StringHelper
from flask import Response, request
import flask_restful
from python_framework.api.src.annotation.MethodWrapper import Function, overrideSignatures
from python_framework.api.src.helper import Serializer
from python_framework.api.src.service import GlobalException
from python_framework.api.src.domain import HttpStatus
from python_framework.api.src.service import Security
from python_framework.api.src.service.openapi import OpenApiManager
KW_URL = 'url'
KW_DEFAULT_URL = 'defaultUrl'
KW_MODEL = 'model'
KW_API = 'api'
KW_METHOD = 'method'
KW_RESOURCE = 'resource'
KW_CONTROLLER_RESOURCE = 'Controller'
KW_SERVICE_RESOURCE = 'Service'
KW_REPOSITORY_RESOURCE = 'Repository'
KW_VALIDATOR_RESOURCE = 'Validator'
KW_MAPPER_RESOURCE = 'Mapper'
KW_HELPER_RESOURCE = 'Helper'
KW_CONVERTER_RESOURCE = 'Converter'
KW_RESOURCE_LIST = [
KW_CONTROLLER_RESOURCE,
KW_SERVICE_RESOURCE,
KW_REPOSITORY_RESOURCE,
KW_VALIDATOR_RESOURCE,
KW_MAPPER_RESOURCE,
KW_HELPER_RESOURCE,
KW_CONVERTER_RESOURCE
]
LOCALHOST_URL = 'http://127.0.0.1:5000'
DOT_SPACE_CAUSE = f'''{c.DOT_SPACE}{c.LOG_CAUSE}'''
def printMyStuff(stuff):
print()
print(f' type(stuff).__name__ = {type(stuff).__name__}')
print(f' type(stuff).__class__.__name__ = {type(stuff).__class__.__name__}')
print(f' stuff.__class__.__name__ = {stuff.__class__.__name__}')
print(f' stuff.__class__.__module__ = {stuff.__class__.__module__}')
print(f' stuff.__class__.__qualname__ = {stuff.__class__.__qualname__}')
def printClass(Class) :
print(f'{2 * c.TAB}Class.__name__ = {Class.__name__}')
print(f'{2 * c.TAB}Class.__module__ = {Class.__module__}')
print(f'{2 * c.TAB}Class.__qualname__ = {Class.__qualname__}')
@Function
def jsonifyResponse(object, contentType, status) :
return Response(Serializer.jsonifyIt(object), mimetype = contentType, status = status)
@Function
def getClassName(instance) :
return instance.__class__.__name__
@Function
def getModuleName(instance) :
return instance.__class__.__module__
@Function
def getQualitativeName(instance) :
return instance.__class__.__qualname__
def appendArgs(args, argument, isControllerMethod=False) :
if isControllerMethod and Serializer.isList(argument) :
return args + argument
args.append(argument)
return args
@Function
def getArgsWithSerializerReturnAppended(argument, args, isControllerMethod=False) :
args = [arg for arg in args]
args = appendArgs(args, argument, isControllerMethod=isControllerMethod)
return tuple(arg for arg in args)
@Function
def getArgsWithResponseClassInstanceAppended(args, responseClass) :
if responseClass :
resourceInstance = args[0]
objectRequest = args[1]
serializerReturn = Serializer.convertFromObjectToObject(objectRequest, responseClass)
args = getArgsWithSerializerReturnAppended(serializerReturn, args)
return args
@Function
def getResourceFinalName(resourceInstance, resourceName=None) :
if not resourceName :
resourceName = resourceInstance.__class__.__name__
for kwAsset in KW_RESOURCE_LIST :
if kwAsset in resourceName :
resourceName = resourceName.replace(kwAsset, c.NOTHING)
return f'{resourceName[0].lower()}{resourceName[1:]}'
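# Hedged examples, not in the original module:
# getResourceFinalName(None, resourceName='BookController') returns 'book'
# getResourceFinalName(None, resourceName='OrderItemService') returns 'orderItem'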
@Function
def getResourceType(resourceInstance, resourceName = None) :
if not resourceName :
resourceName = resourceInstance.__class__.__name__
for kwAsset in KW_RESOURCE_LIST :
if kwAsset in resourceName :
return kwAsset
@Function
def getAttributePointerList(object) :
return [
getattr(object, objectAttributeName)
for objectAttributeName in dir(object)
if (not objectAttributeName.startswith('__') and not objectAttributeName.startswith('_'))
]
@Function
def setMethod(resourceInstance, newMethod, methodName = None) :
def buildNewClassMethod(resourceInstance, newMethod) :
def myInnerMethod(*args, **kwargs) :
return newMethod(resourceInstance,*args, **kwargs)
overrideSignatures(myInnerMethod, newMethod)
return myInnerMethod
if not type(newMethod).__name__ == KW_METHOD :
newMethod = buildNewClassMethod(resourceInstance, newMethod)
if not methodName :
methodName = newMethod.__name__
setattr(resourceInstance, methodName, newMethod)
return resourceInstance
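# Hedged example, not in the original module: setMethod grafts a plain
# function onto an instance so it behaves like a bound method; someResource
# and ping are illustrative names:
# def ping(self) : return 'pong'
# setMethod(someResource, ping) # afterwards someResource.ping() == 'pong'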
@Function
def getGlobals() :
try :
from app import globals
except Exception as exception :
raise Exception('Failed to get "globals" instance from app.py')
return globals
@Function
def getApi() :
try:
api = getGlobals().api
except Exception as exception :
raise Exception(f'Failed to return api from "globals" instance. Cause: {str(exception)}')
return api
@Function
def getNullableApi() :
try :
api = getApi()
except :
api = None
return api
@Function
def raiseBadResponseImplementetion(cause):
raise Exception(f'Bad response implementation. {cause}')
@Function
def validateFlaskApi(instance) :
apiClassName = flask_restful.Api.__name__
moduleName = flask_restful.__name__
if not (apiClassName == getClassName(instance) and apiClassName == getQualitativeName(instance) and moduleName == getModuleName(instance)) :
raise Exception(f'Globals can only be added to a "flask_restful.Api" instance. Not to {instance}')
@Function
def validateResponseClass(responseClass, controllerResponse) :
log.debug(validateResponseClass, controllerResponse)
if responseClass :
if not controllerResponse and not isinstance(controllerResponse, list):
raiseBadResponseImplementetion(f'Response not present')
if isinstance(responseClass, list) :
if 0 == len(responseClass) :
raiseBadResponseImplementetion(f'"responseClass" was not defined')
elif len(responseClass) == 1 :
if not isinstance(responseClass[0], list) :
if not isinstance(controllerResponse, responseClass[0]) :
raiseBadResponseImplementetion(f'Response class does not match expected class. Expected "{responseClass[0].__name__}", response "{controllerResponse.__class__.__name__}"')
elif not isinstance(responseClass[0][0], list) :
if not isinstance(controllerResponse, list) :
raiseBadResponseImplementetion(f'Response is not a list. Expected "{responseClass[0].__class__.__name__}", but found "{controllerResponse.__class__.__name__}"')
elif isinstance(controllerResponse, list) and len(controllerResponse) > 0 and not isinstance(controllerResponse[0], responseClass[0][0]):
# print(f'responseClass = {responseClass}')
# print(f'responseClass[0] = {responseClass[0]}')
# print(f'responseClass[0][0] = {responseClass[0][0]}')
raiseBadResponseImplementetion(f'Response element class does not match expected element class. Expected "{responseClass[0][0].__name__}", response "{controllerResponse[0].__class__.__name__}"')
else :
if not isinstance(controllerResponse, responseClass) :
raiseBadResponseImplementetion(f'Response class does not match expected class. Expected "{responseClass.__name__}", response "{controllerResponse.__class__.__name__}"')
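# Hedged summary of the shapes accepted above, not in the original module
# (SomeDto is an illustrative name):
# responseClass=SomeDto -> controllerResponse must be a SomeDto instance
# responseClass=[SomeDto] -> same check, single-element list form
# responseClass=[[SomeDto]] -> controllerResponse must be a list of SomeDto instances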
@Function
def setResource(apiInstance, resourceInstance, resourceName=None) :
resourceName = getResourceFinalName(resourceInstance, resourceName=resourceName)
setattr(apiInstance,resourceName,resourceInstance)
@Function
def bindResource(apiInstance,resourceInstance) :
validateFlaskApi(apiInstance)
setResource(getattr(apiInstance.resource, getResourceType(resourceInstance).lower()), resourceInstance)
def getGlobalException(exception, resourceInstance, resourceInstanceMethod):
apiInstance = getNullableApi()
return GlobalException.handleLogErrorException(exception, resourceInstance, resourceInstanceMethod, apiInstance)
def raiseGlobalException(exception, resourceInstance, resourceInstanceMethod) :
raise getGlobalException(exception, resourceInstance, resourceInstanceMethod)
@Function
def getCompleteResponseByException(exception, resourceInstance, resourceInstanceMethod) :
exception = getGlobalException(exception, resourceInstance, resourceInstanceMethod)
completeResponse = [{'message':exception.message, 'timestamp':str(exception.timeStamp)},exception.status]
log.error(resourceInstance.__class__, f'Error processing {resourceInstance.__class__.__name__}.{resourceInstanceMethod.__name__} request', exception)
return completeResponse
@Function
def initialize(apiInstance, defaultUrl=None, openInBrowser=False) :
defaultUrl = defaultUrl
openInBrowser = openInBrowser
url = f'{apiInstance.host}{apiInstance.baseUrl}'
if defaultUrl :
url = f'{url}{defaultUrl}'
def inBetweenFunction(function,*argument,**keywordArgument) :
log.debug(initialize,f'''{function.__name__} method''')
if (openInBrowser) :
log.debug(initialize,f'''Opening "{url}" url in browser''')
webbrowser.open_new(url)
def innerFunction(*args,**kwargs) :
try :
functionReturn = function(*args,**kwargs)
except Exception as exception :
raise Exception(f'Failed to initialize. Cause: {str(exception)}')
return functionReturn
return innerFunction
return inBetweenFunction
@Function
def Controller(
url=c.SLASH,
tag='Tag not defined',
description='Controller not described'
) :
controllerUrl = url
controllerTag = tag
controllerDescription = description
def Wrapper(OuterClass,*args,**kwargs):
apiInstance = getApi()
log.debug(Controller,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass,flask_restful.Resource):
url = controllerUrl
tag = controllerTag
description = controllerDescription
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self)
flask_restful.Resource.__init__(self,*args,**kwargs)
self.service = apiInstance.resource.service
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
def getRequestBodyAsJson(contentType) :
try :
if OpenApiManager.DEFAULT_CONTENT_TYPE == contentType :
requestBodyAsJson = request.get_json()
else :
raise Exception(f'Content type "{contentType}" not implemented')
except Exception as exception :
raise GlobalException.GlobalException(message='Not possible to parse the request', logMessage=str(exception), status=HttpStatus.BAD_REQUEST)
return requestBodyAsJson
@Security.jwtRequired
def securedMethod(args, kwargs, contentType, resourceInstance, resourceInstanceMethod, requestClass, roleRequired) :
if not Security.getRole() in roleRequired :
raise GlobalException.GlobalException(message='Role not allowed', logMessage=f'''Role {Security.getRole()} trying to access a denied resource''', status=HttpStatus.FORBIDEN)
return notSecuredMethod(args, kwargs, contentType, resourceInstance, resourceInstanceMethod, requestClass)
def notSecuredMethod(args, kwargs, contentType, resourceInstance, resourceInstanceMethod, requestClass) :
if resourceInstanceMethod.__name__ in OpenApiManager.ABLE_TO_RECIEVE_BODY_LIST and requestClass :
requestBodyAsJson = getRequestBodyAsJson(contentType) ###- request.get_json()
if requestBodyAsJson :
serializerReturn = Serializer.convertFromJsonToObject(requestBodyAsJson, requestClass)
args = getArgsWithSerializerReturnAppended(serializerReturn, args, isControllerMethod=True)
return resourceInstanceMethod(resourceInstance,*args[1:],**kwargs)
@Function
def ControllerMethod(
url = c.SLASH,
requestClass = None,
responseClass = None,
roleRequired = None,
consumes = OpenApiManager.DEFAULT_CONTENT_TYPE,
produces = OpenApiManager.DEFAULT_CONTENT_TYPE
):
controllerMethodUrl = url
controllerMethodRequestClass = requestClass
controllerMethodResponseClass = responseClass
controllerMethodRoleRequired = roleRequired
controllerMethodProduces = produces
controllerMethodConsumes = consumes
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(ControllerMethod,f'''wrapping {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
if roleRequired and isinstance(roleRequired, list) and roleRequired != [] :
completeResponse = securedMethod(args, kwargs, consumes, resourceInstance, resourceInstanceMethod, requestClass, roleRequired)
else :
completeResponse = notSecuredMethod(args, kwargs, consumes, resourceInstance, resourceInstanceMethod, requestClass)
validateResponseClass(responseClass, completeResponse[0])
except Exception as exception :
completeResponse = getCompleteResponseByException(exception, resourceInstance, resourceInstanceMethod)
###- request.method: GET
###- request.url: http://127.0.0.1:5000/alert/dingding/test?x=y
###- request.base_url: http://127.0.0.1:5000/alert/dingding/test
###- request.url_charset: utf-8
###- request.url_root: http://127.0.0.1:5000/
###- str(request.url_rule): /alert/dingding/test
###- request.host_url: http://127.0.0.1:5000/
###- request.host: 127.0.0.1:5000
###- request.script_root:
###- request.path: /alert/dingding/test
###- request.full_path: /alert/dingding/test?x=y
###- request.args: ImmutableMultiDict([('x', 'y')])
###- request.args.get('x'): y
controllerResponse = completeResponse[0]
status = completeResponse[1]
return jsonifyResponse(controllerResponse, produces, status)
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
innerResourceInstanceMethod.url = controllerMethodUrl
innerResourceInstanceMethod.requestClass = controllerMethodRequestClass
innerResourceInstanceMethod.responseClass = controllerMethodResponseClass
innerResourceInstanceMethod.roleRequired = controllerMethodRoleRequired
innerResourceInstanceMethod.produces = controllerMethodProduces
innerResourceInstanceMethod.consumes = controllerMethodConsumes
return innerResourceInstanceMethod
return innerMethodWrapper
@Function
def validateArgs(args, requestClass, method) :
if requestClass :
resourceInstance = args[0]
if Serializer.isList(requestClass) :
for index in range(len(requestClass)) :
if Serializer.isList(args[index + 1]) and len(args[index + 1]) > 0 :
expecteObjectClass = requestClass[index][0]
for objectInstance in args[index + 1] :
GlobalException.validateArgs(resourceInstance, method, objectInstance, expecteObjectClass)
else :
objectRequest = args[index + 1]
expecteObjectClass = requestClass[index]
GlobalException.validateArgs(resourceInstance, method, objectRequest, expecteObjectClass)
else :
objectRequest = args[1]
expecteObjectClass = requestClass
GlobalException.validateArgs(resourceInstance, method, objectRequest, expecteObjectClass)
@Function
def Service() :
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Service,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass):
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.globals = apiInstance.globals
self.service = apiInstance.resource.service
self.repository = apiInstance.resource.repository
self.validator = apiInstance.resource.validator
self.mapper = apiInstance.resource.mapper
self.helper = apiInstance.resource.helper
self.converter = apiInstance.resource.converter
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def ServiceMethod(requestClass=None):
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(ServiceMethod,f'''innerMethodWrapper wraped {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
validateArgs(args,requestClass,innerResourceInstanceMethod)
methodReturn = resourceInstanceMethod(*args,**kwargs)
except Exception as exception :
raiseGlobalException(exception, resourceInstance, resourceInstanceMethod)
return methodReturn
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
return innerResourceInstanceMethod
return innerMethodWrapper
@Function
def Repository(model = None) :
repositoryModel = model
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Repository,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass):
model = repositoryModel
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.repository = apiInstance.repository
self.globals = apiInstance.globals
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def Validator() :
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Validator,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass):
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.service = apiInstance.resource.service
self.validator = apiInstance.resource.validator
self.helper = apiInstance.resource.helper
self.converter = apiInstance.resource.converter
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def ValidatorMethod(requestClass=None, message=None, logMessage=None) :
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(ValidatorMethod,f'''wrapping {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
validateArgs(args,requestClass,innerResourceInstanceMethod)
methodReturn = resourceInstanceMethod(*args,**kwargs)
except Exception as exception :
raiseGlobalException(exception, resourceInstance, resourceInstanceMethod)
return methodReturn
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
return innerResourceInstanceMethod
return innerMethodWrapper
@Function
def Mapper() :
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Mapper,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass):
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.service = apiInstance.resource.service
self.validator = apiInstance.resource.validator
self.mapper = apiInstance.resource.mapper
self.helper = apiInstance.resource.helper
self.converter = apiInstance.resource.converter
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def MapperMethod(requestClass=None, responseClass=None) :
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(MapperMethod,f'''wrapping {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
validateArgs(args,requestClass,innerResourceInstanceMethod)
args = getArgsWithResponseClassInstanceAppended(args, responseClass)
methodReturn = resourceInstanceMethod(*args,**kwargs)
except Exception as exception :
raiseGlobalException(exception, resourceInstance, resourceInstanceMethod)
return methodReturn
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
return innerResourceInstanceMethod
return innerMethodWrapper
@Function
def Helper() :
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Helper,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass,flask_restful.Resource):
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.helper = apiInstance.resource.helper
self.converter = apiInstance.resource.converter
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def HelperMethod(requestClass=None, responseClass=None) :
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(HelperMethod,f'''wrapping {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
validateArgs(args,requestClass,innerResourceInstanceMethod)
args = getArgsWithResponseClassInstanceAppended(args, responseClass)
methodReturn = resourceInstanceMethod(*args,**kwargs)
except Exception as exception :
raiseGlobalException(exception, resourceInstance, resourceInstanceMethod)
return methodReturn
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
return innerResourceInstanceMethod
return innerMethodWrapper
@Function
def Converter() :
def Wrapper(OuterClass, *args, **kwargs):
apiInstance = getApi()
noException = None
log.debug(Converter,f'''wrapping {OuterClass.__name__}''')
class InnerClass(OuterClass):
def __init__(self,*args,**kwargs):
log.debug(OuterClass,f'in {InnerClass.__name__}.__init__(*{args},**{kwargs})')
OuterClass.__init__(self,*args,**kwargs)
self.helper = apiInstance.resource.helper
self.converter = apiInstance.resource.converter
overrideSignatures(InnerClass, OuterClass)
return InnerClass
return Wrapper
@Function
def ConverterMethod(requestClass=None, responseClass=None) :
def innerMethodWrapper(resourceInstanceMethod,*args,**kwargs) :
noException = None
log.debug(ConverterMethod,f'''wrapping {resourceInstanceMethod.__name__}''')
def innerResourceInstanceMethod(*args,**kwargs) :
resourceInstance = args[0]
try :
validateArgs(args, requestClass, innerResourceInstanceMethod)
args = getArgsWithResponseClassInstanceAppended(args, responseClass)
methodReturn = resourceInstanceMethod(*args,**kwargs)
except Exception as exception :
raiseGlobalException(exception, resourceInstance, resourceInstanceMethod)
return methodReturn
overrideSignatures(innerResourceInstanceMethod, resourceInstanceMethod)
return innerResourceInstanceMethod
return innerMethodWrapper
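# --- hedged usage sketch, not part of this module ---
# Assuming an app.py exposing the `globals` object that getGlobals() imports,
# a resource wired through these decorators could look like the following;
# BookRequestDto, BookResponseDto and HttpStatus.CREATED are illustrative
# names, not guaranteed by this framework:
#
# @Controller(url='/book', tag='Book', description='Book operations')
# class BookController :
#     @ControllerMethod(url=c.SLASH, requestClass=BookRequestDto, responseClass=BookResponseDto)
#     def post(self, dto) :
#         return self.service.book.create(dto), HttpStatus.CREATED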
| null |
python_framework/api/src/service/flask/FlaskManager.py
|
FlaskManager.py
|
py
| 25,290 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "python_helper.Constant.DOT_SPACE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.LOG_CAUSE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant.TAB",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.TAB",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.TAB",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.Response",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer.jsonifyIt",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.helper.Serializer.isList",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.helper.Serializer.convertFromObjectToObject",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.NOTHING",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "app.globals",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "flask_restful.Api",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "flask_restful.__name__",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.handleLogErrorException",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "python_helper.log.error",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "webbrowser.open_new",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.SLASH",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "flask_restful.Resource",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "python_helper.log.debug",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "flask_restful.Resource.__init__",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager.DEFAULT_CONTENT_TYPE",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.GlobalException",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.domain.HttpStatus.BAD_REQUEST",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.domain.HttpStatus",
"line_number": 270,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.Security.getRole",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.Security",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.GlobalException",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.Security.getRole",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.Security",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.domain.HttpStatus.FORBIDEN",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.domain.HttpStatus",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.Security.jwtRequired",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.service.Security",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager.ABLE_TO_RECIEVE_BODY_LIST",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.helper.Serializer.convertFromJsonToObject",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "python_helper.Constant.SLASH",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "python_helper.Constant",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager.DEFAULT_CONTENT_TYPE",
"line_number": 293,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager.DEFAULT_CONTENT_TYPE",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "python_framework.api.src.service.openapi.OpenApiManager",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.helper.Serializer.isList",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.helper.Serializer.isList",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.helper.Serializer",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.validateArgs",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.validateArgs",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.service.GlobalException.validateArgs",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.service.GlobalException",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 366,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 369,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 386,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 409,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 422,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 425,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 417,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 439,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 435,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 453,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 472,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 495,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 495,
"usage_type": "name"
},
{
"api_name": "flask_restful.Resource",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "python_helper.log.debug",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 532,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "python_helper.log.debug",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "python_helper.log",
"line_number": 544,
"usage_type": "name"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.overrideSignatures",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "python_framework.api.src.annotation.MethodWrapper.Function",
"line_number": 540,
"usage_type": "name"
}
] |
424346879
|
# -*- coding: utf-8 -*-
import pytest
from chocolatepy import ChocolateApp, ChocolateServer, NonChocolateAppError
from webtest import TestApp
@pytest.fixture
def app_one():
app = ChocolateApp("one")
@app.route("/")
def index():
return "one"
return app
@pytest.fixture
def app_two():
app = ChocolateApp("two")
@app.route("/")
def index():
return "two"
return app
@pytest.fixture
def app_three():
app = ChocolateApp("three")
@app.route("/")
def index():
return "three"
return app
@pytest.fixture
def app_four():
app = ChocolateApp("four")
@app.route("/<value>")
def index(value):
return value
return app
def test_register_apps(app_one, app_two, app_three, app_four):
server = ChocolateServer()
server.register_apps(app_one, app_two, app_three, app_four)
app = TestApp(server.server)
assert app.get("/one").status == "200 OK"
assert app.get("/one").text == "one"
assert app.get("/two").status == "200 OK"
assert app.get("/two").text == "two"
assert app.get("/three").status == "200 OK"
assert app.get("/three").text == "three"
assert app.get("/four/foo").status == "200 OK"
assert app.get("/four/foo").text == "foo"
assert app.get("/one").text == app.get("/").text
def test_register_apps_with_default_app(app_one, app_two, app_three, app_four):
server = ChocolateServer()
server.register_apps(app_one, app_three, app_four, default_app=app_two)
app = TestApp(server.server)
assert app.get("/two").text == app.get("/").text
def test_register_non_chocolate_app(app_one, app_two, app_three, app_four):
    server = ChocolateServer()
    bad_app = "app_five"
    with pytest.raises(NonChocolateAppError):
        server.register_apps(app_one, app_two, app_three, app_four, bad_app)
def test_register_non_chocolate_app_as_default(app_one, app_two, app_three, app_four):
    server = ChocolateServer()
    bad_app = 5
    with pytest.raises(NonChocolateAppError):
        server.register_apps(app_one, app_two, app_three, app_four, default_app=bad_app)
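# Usage sketch (added note, not part of the original suite; run the tests
# themselves with `pytest tests/test_server.py`): the same fixtures can back
# an ad-hoc server, e.g.
#   server = ChocolateServer()
#   server.register_apps(ChocolateApp("one"))
#   TestApp(server.server).get("/one")   # -> response with .text == "one"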
| null |
tests/test_server.py
|
test_server.py
|
py
| 2,216 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "chocolatepy.ChocolateApp",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "chocolatepy.ChocolateApp",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "chocolatepy.ChocolateApp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "chocolatepy.ChocolateApp",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "chocolatepy.ChocolateServer",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "webtest.TestApp",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "chocolatepy.ChocolateServer",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "webtest.TestApp",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "chocolatepy.ChocolateServer",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "chocolatepy.NonChocolateAppError",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "chocolatepy.ChocolateServer",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "chocolatepy.NonChocolateAppError",
"line_number": 104,
"usage_type": "name"
}
] |
631909505
|
import array as arr
import re
from matplotlib import pyplot as plt
from matplotlib import style
#creating arrays with typecode 'd' (double-precision float)
time = arr.array('d',[]); #array for time
value = arr.array('d',[]); #array for voltage/current
#reading file
filename = "rangkaian_RC.csv";
#reading every line of the external file
with open(filename,'r') as filehandle:
    for line in filehandle:
        #check = re.split(" |\n",line); #splitting the line with the delimiter " " or "\n"
        token = re.split(";",line); #splitting the line with the delimiter ";"
time.append(float(token[0]));
value.append(float(token[1]));
#user input: 'v'/'V' plots voltage, 'i'/'I' plots current
user_input = input();
#displaying the graphic
style.use('ggplot')
plt.scatter(time, value)
if((user_input == 'v') or (user_input == 'V')) :
plt.title('Voltage vs Time');
plt.ylabel('Voltage(V)');
plt.xlabel('Time(s)');
elif ((user_input == 'i') or (user_input == 'I')) :
plt.title('Current vs Time');
plt.ylabel('Current(A)');
plt.xlabel('Time(s)');
else :
    print("Invalid user input.\n");
plt.show()
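# Input format note (added sketch; the file contents below are an assumption):
# the parser expects one semicolon-delimited "time;value" pair per line, e.g.
#   0.0;5.0
#   0.1;3.03
#   0.2;1.84
# Any extra semicolon-separated columns after the second are ignored.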
| null |
plotting_math_graph.py
|
plotting_math_graph.py
|
py
| 1,185 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "array.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "array.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.style.use",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
}
] |
185123426
|
from django.contrib import admin
from .models import Post, Category, Tag, Product
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ['title','created_time','modified_time','category','author',]
class ProductAdmin(admin.ModelAdmin):
list_display = ['name','excerpt',]
admin.site.register(Post,PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(Product,ProductAdmin)
admin.site.site_header = '瑞新药业'
admin.site.site_title = '瑞新药业'
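# Equivalent decorator-based registration (illustrative sketch, Django >= 1.7;
# not part of the original file):
#   @admin.register(Post)
#   class PostAdmin(admin.ModelAdmin):
#       list_display = ['title', 'created_time', 'modified_time', 'category', 'author']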
| null |
blog/admin.py
|
admin.py
|
py
| 513 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Post",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Category",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Tag",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Product",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 17,
"usage_type": "name"
}
] |
528888459
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import gettext as _
from wagtail.admin import messages, signals
from wagtail.admin.action_menu import PageActionMenu
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.core import hooks
from wagtail.core.models import Page
def add_subpage(request, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
if not parent_page.permissions_for_user(request.user).can_add_subpage():
raise PermissionDenied
page_types = [
(model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
for model in type(parent_page).creatable_subpage_models()
if model.can_create_at(parent_page)
]
# sort by lower-cased version of verbose name
page_types.sort(key=lambda page_type: page_type[0].lower())
if len(page_types) == 1:
# Only one page type is available - redirect straight to the create form rather than
# making the user choose
verbose_name, app_label, model_name = page_types[0]
return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)
return TemplateResponse(request, 'wagtailadmin/pages/add_subpage.html', {
'parent_page': parent_page,
'page_types': page_types,
'next': get_valid_next_url_from_request(request),
})
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
parent_page_perms = parent_page.permissions_for_user(request.user)
if not parent_page_perms.can_add_subpage():
raise PermissionDenied
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
# Get class
page_class = content_type.model_class()
# Make sure the class is a descendant of Page
if not issubclass(page_class, Page):
raise Http404
# page must be in the list of allowed subpage types for this parent ID
if page_class not in parent_page.creatable_subpage_models():
raise PermissionDenied
if not page_class.can_create_at(parent_page):
raise PermissionDenied
for fn in hooks.get_hooks('before_create_page'):
result = fn(request, parent_page, page_class)
if hasattr(result, 'status_code'):
return result
page = page_class(owner=request.user)
edit_handler = page_class.get_edit_handler()
edit_handler = edit_handler.bind_to(request=request, instance=page)
form_class = edit_handler.get_form_class()
next_url = get_valid_next_url_from_request(request)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=page,
parent_page=parent_page)
if form.is_valid():
page = form.save(commit=False)
is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
is_submitting = bool(request.POST.get('action-submit')) and parent_page.has_workflow
if not is_publishing:
page.live = False
# Save page
parent_page.add_child(instance=page)
# Save revision
revision = page.save_revision(user=request.user, log_action=False)
# Publish
if is_publishing:
for fn in hooks.get_hooks('before_publish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
revision.publish(user=request.user)
for fn in hooks.get_hooks('after_publish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
# Submit
if is_submitting:
workflow = page.get_workflow()
workflow.start(page, request.user)
# Notifications
if is_publishing:
if page.go_live_at and page.go_live_at > timezone.now():
messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.get_admin_display_title()), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
else:
buttons = []
if page.url is not None:
buttons.append(messages.button(page.url, _('View live'), new_window=True))
buttons.append(messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')))
messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=buttons)
elif is_submitting:
buttons = []
if page.is_previewable():
buttons.append(
messages.button(
reverse('wagtailadmin_pages:view_draft', args=(page.id,)),
_('View draft'),
new_window=True
),
)
buttons.append(
messages.button(
reverse('wagtailadmin_pages:edit', args=(page.id,)),
_('Edit')
)
)
messages.success(
request,
_("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()),
buttons=buttons
)
else:
messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title()))
for fn in hooks.get_hooks('after_create_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
if is_publishing or is_submitting:
# we're done here
if next_url:
# redirect back to 'next' url if present
return redirect(next_url)
# redirect back to the explorer
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
# Just saving - remain on edit page for further edits
target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
if next_url:
# Ensure the 'next' url is passed through again if present
target_url += '?next=%s' % urlquote(next_url)
return redirect(target_url)
else:
messages.validation_error(
request, _("The page could not be created due to validation errors"), form
)
has_unsaved_changes = True
else:
signals.init_new_page.send(sender=create, page=page, parent=parent_page)
form = form_class(instance=page, parent_page=parent_page)
has_unsaved_changes = False
edit_handler = edit_handler.bind_to(form=form)
return TemplateResponse(request, 'wagtailadmin/pages/create.html', {
'content_type': content_type,
'page_class': page_class,
'parent_page': parent_page,
'edit_handler': edit_handler,
'action_menu': PageActionMenu(request, view='create', parent_page=parent_page),
'preview_modes': page.preview_modes,
'form': form,
'next': next_url,
'has_unsaved_changes': has_unsaved_changes,
})
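# Hook contract sketch (added illustration; 'EventPage' is a hypothetical model
# name): any 'before_create_page' / 'before_publish_page' hook that returns an
# object with a status_code short-circuits the view above, e.g.
#   from django.shortcuts import redirect
#   from wagtail.core import hooks
#   @hooks.register('before_create_page')
#   def block_event_pages(request, parent_page, page_class):
#       if page_class.__name__ == 'EventPage':
#           return redirect('wagtailadmin_home')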
| null |
wagtail/admin/views/pages/create.py
|
create.py
|
py
| 8,112 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.template.response.TemplateResponse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.views.pages.utils.get_valid_next_url_from_request",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_by_natural_key",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.DoesNotExist",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "django.http.Http404",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.core.exceptions.PermissionDenied",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "wagtail.core.hooks.get_hooks",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.views.pages.utils.get_valid_next_url_from_request",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks.get_hooks",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "wagtail.core.hooks.get_hooks",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.messages.success",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.button",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.button",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.button",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.success",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.button",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.button",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.success",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.success",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks.get_hooks",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "wagtail.core.hooks",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "django.utils.http.urlquote",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages.validation_error",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.messages",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.signals.init_new_page.send",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.signals.init_new_page",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "wagtail.admin.signals",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "django.template.response.TemplateResponse",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.action_menu.PageActionMenu",
"line_number": 194,
"usage_type": "call"
}
] |
624926750
|
from tqdm import tqdm
import textdistance as tt
import pandas as pd
import concurrent.futures
import math
def AdjustYear(x):
    if 100 > x > 20:
        x = 1900 + x
    elif x < 20:
        x = 2000 + x
    return x
def sim_chooserController(sim_chooser, simple,dif_sim_per_column=False):
if sim_chooser is None and not dif_sim_per_column:
if not simple:
sim_chooser = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
else:
sim_chooser = [1, 2, 3, 4, 5, 6]
if sim_chooser is not None and dif_sim_per_column:
try:
for n,col in enumerate(sim_chooser):
if type(col) is not list:
sim_chooser[n] = list(col)
sim_chooser[n].sort()
except:
sim_chooser = [[]]
print("invalid sim_chooser")
if type(sim_chooser) is not list:
try:
sim_chooser = [sim_chooser]
except:
print("wrong sim_chooser declaration, should be in list type")
sim_chooser = []
if not dif_sim_per_column:
sim_chooser.sort()
return sim_chooser
def getFeaturesNames(columns, simple=False, sim_chooser=None,dif_sim_per_column=False):
    '''
    Takes the column names of the database as input and returns an array
    with the names of all the features that will be used in the matching.
    The textdistance library has a function for each of these similarities.
    Simple:
    1-Hamming 2-Levenshtein 3-Damerau levenshtein 4-Needleman-Wunsch 5-Gotoh 6-Smith-Waterman
    Not simple:
    7-Jaccard 8-Tversky index 9-Overlap coefficient 10-Cosine 11-Monge-Elkan 12-Bag distance
    13-Arithmetic coding 14-Numerical distance
    '''
sim_chooser = sim_chooserController(sim_chooser, simple,dif_sim_per_column)
col_names = ['id_l', 'id_r']
if not dif_sim_per_column:
for a in columns:
            # Edit based
if 1 in sim_chooser:
col_names.append(a + featureMapper(1)) # Hamming
# col_names.append(a+'_mli') # Mlipns
if 2 in sim_chooser:
col_names.append(a + featureMapper(2)) # Levenshtein
# col_names.append(a+'_S95') # Strcmp95
# col_names.append(a+'_jW') # Jaro-Winkler
if 3 in sim_chooser:
col_names.append(a + featureMapper(3)) # damerau levenshtein
if 4 in sim_chooser:
col_names.append(a + featureMapper(4)) # Needleman-Wunsch
if 5 in sim_chooser:
col_names.append(a + featureMapper(5)) # Gotoh
if 6 in sim_chooser:
col_names.append(a + featureMapper(6)) # Smith-Waterman
if not simple:
# Token based
if 7 in sim_chooser:
col_names.append(a + featureMapper(7)) # jaccard
# col_names.append(a+'_sD') # Sørensen–Dice coefficient
if 8 in sim_chooser:
col_names.append(a + featureMapper(8)) # Tversky index
if 9 in sim_chooser:
col_names.append(a + featureMapper(9)) # Overlap coefficient
# col_names.append(a+'_td') #Tanimoto distance
if 10 in sim_chooser:
col_names.append(a + featureMapper(10)) # cosine
if 11 in sim_chooser:
col_names.append(a + featureMapper(11)) # Monge-Elkan
if 12 in sim_chooser:
col_names.append(a + featureMapper(12)) # Bag distance
# Compression based
if 13 in sim_chooser:
col_names.append(a + featureMapper(13)) # Arithmetic coding
# col_names.append(a+'_RLE') #RLE
# col_names.append(a+'_BRLE') #BWT RLE
# col_names.append(a+'_sqrt') #Square Root
# if sim_chooser == 14 or sim_chooser == 0:
# col_names.append(a + '_ent') # Entropy
# Phonetic
# col_names.append(a+'_MRA') #MRA
# col_names.append(a+'_edi') #Editex
# Numerical only
if 14 in sim_chooser:
col_names.append(a + featureMapper(14)) # MaxMin division
else:
for n,a in enumerate(columns):
            # Edit based
if 1 in sim_chooser[n]:
col_names.append(a + featureMapper(1)) # Hamming
if 2 in sim_chooser[n]:
col_names.append(a + featureMapper(2)) # Levenshtein
if 3 in sim_chooser[n]:
col_names.append(a + featureMapper(3)) # damerau levenshtein
if 4 in sim_chooser[n]:
col_names.append(a + featureMapper(4)) # Needleman-Wunsch
if 5 in sim_chooser[n]:
col_names.append(a + featureMapper(5)) # Gotoh
if 6 in sim_chooser[n]:
col_names.append(a + featureMapper(6)) # Smith-Waterman
if not simple:
# Token based
if 7 in sim_chooser[n]:
col_names.append(a + featureMapper(7)) # jaccard
if 8 in sim_chooser[n]:
col_names.append(a + featureMapper(8)) # Tversky index
if 9 in sim_chooser[n]:
col_names.append(a + featureMapper(9)) # Overlap coefficient
if 10 in sim_chooser[n]:
col_names.append(a + featureMapper(10)) # cosine
if 11 in sim_chooser[n]:
col_names.append(a + featureMapper(11)) # Monge-Elkan
if 12 in sim_chooser[n]:
col_names.append(a + featureMapper(12)) # Bag distance
# Compression based
if 13 in sim_chooser[n]:
col_names.append(a + featureMapper(13)) # Arithmetic coding
# Numerical division
if 14 in sim_chooser[n]:
col_names.append(a + featureMapper(14)) # MaxMin division
return col_names
def getColSim(df1: pd.DataFrame, df2: pd.DataFrame, pairs, simple=False, sim_chooser=None, dif_sim_per_column=False):
    '''
    Calculates the similarity between pairs of rows from 2 different dataframes.
    If the simple flag is set to True, only the "simple" similarities are calculated.
    Simple:
    1-Hamming 2-Levenshtein 3-Damerau levenshtein 4-Needleman-Wunsch 5-Gotoh 6-Smith-Waterman
    Not simple:
    7-Jaccard 8-Tversky index 9-Overlap coefficient 10-Cosine 11-Monge-Elkan 12-Bag distance
    13-Arithmetic coding 14-MinMax division
    * dif_sim_per_column: if True, sim_chooser is a list of lists (one per column);
      if False (default), sim_chooser is a single list or a number
    '''
sim_chooser = sim_chooserController(sim_chooser, simple, dif_sim_per_column)
all_pairs_sim = []
for p in tqdm(pairs):
r0 = df1.loc[p[0]]
r1 = df2.loc[p[1]]
similarities = [p[0], p[1]]
for n,a in enumerate(df1.columns):
s0 = str(r0[a]).lower()
s1 = str(r1[a]).lower()
if(dif_sim_per_column):
sim=simCalculator(sim_chooser[n], s0, s1)
else:
sim = simCalculator(sim_chooser, s0, s1)
for s in sim:
similarities.append(s)
all_pairs_sim.append(similarities)
return all_pairs_sim
def simCalculator(sim_chooser, s0, s1):
similarities = []
if 1 in sim_chooser:
# Hamming
sim = tt.hamming.normalized_similarity(s0, s1)
similarities.append(sim)
if 2 in sim_chooser:
# Levenshtein
sim = tt.levenshtein.normalized_similarity(s0, s1)
similarities.append(sim)
if 3 in sim_chooser:
# Damerau levenshtein # this is slow
sim = tt.damerau_levenshtein.normalized_similarity(s0, s1)
##sim = pylev.damerau_levenshtein(s0,s1)
similarities.append(sim)
if 4 in sim_chooser:
# Needleman-Wunsch
sim = tt.needleman_wunsch.normalized_similarity(s0, s1)
similarities.append(sim)
if 5 in sim_chooser:
# Gotoh
sim = tt.gotoh.normalized_similarity(s0, s1)
similarities.append(sim)
if 6 in sim_chooser:
# Smith-Waterman
sim = tt.smith_waterman.normalized_similarity(s0, s1)
similarities.append(sim)
###Token Based
if 7 in sim_chooser:
# Jaccard
sim = tt.jaccard.normalized_similarity(s0, s1)
similarities.append(sim)
##Sørensen–Dice coefficient
# sim = tt.Sorensen.normalized_similarity(s0,s1)
# similarities.append(sim)
if 8 in sim_chooser:
# Tversky index
sim = tt.tversky.normalized_similarity(s0, s1)
similarities.append(sim)
if 9 in sim_chooser:
# Overlap coefficient
sim = tt.overlap.normalized_similarity(s0, s1)
similarities.append(sim)
##Tanimoto distance
# sim = tt.Tanimoto.normalized_similarity(s0,s1)
# similarities.append(sim)
if 10 in sim_chooser:
# Cosine
sim = tt.cosine.normalized_similarity(s0, s1)
similarities.append(sim)
if 11 in sim_chooser:
# Monge-Elkan
sim = tt.monge_elkan.normalized_similarity(s0, s1)
similarities.append(sim)
if 12 in sim_chooser:
# Bag distance
sim = tt.bag.normalized_similarity(s0, s1)
similarities.append(sim)
###Compression Based
if 13 in sim_chooser:
# Arithmetic coding
sim = tt.arith_ncd.normalized_similarity(s0, s1)
similarities.append(sim)
##RLE
# sim = tt.RLENCD.normalized_similarity(s0,s1)
# similarities.append(sim)
##BWT RLE
# sim = tt.BWTRLENCD.normalized_similarity(s0,s1)
# similarities.append(sim)
##Square Root
# sim = tt.sqrt_ncd.normalized_similarity(s0,s1)
# similarities.append(sim)
# if sim_chooser == 14 or sim_chooser == 0:
# Entropy
# sim = tt.entropy_ncd.normalized_similarity(s0, s1)
# similarities.append(sim)
if 14 in sim_chooser:
# minMax division (only for numbers)
try:
sim = min(float(s0),float(s1))/max(float(s0),float(s1))
if math.isnan(sim):
sim = 0
except:
sim = 0
similarities.append(sim)
return similarities
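# Example (illustrative, not from the original file): with sim_chooser=[2, 10]
# simCalculator returns the similarities in chooser order, here
# [levenshtein, cosine], each normalized to [0, 1]:
#   simCalculator([2, 10], "hello", "hallo")  # -> e.g. [0.8, ~0.8]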
def parallelGetColSim(df1: pd.DataFrame, df2: pd.DataFrame, pairs, simple=False, sim_chooser=None
, dif_sim_per_column=False, process=4):
"""
    Parallelized version of getColSim.
    Calculates the similarity between pairs of rows from 2 different dataframes.
    If the simple flag is set to True, only the "simple" similarities are calculated.
    Simple:
    1-Hamming 2-Levenshtein 3-Damerau levenshtein 4-Needleman-Wunsch 5-Gotoh 6-Smith-Waterman
    Not simple:
    7-Jaccard 8-Tversky index 9-Overlap coefficient 10-Cosine 11-Monge-Elkan 12-Bag distance
    13-Arithmetic coding 14-MinMax division
    * dif_sim_per_column: if True, sim_chooser is a list of lists (one per column);
      if False (default), sim_chooser is a single list or a number
"""
all_pairs_sim = []
list_of_pairs=list(pairs)
aux = round(len(list_of_pairs) / process)
with concurrent.futures.ProcessPoolExecutor() as executor:
results = []
for p in range(process):
if p == process-1:
results.append(executor.submit(getColSim, df1, df2, list_of_pairs[p * aux:], simple, sim_chooser
, dif_sim_per_column))
else:
results.append(executor.submit(getColSim, df1, df2, list_of_pairs[p * aux: (p + 1) * aux], simple,
sim_chooser, dif_sim_per_column))
for f in concurrent.futures.as_completed(results):
all_pairs_sim += f.result()
return all_pairs_sim
def featureMapper(n):
mapper = {
1: '_ham',
2: '_ls',
3: '_dLs',
4: '_NW',
5: '_go',
6: '_sW',
7: '_js',
8: '_tv',
9: '_oc',
10: '_cs',
11: '_mE',
12: '_bag',
13: '_ath',
14: '_div'
}
return mapper.get(n)
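# Minimal end-to-end sketch (assumed toy data, not part of the original module):
#   import pandas as pd
#   df1 = pd.DataFrame({'name': ['john smith']})
#   df2 = pd.DataFrame({'name': ['jon smith']})
#   cols = getFeaturesNames(df1.columns, sim_chooser=[2, 10])   # id_l, id_r, name_ls, name_cs
#   rows = getColSim(df1, df2, [(0, 0)], sim_chooser=[2, 10])
#   features = pd.DataFrame(rows, columns=cols)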
| null |
Prep/features_utills.py
|
features_utills.py
|
py
| 12,223 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "textdistance.hamming.normalized_similarity",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "textdistance.hamming",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "textdistance.levenshtein.normalized_similarity",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "textdistance.levenshtein",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "textdistance.damerau_levenshtein.normalized_similarity",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "textdistance.damerau_levenshtein",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "textdistance.needleman_wunsch.normalized_similarity",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "textdistance.needleman_wunsch",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "textdistance.gotoh.normalized_similarity",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "textdistance.gotoh",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "textdistance.smith_waterman.normalized_similarity",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "textdistance.smith_waterman",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "textdistance.jaccard.normalized_similarity",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "textdistance.jaccard",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "textdistance.tversky.normalized_similarity",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "textdistance.tversky",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "textdistance.overlap.normalized_similarity",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "textdistance.overlap",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "textdistance.cosine.normalized_similarity",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "textdistance.cosine",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "textdistance.monge_elkan.normalized_similarity",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "textdistance.monge_elkan",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "textdistance.bag.normalized_similarity",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "textdistance.bag",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "textdistance.arith_ncd.normalized_similarity",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "textdistance.arith_ncd",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "math.isnan",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures.futures.ProcessPoolExecutor",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.futures.as_completed",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures",
"line_number": 294,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures",
"line_number": 294,
"usage_type": "name"
}
] |
103334059
|
# encoding: utf-8
from __future__ import unicode_literals
import os
import tempfile
import unittest
from mock import Mock
from pdefc import CompilerException
from pdefc.lang.packages import PackageInfo
from pdefc.sources import UrlPackageSource, FilePackageSource, InMemoryPackageSource, PackageSources, UTF8, \
PackageSource
class TestPackageSources(unittest.TestCase):
def setUp(self):
self.tempfiles = []
def tearDown(self):
for path in self.tempfiles:
os.remove(path)
def test_add_source(self):
info = PackageInfo('test')
source = InMemoryPackageSource(info)
sources = PackageSources()
sources.add_source(source)
assert sources.get('test') is source
def test_add_path__file(self):
source = Mock()
source.package_name = 'test'
_, filename = tempfile.mkstemp('pdef-tests')
sources = PackageSources()
sources._create_file_source = lambda filename: source
sources.add_path(filename)
os.remove(filename)
assert sources.get('test') is source
def test_add_path__url(self):
source = Mock()
source.package_name = 'test'
sources = PackageSources()
sources._create_url_source = lambda url: source
sources.add_path('http://localhost:8080/test/api.yaml')
assert sources.get('test') is source
def test_get__not_found(self):
sources = PackageSources()
self.assertRaises(CompilerException, sources.get, 'absent')
class TestFilePackageSource(unittest.TestCase):
def test(self):
# Given a fixture package info and files.
info = PackageInfo('project_api', modules=['users', 'users.events'])
files = {
'../../test.yaml': info.to_yaml(),
'../../users.pdef': 'users module',
'../../users/events.pdef': 'events module'
}
# Create a package source.
source = FilePackageSource('../../test.yaml')
source._read_file = lambda filepath: files[filepath]
source._read()
# The source should read the info and the modules.
assert source.package_name == 'project_api'
assert source.package_info.to_dict() == info.to_dict()
assert source.module_sources[0].name == 'users'
assert source.module_sources[1].name == 'users.events'
assert source.module_sources[0].data == 'users module'
assert source.module_sources[1].data == 'events module'
class TestUrlPackageSource(unittest.TestCase):
def test_module(self):
# Given a fixture package info and urls.
info = PackageInfo('project_api', modules=['users', 'users.events'])
urls = {
'http://localhost/project/api/api.yaml': info.to_yaml(),
'http://localhost/project/api/users.pdef': 'users module',
'http://localhost/project/api/users/events.pdef': 'events module'
}
# Create a package source.
source = UrlPackageSource('http://localhost/project/api/api.yaml')
source._fetch_url = lambda url: urls[url]
# The source should read the info and the modules.
assert source.package_name == 'project_api'
assert source.package_info.to_dict() == info.to_dict()
assert source.module_sources[0].name == 'users'
assert source.module_sources[1].name == 'users.events'
assert source.module_sources[0].data == 'users module'
assert source.module_sources[1].data == 'events module'
def test_module_url(self):
source = UrlPackageSource('http://localhost:8080/project/api/api.yaml')
path = source._module_url('users.internal.events')
assert path == 'http://localhost:8080/project/api/users/internal/events.pdef'
def test_fetch_unicode(self):
# Given a UTF-8 encoded URL source.
class File(object):
def read(self):
return 'Привет, как дела?'.encode(UTF8)
# Download the source.
source = UrlPackageSource('http://localhost/test.yaml')
source._download = lambda url: File()
# The data should be decoded as UTF8
data = source._fetch_url('http://localhost/')
assert data == 'Привет, как дела?'
| null |
src/pdefc/tests/test_sources.py
|
test_sources.py
|
py
| 4,299 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pdefc.lang.packages.PackageInfo",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.InMemoryPackageSource",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.PackageSources",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.PackageSources",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.PackageSources",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.PackageSources",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pdefc.CompilerException",
"line_number": 53,
"usage_type": "argument"
},
{
"api_name": "unittest.TestCase",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pdefc.lang.packages.PackageInfo",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.FilePackageSource",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "pdefc.lang.packages.PackageInfo",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.UrlPackageSource",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.UrlPackageSource",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pdefc.sources.UTF8",
"line_number": 115,
"usage_type": "argument"
},
{
"api_name": "pdefc.sources.UrlPackageSource",
"line_number": 118,
"usage_type": "call"
}
] |
336246661
|
import json
import os
import datetime
import argparse
from duplocli.terraform.aws.common.tf_utils import TfUtils
from duplocli.terraform.aws.step1.aws_create_tfstate_step1 import AwsCreateTfstateStep1
from duplocli.terraform.aws.step1.get_aws_object_list import GetAwsObjectList
from duplocli.terraform.aws.step2.aws_tf_import_step2 import AwsTfImportStep2
from duplocli.terraform.aws.common.tf_file_utils import TfFileUtils
import psutil
class ImportParameters:
tenant_name = None
def __init__(self, parameters):
self.tenant_name = self.get_key(parameters, 'tenant_name')
self.aws_region = self.get_key(parameters, 'aws_region')
self.zip_folder = self.get_key(parameters, 'zip_folder')
self.zip_file_path = self.get_key(parameters, 'zip_file_path')
self.import_name = self.get_key(parameters, 'import_name')
self.download_aws_keys = self.get_key(parameters, 'download_aws_keys')
self.url = self.get_key(parameters, 'url')
self.tenant_id = self.get_key(parameters, 'tenant_id')
self.api_token = self.get_key(parameters, 'api_token')
self.params_json_file_path = self.get_key(parameters, 'params_json_file_path')
self.temp_folder = self.get_key(parameters, 'temp_folder')
self.tenant_with_prefix = self.get_key(parameters, 'tenant_with_prefix')
self.state_file = self.get_key(parameters, 'state_file')
def get_key(self, parameters, key):
if key in parameters:
return parameters[key]
return None
class AwsParseParams:
def __init__(self):
self.file_utils = TfFileUtils(self.get_default_params(), step="step1")
######## ####
    def check_required_fields(self, parameters, required_fields):
        for required_field in required_fields:
            if parameters[required_field] is None:
                fields = ",".join(required_fields)
                print("missing required_fields = " + fields)
                print(self.get_help())
                raise Exception("missing required_fields = " + fields)
def resolve_parameters(self, parsed_args):
parameters = self.app_defaults(parsed_args)
# validate params
required_fields = ["tenant_name", "aws_region"]
self.check_required_fields(parameters, required_fields)
if parameters["download_aws_keys"] == "yes":
required_fields=["url","tenant_id","api_token"]
self.check_required_fields(parameters, required_fields)
params = ImportParameters(parameters)
# if params.zip_file_path is None:
if params.import_name is None:
now = datetime.datetime.now()
now_str = now.strftime("%m-%d-%Y--%H-%M-%S")
params.import_name = now_str
if self.parameters_default["temp_folder"] == params.temp_folder:
#append import_name to zip_file_path, zip_folder, temp_folder
params.temp_folder = os.path.join(params.temp_folder, params.tenant_name, params.import_name)
params.zip_folder = os.path.join(params.temp_folder, "zip")
if params.zip_file_path is None:
params.zip_file_path = os.path.join(params.zip_folder, params.import_name)
print("temp_folder ***** ", params.temp_folder)
print("zip_folder ***** ", params.zip_folder)
print("zip_file_path ***** ", os.path.abspath(params.zip_file_path+".zip") )
return params
######## ####
def get_default_params(self):
file_utils = TfFileUtils(None, step=None, set_temp_and_zip_folders=False)
parameters = file_utils.load_json_file("import_tf_parameters_default.json")
self.parameters_default = parameters
params = ImportParameters(parameters)
return params
def get_help(self):
return """
argument to python file
[-t / --tenant_id TENANTID] -- TenantId e.g. 97a833a4-2662-4e9c-9867-222565ec5cb6
[-n / --tenant_name TENANTNAME] -- TenantName e.g. webdev
[-r / --aws_region AWSREGION] -- AWSREGION e.g. us-west2
[-a / --api_token APITOKEN] -- Duplo API Token
[-u / --url URL] -- Duplo URL e.g. https://msp.duplocloud.net
[-k / --download_aws_keys DOWNLOADKEYS] -- Aws keypair=yes/no, private key used for ssh into EC2 servers
[-z / --zip_folder ZIPFOLDER] -- folder to save imported files in zip format
[-i / --import_name IMPORTNAME] -- import_name and zip_file_path are mutually exclusive; import_name creates sub-folders and a zip file with the same name.
[-o / --zip_file_path ZIPFILEPATH] -- zip file path to save imported terraform files in zip format
[-j / --params_json_file_path PARAMSJSONFILE] -- All params passed in single JSON file
[-h / --help HELP] -- help
OR alternately
pass the above parameters in single json file
[-j/--params_json_file_path PARAMSJSONFILE] = FOLDER/terraform_import_json.json
terraform_import_json.json
{
"tenant_name": "xxxxxx",
"aws_region": "xxxxxx",
"zip_folder": "zip",
"download_aws_keys": "yes",
"url": "https://xxx.duplocloud.net",
"tenant_id": "xxx-2662-4e9c-9867-9a4565ec5cb6",
"api_token": "xxxxxx",
"import_name":"UNIQUE_NAME"
}
OR alternately
pass the above parameters in ENV variables
export tenant_name="xxxxxx"
export aws_region="xxxxxx"
export zip_folder="zip",
export download_aws_keys="yes",
export url="https://xxx.duplocloud.net",
export tenant_id="xxx-2662-4e9c-9867-9a4565ec5cb6",
export api_token="xxxxxx"
export zip_file_path="/tmp/NAMe.zip" or export import_name="UNIQUE_NAME"
Sequence of parameters evaluation is: default -> ENV -> JSON_FILE -> arguments
parameters in argument
-> override parameters in terraform_import_json
AND parameters in terraform_import_json
-> override parameters in ENV variables
AND parameters in ENV variables
-> override default values (import_tf_parameters_default.json)
"""
######## ####
def get_parser(self):
help_str = self.get_help()
# parser = argparse.ArgumentParser(prog='AwsTfImport',add_help=False)
# parser = argparse.ArgumentParser(description="Download Terraform state files.", argument_default=argparse.SUPPRESS,
# allow_abbrev=False, add_help=False)
parser = argparse.ArgumentParser(description="Download Terraform state files.", usage=self.get_help())
parser.add_argument('-t', '--tenant_id', action='store', dest='tenant_id')
parser.add_argument('-n', '--tenant_name', action='store', dest='tenant_name')
parser.add_argument('-r', '--aws_region', action='store', dest='aws_region')
parser.add_argument('-a', '--api_token', action='store', dest='api_token')
parser.add_argument('-u', '--url', action='store', dest='url')
parser.add_argument('-k', '--download_aws_keys', action='store', dest='download_aws_keys')
parser.add_argument('-z', '--zip_folder', action='store', dest='zip_folder')
parser.add_argument('-i', '--import_name', action='store', dest='import_name')
parser.add_argument('-o', '--zip_file_path', action='store', dest='zip_file_path')
parser.add_argument('-j', '--params_json_file_path', action='store', dest='params_json_file_path')
# parser.add_argument('-h', '--help', action='help' , help=" params usage")
return parser
######## ####
def app_defaults(self, parsed_args):
parameters = self.file_utils.load_json_file("import_tf_parameters_default.json")
print("########## default parameters ########## ")
for key in parameters:
print(" default parameter values", key, parameters[key])
print("########## passed as environ variables ########## ")
for key in parameters:
if key in os.environ:
print(" override parameter by passed as environ variable ", key, os.environ[key])
val = os.environ[key]
parameters[key] = val
print("########## params_json_file_path parameters ########## ")
if parsed_args.params_json_file_path is not None:
print("params_json_file_path ", parsed_args.params_json_file_path)
parameters_json = self.file_utils.load_json_file(parsed_args.params_json_file_path)
for key in parameters_json:
print(" params_json_file_path parameter values", key, parameters_json[key])
parameters[key] = parameters_json[key]
print("########## passed as arguments parameters ########## ")
for key, val in vars(parsed_args).items():
if val is not None:
print(" override parameter by passed in arguments ", key, val)
parameters[key] = val
print("########## final parameters ########## ")
for key in parameters:
print("final", key, parameters[key])
return parameters
| null |
duplocli/terraform/aws/aws_parse_params.py
|
aws_parse_params.py
|
py
| 9,444 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "duplocli.terraform.aws.common.tf_file_utils.TfFileUtils",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "duplocli.terraform.aws.common.tf_file_utils.TfFileUtils",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 186,
"usage_type": "attribute"
}
] |
392625329
|
"""
Holds handling functions for user operations.
Uses a floating instance of the database client that is instantiated in
the `config.db` module, like all other `util` modules.
"""
from config.db import get_database, get_database_client_name
from models import exceptions
import models.users as user_models
# instantiate the main collection used by this util file, for convenience
def users_collection():
return get_database()[get_database_client_name()]["users"]
async def register_user(
form: user_models.UserRegistrationForm) -> user_models.UserId:
"""
Register a user registration form in the database and return its user ID.
"""
# cast input form (python class) -> dictionary (become JSON eventually)
form_dict = form.dict()
# insert id into column
users_collection().insert_one(form_dict)
# return user_id if success
return form_dict["_id"]
async def get_user_info_by_identifier(
identifier: user_models.UserIdentifier) -> user_models.User:
"""
Returns a User object by its given identifier.
"""
query = identifier.get_database_query()
#query to database
user_document = users_collection().find_one(query)
if not user_document:
raise exceptions.UserNotFoundException
# cast the database response into a User object
return user_models.User(**user_document)
async def delete_user(identifier: user_models.UserIdentifier) -> None:
"""
Deletes a user by its identifier.
"""
query = identifier.get_database_query()
response = users_collection().delete_one(query)
if response.deleted_count == 0:
detail = "User not found and could not be deleted"
raise exceptions.UserNotFoundException(detail=detail)
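# Editor's hedged sketch (not part of the original module): the delete_one /
# deleted_count pattern above, shown self-contained with mongomock, which
# mimics the pymongo collection API (an assumption about the real client).
import mongomock
users = mongomock.MongoClient()["db"]["users"]
users.insert_one({"_id": "u1", "email": "a@example.com"})
response = users.delete_one({"_id": "u1"})
assert response.deleted_count == 1  # 0 would mean "user not found"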
| null |
util/users.py
|
users.py
|
py
| 1,757 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "config.db.get_database",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "config.db.get_database_client_name",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.users.UserRegistrationForm",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.users",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.users.UserId",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.users.UserIdentifier",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.users",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "models.exceptions.UserNotFoundException",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.exceptions",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models.users.User",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.users",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.users.User",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.users.UserIdentifier",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.users",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "models.exceptions.UserNotFoundException",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.exceptions",
"line_number": 57,
"usage_type": "name"
}
] |
367157607
|
from sklearn import neighbors
import pickle
import ProcessImage as image
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
# k-nearest neighbours method
class train(object):
def __init__(self, neigh_numbers, first_two=False, features=[]):
self.neigh_numbers=neigh_numbers
self.first_two=first_two
self.features=features
def train(self, X, y):
if self.first_two:
X=X[:, :2]
for weight in ['uniform', 'distance']:
self.neigh=neighbors.KNeighborsClassifier(self.neigh_numbers, weights=weight)
self.neigh.fit(X, y)
name='out/kneigh_'+str(weight)+'_'+str(self.neigh_numbers)+'.pkl'
#with open(name, 'wb') as f:
# f.flush()
# pickle.dump(self.neigh, f)
def predict(self, X):
if self.first_two:
X=X[:, :2]
#list=[X]
return self.neigh.predict(X)
def test(self):
im=image.ProcessImages('test', two_value=self.first_two, list_=self.features)
X,Y=im.make_all()
if self.first_two:
X=X[:, :2]
return self.neigh.score(X, Y)
def valid(self):
im=image.ProcessImages('validation', two_value=self.first_two, list_=self.features)
X, Y = im.make_all()
if self.first_two:
X=X[:, :2]
return self.neigh.score(X, Y)
def load(self, neigh_numbers, type='distance'):
filename = 'out/kneigh_' + type + '_' + str(self.neigh_numbers) + '.pkl'
with open(filename, 'rb') as f:
self.neigh=pickle.load(f)
#resource:
def train_first_two_features(self, X, y, weight='uniform'):
X=X[:, :2]
self.neigh=neighbors.KNeighborsClassifier(self.neigh_numbers, weights=weight)
self.neigh.fit(X, y)
x_min, x_max= X[:, 0].min()-1, X[:, 0].max()+1
y_min, y_max= X[:, 1].min()-1, X[:, 1].max()+1
x_, y_ = np.meshgrid(np.arange(x_min, x_max, 0.005),
np.arange(y_min, y_max, 0.005))
Z = self.neigh.predict(np.c_[x_.ravel(), y_.ravel()])
Z=Z.reshape(x_.shape)
plt.figure()
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
plt.pcolormesh(x_, y_, Z, cmap=cmap_light)
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
edgecolor='k', s=20)
plt.xlim(x_.min(), x_.max())
plt.ylim(y_.min(), y_.max())
plt.show()
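# Editor's hedged sketch (not part of the original file): the boundary plot
# above rests on a meshgrid of candidate points that the classifier labels;
# in miniature:
import numpy as np
xx, yy = np.meshgrid(np.arange(0.0, 1.0, 0.5), np.arange(0.0, 1.0, 0.5))
grid = np.c_[xx.ravel(), yy.ravel()]
print(grid.shape)  # (4, 2) -- one row per grid point, ready for predict()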
def test_train_two_features(self, testX, testY, trainX, trainY, weight='uniform'):
trainX=trainX[:, :2]
testX=testX[:, :2]
self.neigh = neighbors.KNeighborsClassifier(self.neigh_numbers, weights=weight)
self.neigh.fit(trainX, trainY)
x_min, x_max = trainX[:, 0].min() - 1, trainX[:, 0].max() + 1
y_min, y_max = trainX[:, 1].min() - 1, trainX[:, 1].max() + 1
x_, y_ = np.meshgrid(np.arange(x_min, x_max, 0.004),
np.arange(y_min, y_max, 0.004))
Z = self.neigh.predict(np.c_[x_.ravel(), y_.ravel()])
Z = Z.reshape(x_.shape)
plt.figure()
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
plt.pcolormesh(x_, y_, Z, cmap=cmap_light)
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
plt.scatter(testX[:, 0], testX[:, 1], c=testY, cmap=cmap_bold,
edgecolor='k', s=20)
plt.xlim(x_.min(), x_.max())
plt.ylim(y_.min(), y_.max())
plt.show()
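# Editor's hedged sketch (not part of the original file): the fit/score flow
# used throughout this class, on a tiny toy dataset:
import numpy as np
from sklearn import neighbors
X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
y = np.array([0, 0, 1, 1])
clf = neighbors.KNeighborsClassifier(n_neighbors=3, weights='distance')
clf.fit(X, y)
print(clf.score(X, y))  # 1.0 -- with weights='distance', training points self-match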
| null |
beadando-tovabbi-modszerekkel/train_k_neighbours.py
|
train_k_neighbours.py
|
py
| 3,621 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "ProcessImage.ProcessImages",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "ProcessImage.ProcessImages",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.c_",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolormesh",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.c_",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pcolormesh",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
}
] |
243717432
|
# coding: utf-8
# Author: F. Alex Wolf (http://falexwolf.de)
"""tSNE
Notes
-----
This module automatically chooses between two t-SNE implementations:
- sklearn.manifold.TSNE
- Dmitry Ulyanov (multicore, fastest)
https://github.com/DmitryUlyanov/Multicore-TSNE
install via 'pip install psutil cffi', get code from github
"""
import numpy as np
from ..tools.pca import pca
from .. import settings as sett
from .. import logging as logg
def tsne(adata, random_state=0, n_pcs=50, perplexity=30, n_jobs=None, copy=False):
u"""tSNE
Reference
---------
L.J.P. van der Maaten and G.E. Hinton.
Visualizing High-Dimensional Data Using t-SNE.
Journal of Machine Learning Research 9(Nov):2579-2605, 2008.
Parameters
----------
adata : AnnData
Annotated data matrix, optionally with adata.smp['X_pca'], which is
written when running sc.pca(adata) and is used directly for tSNE.
random_state : unsigned int or -1, optional (default: 0)
Change to use different initial states for the optimization; if -1, use
the default behavior of the implementation (sklearn uses np.random.seed,
Multicore-TSNE produces a new plot at every call).
n_pcs : int, optional (default: 50)
Number of principal components in preprocessing PCA.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
n_jobs : int or None (default: None)
Use the multicore implementation, if it is installed. Defaults to
sett.n_jobs.
Notes
-----
X_tsne : np.ndarray of shape n_samples x 2
Array that stores the tSNE representation of the data, analogous
to X_pca, X_diffmap and X_spring; it is added to adata.smp.
"""
logg.m('compute tSNE', r=True)
adata = adata.copy() if copy else adata
# preprocessing by PCA
if 'X_pca' in adata.smp and adata.smp['X_pca'].shape[1] >= n_pcs:
X = adata.smp['X_pca'][:, :n_pcs]
logg.m('... using X_pca for tSNE')
else:
if n_pcs > 0 and adata.X.shape[1] > n_pcs:
logg.m('... preprocess using PCA with', n_pcs, 'PCs')
logg.m('avoid this by setting n_pcs = 0', v='hint')
X = pca(adata.X, random_state=random_state, n_comps=n_pcs)
adata.smp['X_pca'] = X
else:
X = adata.X
logg.m('... using', n_pcs, 'principal components')
# params for sklearn
params_sklearn = {'perplexity': perplexity,
'random_state': None if random_state == -1 else random_state,
'verbose': sett.verbosity,
'learning_rate': 200,
'early_exaggeration': 12,
# 'method': 'exact'
}
n_jobs = sett.n_jobs if n_jobs is None else n_jobs
# deal with different tSNE implementations
multicore_failed = False
if n_jobs > 1:
try:
from MulticoreTSNE import MulticoreTSNE as TSNE
tsne = TSNE(n_jobs=n_jobs, **params_sklearn)
logg.m('... using MulticoreTSNE')
X_tsne = tsne.fit_transform(X.astype(np.float64))
except ImportError:
multicore_failed = True
sett.m(0, '--> did not find package MulticoreTSNE: to speed up the computation install it from\n'
' https://github.com/DmitryUlyanov/Multicore-TSNE')
if n_jobs == 1 or multicore_failed:
from sklearn.manifold import TSNE
tsne = TSNE(**params_sklearn)
logg.m('consider installing the package MulticoreTSNE from\n'
' https://github.com/DmitryUlyanov/Multicore-TSNE\n'
' Even for `n_jobs=1` this speeds up the computation considerably.',
v='hint')
logg.m('... using sklearn.manifold.TSNE')
X_tsne = tsne.fit_transform(X)
# update AnnData instance
adata.smp['X_tsne'] = X_tsne
logg.m('finished', t=True, end=' ')
logg.m('and added\n'
' "X_tsne" coordinates, the tSNE representation of X (adata.smp)')
return adata if copy else None
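# Editor's hedged sketch (not part of the original module): the sklearn
# fallback path above, reduced to a standalone call on random data:
import numpy as np
from sklearn.manifold import TSNE
X = np.random.RandomState(0).rand(100, 50)
X_tsne = TSNE(perplexity=30, random_state=0).fit_transform(X)
print(X_tsne.shape)  # (100, 2)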
| null |
scanpy/tools/tsne.py
|
tsne.py
|
py
| 4,394 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "tools.pca.pca",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "MulticoreTSNE.MulticoreTSNE",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "sklearn.manifold.TSNE",
"line_number": 95,
"usage_type": "call"
}
] |
80493053
|
from collections import Counter
from decimal import Decimal
from ouretf.accounts.brokerage_account import BrokerageAccount
from ouretf.accounts.exceptions import NotEnoughFundsException, NotEnoughHoldingsException, OrderAlreadyCanceled, \
OrderDoesNotExist
from ouretf.accounts.mock.orders import MockBuyOrder, MockSellOrder
from ouretf.accounts.mock.ticker import MockTicker
class MockAccount(BrokerageAccount):
"""
A mock brokerage account.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buy_orders = []
self.open_buy_orders = []
self.sell_orders = []
self.open_sell_orders = []
self.orders = {}
self.holdings = Counter()
self.cash = 0
self.unsettled_cash = 0
self.reserved_cash = 0 # used for unfilled orders
self.reserved_shares = Counter()
self.cash_settlements = []
self.tickers = {}
def buy_logic(self, symbol, limit, amount):
order = MockBuyOrder(symbol, limit, amount)
self.buy_orders.append(order)
self.open_buy_orders.append(order)
self.reserved_cash += limit * amount
self.cash -= limit * amount
self.orders[order.order_number] = order
return order.order_number
def sell_logic(self, symbol, limit, amount):
order = MockSellOrder(symbol, limit, amount)
self.sell_orders.append(order)
self.open_sell_orders.append(order)
self.reserved_shares[symbol] += amount
self.orders[order.order_number] = order
return order.order_number
def check_buy_preconditions(self, symbol, limit, amount):
needed_cash = limit * amount
if needed_cash > self.cash:
raise NotEnoughFundsException(needed_cash, self.cash)
def check_sell_preconditions(self, symbol, limit, amount):
num_have = self.holdings.get(symbol, 0)
reserved_amount = self.reserved_shares.get(symbol, 0)
if amount > num_have - reserved_amount:
raise NotEnoughHoldingsException(symbol, amount, num_have, reserved_amount)
def set_ticker_info(self, symbol, **kwargs):
symbol = symbol.upper()
ticker_info = self.tickers.get(symbol)
if not ticker_info:
ticker_info = MockTicker(symbol, **kwargs)
else:
ticker_info.set_info(**kwargs)
self.tickers[symbol] = ticker_info
self._execute_orders()
def _execute_orders(self):
for order in self.open_buy_orders:
ask_price = self.tickers[order.symbol].ask
if ask_price <= order.limit:
self._execute_purchase(order, ask_price)
for order in self.open_sell_orders:
bid_price = self.tickers[order.symbol].bid
if bid_price >= order.limit:
self._execute_sale(order, bid_price)
def _execute_purchase(self, order, ask_price):
if not isinstance(ask_price, Decimal):
ask_price = Decimal(ask_price)
cost = order.execute(ask_price)
symbol = order.symbol
self.reserved_cash -= order.reserved_cash
self.cash += order.reserved_cash - cost
self.holdings[symbol] += order.amount
self.open_buy_orders.remove(order)
def _execute_sale(self, order, bid_price):
if not isinstance(bid_price, Decimal):
bid_price = Decimal(bid_price)
order.execute(bid_price)
symbol = order.symbol
self.unsettled_cash += order.amount * bid_price
self.holdings[symbol] -= order.amount
self.reserved_shares[symbol] -= order.amount
self.open_sell_orders.remove(order)
def cancel_order_logic(self, order_number):
for order in self.open_buy_orders:
if order.order_number == order_number:
self.reserved_cash -= order.reserved_cash
self.cash += order.reserved_cash
self.open_buy_orders.remove(order)
order.cancel()
return
for order in self.open_sell_orders:
if order.order_number == order_number:
self.reserved_shares[order.symbol] -= order.amount
self.open_sell_orders.remove(order)
order.cancel()
return
existing_order = self.orders.get(order_number)
if existing_order:
assert existing_order.canceled
raise OrderAlreadyCanceled(order_number)
raise OrderDoesNotExist(order_number)
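# Editor's hedged sketch (not part of the original module): the Counter-based
# reservation bookkeeping above, reduced to its essentials:
from collections import Counter
holdings = Counter({"ABC": 10})
reserved_shares = Counter()
reserved_shares["ABC"] += 4           # an open sell order reserves 4 shares
sellable = holdings["ABC"] - reserved_shares["ABC"]
assert sellable == 6                  # a further sell of 7 would be rejected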
| null |
ouretf/accounts/mock/account.py
|
account.py
|
py
| 4,536 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "ouretf.accounts.brokerage_account.BrokerageAccount",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.mock.orders.MockBuyOrder",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.mock.orders.MockSellOrder",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.exceptions.NotEnoughFundsException",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.exceptions.NotEnoughHoldingsException",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.mock.ticker.MockTicker",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 82,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.exceptions.OrderAlreadyCanceled",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "ouretf.accounts.exceptions.OrderDoesNotExist",
"line_number": 119,
"usage_type": "call"
}
] |
261816346
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import click
import requests
import yamale
import yaml
from betterboto import client as betterboto_client
from servicecatalog_puppet import asset_helpers
from servicecatalog_puppet import config
from servicecatalog_puppet import constants
from servicecatalog_puppet import manifest_utils
logger = logging.getLogger(__name__)
def expand(f, puppet_account_id, single_account, subset=None):
click.echo("Expanding")
manifest = manifest_utils.load(f, puppet_account_id)
org_iam_role_arn = config.get_org_iam_role_arn(puppet_account_id)
if org_iam_role_arn is None:
click.echo("No org role set - not expanding")
new_manifest = manifest
else:
click.echo("Expanding using role: {}".format(org_iam_role_arn))
with betterboto_client.CrossAccountClientContextManager(
"organizations", org_iam_role_arn, "org-iam-role"
) as client:
new_manifest = manifest_utils.expand_manifest(manifest, client)
click.echo("Expanded")
if single_account:
click.echo(f"Filtering for single account: {single_account}")
for account in new_manifest.get("accounts", []):
if str(account.get("account_id")) == str(single_account):
click.echo(f"Found single account: {single_account}")
new_manifest["accounts"] = [account]
break
click.echo("Filtered")
new_manifest = manifest_utils.rewrite_depends_on(new_manifest)
new_manifest = manifest_utils.rewrite_ssm_parameters(new_manifest)
new_manifest = manifest_utils.rewrite_stacks(new_manifest, puppet_account_id)
if subset:
click.echo(f"Filtering for subset: {subset}")
new_manifest = manifest_utils.isolate(
manifest_utils.Manifest(new_manifest), subset
)
manifest_accounts_all = [
{"account_id": a.get("account_id"), "email": a.get("email")}
for a in new_manifest.get("accounts", [])
]
manifest_accounts_excluding = [
a for a in manifest_accounts_all if a.get("account_id") != puppet_account_id
]
dumped = json.dumps(new_manifest)
dumped = dumped.replace(
"${AWS::ManifestAccountsAll}", json.dumps(manifest_accounts_all).replace('"', '\\"')
)
dumped = dumped.replace(
"${AWS::ManifestAccountsSpokes}", json.dumps(manifest_accounts_excluding).replace('"', '\\"')
)
new_manifest = json.loads(dumped)
if new_manifest.get(constants.LAMBDA_INVOCATIONS) is None:
new_manifest[constants.LAMBDA_INVOCATIONS] = dict()
home_region = config.get_home_region(puppet_account_id)
with betterboto_client.ClientContextManager("ssm") as ssm:
response = ssm.get_parameter(Name="service-catalog-puppet-version")
version = response.get("Parameter").get("Value")
new_manifest["config_cache"] = dict(
home_region=home_region,
regions=config.get_regions(puppet_account_id, home_region),
should_collect_cloudformation_events=config.get_should_use_sns(
puppet_account_id, home_region
),
should_forward_events_to_eventbridge=config.get_should_use_eventbridge(
puppet_account_id, home_region
),
should_forward_failures_to_opscenter=config.get_should_forward_failures_to_opscenter(
puppet_account_id, home_region
),
puppet_version=version,
)
new_name = f.name.replace(".yaml", "-expanded.yaml")
logger.info("Writing new manifest: {}".format(new_name))
with open(new_name, "w") as output:
output.write(yaml.safe_dump(new_manifest, default_flow_style=False))
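# Editor's hedged sketch (not part of the original module): the placeholder
# substitution round-trip used in expand(), shown standalone:
import json
manifest = {"param": "${AWS::ManifestAccountsAll}"}
accounts = [{"account_id": "111111111111", "email": "a@example.com"}]
dumped = json.dumps(manifest)
dumped = dumped.replace("${AWS::ManifestAccountsAll}",
                        json.dumps(accounts).replace('"', '\\"'))
print(json.loads(dumped))  # the placeholder is now an escaped JSON string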
def explode(f):
logger.info("Exploding")
puppet_account_id = config.get_puppet_account_id()
original_name = f.name
expanded_output = f.name.replace(".yaml", "-expanded.yaml")
expanded_manifest = manifest_utils.load(
open(expanded_output, "r"), puppet_account_id
)
expanded_manifest = manifest_utils.Manifest(expanded_manifest)
exploded = manifest_utils.explode(expanded_manifest)
logger.info(f"found {len(exploded)} graphs")
count = 0
for mani in exploded:
with open(original_name.replace(".yaml", f"-exploded-{count}.yaml"), "w") as f:
f.write(yaml.safe_dump(json.loads(json.dumps(mani))))
count += 1
def validate(f):
logger.info("Validating {}".format(f.name))
manifest = manifest_utils.load(f, config.get_puppet_account_id())
schema = yamale.make_schema(asset_helpers.resolve_from_site_packages("schema.yaml"))
data = yamale.make_data(content=yaml.safe_dump(manifest))
yamale.validate(schema, data, strict=False)
tags_defined_by_accounts = {}
for account in manifest.get("accounts"):
for tag in account.get("tags", []):
tags_defined_by_accounts[tag] = True
for collection_type in constants.ALL_SECTION_NAMES:
collection_to_check = manifest.get(collection_type, {})
for collection_name, collection_item in collection_to_check.items():
for deploy_to in collection_item.get("deploy_to", {}).get("tags", []):
tag_to_check = deploy_to.get("tag")
if tags_defined_by_accounts.get(tag_to_check) is None:
print(
f"{collection_type}.{collection_name} uses tag {tag_to_check} in deploy_to that does not exist"
)
for depends_on in collection_item.get("depends_on", []):
if isinstance(depends_on, str):
if manifest.get(constants.LAUNCHES).get(depends_on) is None:
print(
f"{collection_type}.{collection_name} uses {depends_on} in depends_on that does not exist"
)
else:
tt = constants.SECTION_SINGULAR_TO_PLURAL.get(
depends_on.get("type", constants.LAUNCH)
)
dd = depends_on.get("name")
if manifest.get(tt).get(dd) is None:
print(
f"{collection_type}.{collection_name} uses {depends_on} in depends_on that does not exist"
)
click.echo("Finished validating: {}".format(f.name))
click.echo("Finished validating: OK")
def import_product_set(f, name, portfolio_name):
url = f"https://raw.githubusercontent.com/awslabs/aws-service-catalog-products/master/{name}/manifest.yaml"
response = requests.get(url)
logger.info(f"Getting {url}")
manifest = yaml.safe_load(f.read())
if manifest.get("launches") is None:
manifest["launches"] = {}
manifest_segment = yaml.safe_load(response.text)
for launch_name, details in manifest_segment.get("launches").items():
details["portfolio"] = portfolio_name
manifest["launches"][launch_name] = details
with open(f.name, "w") as f:
f.write(yaml.safe_dump(manifest))
def get_manifest():
with betterboto_client.ClientContextManager("codecommit") as codecommit:
content = codecommit.get_file(
repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,
filePath="manifest.yaml",
).get("fileContent")
return yaml.safe_load(content)
def save_manifest(manifest):
with betterboto_client.ClientContextManager("codecommit") as codecommit:
parent_commit_id = (
codecommit.get_branch(
repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,
branchName="master",
)
.get("branch")
.get("commitId")
)
codecommit.put_file(
repositoryName=constants.SERVICE_CATALOG_PUPPET_REPO_NAME,
branchName="master",
fileContent=yaml.safe_dump(manifest),
parentCommitId=parent_commit_id,
commitMessage="Auto generated commit",
filePath=f"manifest.yaml",
)
def add_to_accounts(account_or_ou):
manifest = get_manifest()
manifest.get("accounts").append(account_or_ou)
save_manifest(manifest)
def remove_from_accounts(account_id_or_ou_id_or_ou_path):
manifest = get_manifest()
for account in manifest.get("accounts", []):
if account.get("account_id", "") == account_id_or_ou_id_or_ou_path:
manifest.get("accounts").remove(account)
return save_manifest(manifest)
elif account.get("ou", "") == account_id_or_ou_id_or_ou_path:
manifest.get("accounts").remove(account)
return save_manifest(manifest)
raise Exception(f"Did not remove {account_id_or_ou_id_or_ou_path}")
def add_to_launches(launch_name, launch):
manifest = get_manifest()
launches = manifest.get("launches", {})
launches[launch_name] = launch
manifest["launches"] = launches
save_manifest(manifest)
def remove_from_launches(launch_name):
manifest = get_manifest()
del manifest.get("launches")[launch_name]
save_manifest(manifest)
| null |
servicecatalog_puppet/commands/manifest.py
|
manifest.py
|
py
| 9,189 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_org_iam_role_arn",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "betterboto.client.CrossAccountClientContextManager",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "betterboto.client",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.expand_manifest",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.rewrite_depends_on",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.rewrite_ssm_parameters",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.rewrite_stacks",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.isolate",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.Manifest",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.constants.LAMBDA_INVOCATIONS",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.LAMBDA_INVOCATIONS",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_home_region",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "betterboto.client.ClientContextManager",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "betterboto.client",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_regions",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_should_use_sns",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_should_use_eventbridge",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_should_forward_failures_to_opscenter",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "yaml.safe_dump",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config.get_puppet_account_id",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.load",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.Manifest",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.explode",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "yaml.safe_dump",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils.load",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.manifest_utils",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.config.get_puppet_account_id",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.config",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "yamale.make_schema",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.asset_helpers.resolve_from_site_packages",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.asset_helpers",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "yamale.make_data",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "yamale.validate",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.constants.ALL_SECTION_NAMES",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.LAUNCHES",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.SECTION_SINGULAR_TO_PLURAL.get",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "servicecatalog_puppet.constants.SECTION_SINGULAR_TO_PLURAL",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.LAUNCH",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "yaml.safe_dump",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "betterboto.client.ClientContextManager",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "betterboto.client",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.SERVICE_CATALOG_PUPPET_REPO_NAME",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "betterboto.client.ClientContextManager",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "betterboto.client",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.SERVICE_CATALOG_PUPPET_REPO_NAME",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "servicecatalog_puppet.constants.SERVICE_CATALOG_PUPPET_REPO_NAME",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "servicecatalog_puppet.constants",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "yaml.safe_dump",
"line_number": 203,
"usage_type": "call"
}
] |
557626240
|
from flask import g, jsonify, redirect, request, url_for
from models import User, Category  # Category assumed to live in models (editor's assumption)
from urlparse import urlparse, urljoin
# NOTE (editor's assumption): this module also relies on `session` (a
# SQLAlchemy session) and `current_user` (e.g. from flask_login) being
# provided elsewhere; their imports are not shown in this file.
# USER AUTHENTICATION FUNCTIONS
# Response handler for user info retrieved from Google OAuth
# Declare local user based on authorized user data
# Register new user if this user has never logged in to my site
# Send my app's token back to logged-in user
def auth_user(user_info):
user = session.query(User).filter_by(email=user_info.email).first()
if not user:
user = User(email = user_info.email, name = user_info.name)
session.add(user)
session.commit()
token = user.generate_auth_token(600)
return jsonify({'token': token.decode('ascii'), 'duration': 600})
# Handle OAuth response depending on response from Google
# "remote.name" is currently presumed to be Google - no other OAuth at this time
def handle_authorize(remote, token, user_info):
if token:
g.token = token
if user_info:
auth_user(user_info)
return redirect(url_for('viewItems'))
# raise some_error
# CRUD AUTHORIZATION FUNCTIONS
# Ensure that users attempting to U or D another user's item are redirected to the correct page
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
# Users can only update and delete their own items
def verify_user_permission(item):
return current_user.id == item.user_id
# NEW ITEM FUNCTIONS
def is_existing_category(category):
return session.query(Category).filter_by(name=category).first()
def check_category_needed(category):
if not is_existing_category(category):
new_cat = Category(name=category)
session.add(new_cat)
session.commit()
return
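# Editor's hedged sketch (not part of the original module): the same-origin
# test from is_safe_url above, self-contained on Python 3 (urllib.parse
# replaces the Py2 urlparse module this file imports):
from urllib.parse import urlparse, urljoin
def is_safe(host_url, target):
    ref_url = urlparse(host_url)
    test_url = urlparse(urljoin(host_url, target))
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
assert is_safe('https://example.com/', '/items')
assert not is_safe('https://example.com/', 'https://evil.test/steal')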
| null |
helpers.py
|
helpers.py
|
py
| 1,826 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.User",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "models.User",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "urlparse.urlparse",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.request.host_url",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "urlparse.urlparse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "urlparse.urljoin",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.host_url",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
}
] |
30676078
|
import discord
import datetime
from database.entities.User import User
from commands.utils import Status, get_emoji, mention_author
async def run(db_config, ctx):
author = str(ctx.message.author)
content = ctx.message.content
print("\nReceived message from '"+author+"' with content '"+content+"'")
user = User.get_user(db_config, author)
now = datetime.datetime.now(datetime.timezone.utc)
previous_bento = user.last_bento_date
if previous_bento is not None:
next_bracket = previous_bento.replace(microsecond=0, second=0, minute=0)
if previous_bento.hour % 2 == 0:
next_bracket += datetime.timedelta(hours=1)
else:
next_bracket += datetime.timedelta(hours=2)
print("previous_bento:",previous_bento)
print("next_bracket:",next_bracket)
next_bracket = next_bracket.replace(tzinfo=datetime.timezone.utc)
print("next_bracket aware:",next_bracket)
difference = (next_bracket - now).total_seconds()
print("now:",now)
print("difference:",difference)
if difference > 0:
await no_bento(user, ctx, difference)
return
currency_number = user.crepes
if currency_number is None:
currency_number = 0
currency_number += 1
user.crepes = currency_number
user.last_bento_date = now
user.update_user(db_config,now,content)
emoji = get_emoji("crepe")
emoji_str = emoji.toString(ctx)
title = "Wait! Are you going to the dungeon today? Please take this with you! >///<"
description = mention_author(ctx) + " has received a " + emoji_str + "!"
if currency_number == 1:
footer = "There is " + str(currency_number) + " " + emoji.name + " left in their bento box!"
else:
footer = "There are " + str(currency_number) + " " + emoji.plural + " left in their bento box!"
embed = discord.Embed()
embed.color = Status.OK.value
embed.title = title
embed.description = description
embed.set_footer(text=footer)
embed.set_image(url="attachment://yes.png")
await ctx.send(embed=embed, file=discord.File("./images/bento/yes.png"))
async def no_bento(user, ctx, difference):
currency_number = user.crepes
if currency_number is None:
currency_number = 0
emoji = get_emoji("crepe")
title = "You are back already?"
minutes_left = int(difference / 60)
description = "Sorry, I don't have anything ready for you, " + mention_author(ctx) + "..."
#description += " Please come back again later!"
description += " Please come back again in **" + str(minutes_left) + "** min!"
if currency_number > 1:
footer = "There are " + str(currency_number) + " " + emoji.plural + " left in your bento box!"
else:
footer = "There is " + str(currency_number) + " " + emoji.name + " left in your bento box!"
embed = discord.Embed()
embed.color = Status.KO.value
embed.title = title
embed.description = description
embed.set_footer(text=footer)
embed.set_image(url="attachment://nope.png")
await ctx.send(embed=embed, file=discord.File("./images/bento/nope.png"))
| null |
DanMemoDiscordBot/commands/bento.py
|
bento.py
|
py
| 3,196 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "database.entities.User.User.get_user",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "database.entities.User.User",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.timezone",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "commands.utils.get_emoji",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "commands.utils.mention_author",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "commands.utils.Status.OK",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "commands.utils.Status",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "discord.File",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "commands.utils.get_emoji",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "commands.utils.mention_author",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "commands.utils.Status.KO",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "commands.utils.Status",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "discord.File",
"line_number": 94,
"usage_type": "call"
}
] |